# HG changeset patch # User krauss # Date 1378836661 -7200 # Node ID c9d6f6285e1dd0d14f3dbb953d25e5d4bdec93fe # Parent 437c0a63bb16cbe390b2e913b66dde64859a96e4# Parent fa5b34ffe4a400a674ac5d89b98cb3b9b1fdc7da merged diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/Linux/Isabelle --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/Linux/Isabelle Tue Sep 10 20:11:01 2013 +0200 @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Author: Makarius +# +# Main Isabelle application wrapper. + +if [ -L "$0" ]; then + TARGET="$(LC_ALL=C ls -l "$0" | sed 's/.* -> //')" + exec "$(cd "$(dirname "$0")"; cd "$(pwd -P)"; cd "$(dirname "$TARGET")"; pwd)/$(basename "$TARGET")" "$@" +fi + + +## settings + +PRG="$(basename "$0")" + +ISABELLE_HOME="$(cd "$(dirname "$0")"; cd "$(pwd -P)"; pwd)" +source "$ISABELLE_HOME/lib/scripts/getsettings" || exit 2 + + +## main + +declare -a JAVA_ARGS +JAVA_ARGS=({JAVA_ARGS}) + +exec "$ISABELLE_HOME/bin/isabelle" java "${JAVA_ARGS[@]}" \ + -classpath "$ISABELLE_HOME/src/Tools/jEdit/dist/jedit.jar" isabelle.Main "$@" + diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App1/build --- a/Admin/MacOS/App1/build Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/MacOS/App1/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Make Isabelle application bundle diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App1/script --- a/Admin/MacOS/App1/script Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/MacOS/App1/script Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Author: Makarius # diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App2/mk --- a/Admin/MacOS/App2/mk Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/MacOS/App2/mk Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Make Isabelle/JVM application bundle diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App3/Info.plist --- a/Admin/MacOS/App3/Info.plist Tue Sep 10 20:09:53 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ - - - - -CFBundleDevelopmentRegion -English -CFBundleExecutable -JavaAppLauncher -CFBundleIconFile -isabelle.icns -CFBundleIdentifier -de.tum.in.isabelle -CFBundleDisplayName -{ISABELLE_NAME} -CFBundleInfoDictionaryVersion -6.0 -CFBundleName -{ISABELLE_NAME} -CFBundlePackageType -APPL -CFBundleShortVersionString -1.0 -CFBundleSignature -???? -CFBundleVersion -1 -NSHumanReadableCopyright - -LSApplicationCategoryType -public.app-category.developer-tools -JVMRuntime -jdk -JVMMainClassName -isabelle.Main -JVMOptions - --Dapple.laf.useScreenMenuBar=true -Xms128m -Xmx1024m -Xss2m -Dactors.corePoolSize=4 -Dactors.enableForkJoin=false --Disabelle.home=$APP_ROOT/Contents/Resources/{ISABELLE_NAME} - -JVMArguments - - - - - diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App3/Info.plist-part1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/App3/Info.plist-part1 Tue Sep 10 20:11:01 2013 +0200 @@ -0,0 +1,36 @@ + + + + +CFBundleDevelopmentRegion +English +CFBundleExecutable +JavaAppLauncher +CFBundleIconFile +isabelle.icns +CFBundleIdentifier +de.tum.in.isabelle +CFBundleDisplayName +{ISABELLE_NAME} +CFBundleInfoDictionaryVersion +6.0 +CFBundleName +{ISABELLE_NAME} +CFBundlePackageType +APPL +CFBundleShortVersionString +1.0 +CFBundleSignature +???? 
+CFBundleVersion +1 +NSHumanReadableCopyright + +LSApplicationCategoryType +public.app-category.developer-tools +JVMRuntime +jdk +JVMMainClassName +isabelle.Main +JVMOptions + diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/MacOS/App3/Info.plist-part2 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Admin/MacOS/App3/Info.plist-part2 Tue Sep 10 20:11:01 2013 +0200 @@ -0,0 +1,7 @@ +-Disabelle.home=$APP_ROOT/Contents/Resources/{ISABELLE_NAME} + +JVMArguments + + + + diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/PIDE/convert --- a/Admin/PIDE/convert Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/PIDE/convert Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash THIS="$(cd "$(dirname "$0")"; pwd)" SUPER="$(cd "$THIS/.."; pwd)" diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/Windows/WinRun4J/Isabelle.ini --- a/Admin/Windows/WinRun4J/Isabelle.ini Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/Windows/WinRun4J/Isabelle.ini Tue Sep 10 20:11:01 2013 +0200 @@ -7,13 +7,5 @@ classpath.6=lib\classes\ext\scala-reflect.jar classpath.7=src\Tools\jEdit\dist\jedit.jar vm.location=contrib\jdk\x86-cygwin\jre\bin\server\jvm.dll -vmarg.1=-Dfile.encoding=UTF-8 -vmarg.2=-server -vmarg.3=-Xms128m -vmarg.4=-Xmx1024m -vmarg.5=-Xss2m -vmarg.6=-Dactors.corePoolSize=4 -vmarg.7=-Dactors.enableForkJoin=false -vmarg.8=-Disabelle.home=%INI_DIR% splash.image=lib\logo\isabelle.bmp - +vmarg.1=-Disabelle.home=%INI_DIR% diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/isatest/settings/at64-poly --- a/Admin/isatest/settings/at64-poly Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/isatest/settings/at64-poly Tue Sep 10 20:11:01 2013 +0200 @@ -2,11 +2,11 @@ init_components /home/isabelle/contrib "$HOME/admin/components/main" - POLYML_HOME="/home/polyml/polyml-5.4.1" - ML_SYSTEM="polyml-5.4.1" + POLYML_HOME="/home/polyml/polyml-svn" + ML_SYSTEM="polyml-5.5.1" ML_PLATFORM="x86_64-linux" ML_HOME="$POLYML_HOME/$ML_PLATFORM" - ML_OPTIONS="-H 1000" + ML_OPTIONS="-H 1000 --gcthreads 1" ISABELLE_HOME_USER=~/isabelle-at64-poly diff -r 437c0a63bb16 -r c9d6f6285e1d Admin/lib/Tools/makedist_bundle --- a/Admin/lib/Tools/makedist_bundle Tue Sep 10 20:09:53 2013 +0200 +++ b/Admin/lib/Tools/makedist_bundle Tue Sep 10 20:11:01 2013 +0200 @@ -53,6 +53,8 @@ # bundled components +init_component "$JEDIT_HOME" + mkdir -p "$ARCHIVE_DIR/contrib" echo "#bundled components" >> "$ISABELLE_TARGET/etc/components" @@ -121,19 +123,14 @@ } -# platform-specific patches +# platform-specific setup (inside archive) case "$PLATFORM_FAMILY" in linux) purge_contrib '-name "x86*-darwin" -o -name "x86*-cygwin" -o -name "x86*-windows"' - cat > "$ISABELLE_TARGET/$ISABELLE_NAME" < "$ISABELLE_TARGET/$ISABELLE_NAME" chmod +x "$ISABELLE_TARGET/$ISABELLE_NAME" ;; macos) @@ -152,8 +149,19 @@ perl -pi -e "s,lookAndFeel=.*,lookAndFeel=com.sun.java.swing.plaf.windows.WindowsLookAndFeel,g;" \ "$ISABELLE_TARGET/src/Tools/jEdit/dist/properties/jEdit.props" + ( + cat "$ISABELLE_HOME/Admin/Windows/WinRun4J/Isabelle.ini" + declare -a JAVA_ARGS=() + eval "JAVA_ARGS=($ISABELLE_JAVA_SYSTEM_OPTIONS $JEDIT_JAVA_OPTIONS $JEDIT_SYSTEM_OPTIONS)" + A=2 + for ARG in "${JAVA_ARGS[@]}" + do + echo -e "vmarg.$A=$ARG\r" + A=$[ $A + 1 ] + done + ) > "$ISABELLE_TARGET/${ISABELLE_NAME}.ini" + cp "$TMP/windows_app/Isabelle.exe" "$ISABELLE_TARGET/${ISABELLE_NAME}.exe" - cp "$ISABELLE_HOME/Admin/Windows/WinRun4J/Isabelle.ini" "$ISABELLE_TARGET/${ISABELLE_NAME}.ini" cp "$ISABELLE_HOME/Admin/Windows/Cygwin/Cygwin-Setup.bat" \ "$ISABELLE_HOME/Admin/Windows/Cygwin/Cygwin-Latex-Setup.bat" \ 
"$ISABELLE_HOME/Admin/Windows/Cygwin/Cygwin-Terminal.bat" "$ISABELLE_TARGET" @@ -193,7 +201,7 @@ tar -C "$TMP" -c -z -f "$BUNDLE_ARCHIVE" "$ISABELLE_NAME" || exit 2 -# application +# platform-specific setup (outside archive) if [ "$ISABELLE_PLATFORM_FAMILY" = linux -a "$PLATFORM_FAMILY" != macos -o "$ISABELLE_PLATFORM_FAMILY" = macos ] then @@ -211,8 +219,19 @@ mkdir -p "$APP/Contents/$NAME" done - cat "$APP_TEMPLATE/Info.plist" | \ - perl -p -e "s,{ISABELLE_NAME},${ISABELLE_NAME},g;" > "$APP/Contents/Info.plist" + ( + cat "$APP_TEMPLATE/Info.plist-part1" + + declare -a OPTIONS=() + eval "OPTIONS=($ISABELLE_JAVA_SYSTEM_OPTIONS $JEDIT_JAVA_OPTIONS $JEDIT_SYSTEM_OPTIONS)" + for OPT in "${OPTIONS[@]}" + do + echo "$OPT" + done + echo "-Dapple.awt.application.name={ISABELLE_NAME}" + + cat "$APP_TEMPLATE/Info.plist-part2" + ) | perl -p -e "s,{ISABELLE_NAME},${ISABELLE_NAME},g;" > "$APP/Contents/Info.plist" for NAME in Pure.jar scala-compiler.jar scala-library.jar scala-swing.jar scala-actors.jar scala-reflect.jar do diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Classes/document/build --- a/src/Doc/Classes/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Classes/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Codegen/document/build --- a/src/Doc/Codegen/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Codegen/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Datatypes/Datatypes.thy --- a/src/Doc/Datatypes/Datatypes.thy Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Datatypes/Datatypes.thy Tue Sep 10 20:11:01 2013 +0200 @@ -6,24 +6,8 @@ theory Datatypes imports Setup -keywords - "primrec_new_notyet" :: thy_decl and - "primcorec_notyet" :: thy_decl begin -(*<*) -(* FIXME: Evil setup until "primrec_new" and "primcorec" are bug-free. *) -ML_command {* -fun add_dummy_cmd _ _ lthy = lthy; - -val _ = Outer_Syntax.local_theory @{command_spec "primrec_new_notyet"} "" - (Parse.fixes -- Parse_Spec.where_alt_specs >> uncurry add_dummy_cmd); - -val _ = Outer_Syntax.local_theory @{command_spec "primcorec_notyet"} "" - (Parse.fixes -- Parse_Spec.where_alt_specs >> uncurry add_dummy_cmd); -*} -(*>*) - section {* Introduction \label{sec:introduction} *} @@ -182,6 +166,7 @@ *} + section {* Defining Datatypes \label{sec:defining-datatypes} *} @@ -200,7 +185,7 @@ text {* Datatypes are introduced by specifying the desired names and argument types for -their constructors. \emph{Enumeration types} are the simplest form of datatype: +their constructors. \emph{Enumeration} types are the simplest form of datatype. All their constructors are nullary: *} @@ -208,7 +193,7 @@ text {* \noindent -All three constructors have the type @{typ trool}. +Here, @{const Truue}, @{const Faalse}, and @{const Perhaaps} have the type @{typ trool}. Polymorphic types are possible, such as the following option type, modeled after its homologue from the @{theory Option} theory: @@ -221,8 +206,8 @@ text {* \noindent -The constructors are @{term "None :: 'a option"} and -@{term "Some :: 'a \ 'a option"}. +The constructors are @{text "None :: 'a option"} and +@{text "Some :: 'a \ 'a option"}. The next example has three type parameters: *} @@ -232,7 +217,7 @@ text {* \noindent The constructor is -@{term "Triple :: 'a \ 'b \ 'c \ ('a, 'b, 'c) triple"}. +@{text "Triple :: 'a \ 'b \ 'c \ ('a, 'b, 'c) triple"}. 
Unlike in Standard ML, curried constructors are supported. The uncurried variant is also possible: *} @@ -243,7 +228,7 @@ subsubsection {* Simple Recursion *} text {* -simplest recursive type: copy of the natural numbers: +Natural numbers are the simplest example of a recursive type: *} datatype_new nat = Zero | Suc nat @@ -253,30 +238,41 @@ (*>*) text {* -lists were shown in the introduction; terminated lists are a variant: +\noindent +Lists were shown in the introduction. Terminated lists are a variant: *} +(*<*) + locale dummy_tlist + begin +(*>*) datatype_new ('a, 'b) tlist = TNil 'b | TCons 'a "('a, 'b) tlist" +(*<*) + end +(*>*) text {* -On the right-hand side of the equal sign, the usual Isabelle conventions apply: -Nonatomic types must be enclosed in double quotes. +\noindent +Nonatomic types must be enclosed in double quotes on the right-hand side of the +equal sign, as is customary in Isabelle. *} subsubsection {* Mutual Recursion *} text {* -Mutual recursion = Define several types simultaneously, referring to each other. - -Simple example: distinction between even and odd natural numbers: +\emph{Mutually recursive} types are introduced simultaneously and may refer to each +other. The example below introduces a pair of types for even and odd natural +numbers: *} datatype_new enat = EZero | ESuc onat and onat = OSuc enat text {* -More complex, and more realistic, example: +\noindent +Arithmetic expressions are defined via terms, terms via factors, and factors via +expressions: *} datatype_new ('a, 'b) exp = @@ -290,67 +286,81 @@ subsubsection {* Nested Recursion *} text {* -Nested recursion = Have recursion through a type constructor. - -The introduction showed some examples of trees with nesting through lists. - -More complex example, which reuses our option type: +\emph{Nested recursion} occurs when recursive occurrences of a type appear under +a type constructor. The introduction showed some examples of trees with nesting +through lists. A more complex example, that reuses our @{text option} type, +follows: *} datatype_new 'a btree = BNode 'a "'a btree option" "'a btree option" text {* -Recursion may not be arbitrary; e.g. impossible to define +\noindent +Not all nestings are admissible. For example, this command will fail: *} - datatype_new 'a evil = Evil (*<*)'a - typ (*>*)"'a evil \ 'a evil" + datatype_new 'a wrong = Wrong (*<*)'a + typ (*>*)"'a wrong \ 'a wrong" text {* -Issue: => allows recursion only on its right-hand side. -This issue is inherited by polymorphic datatypes (and codatatypes) -defined in terms of =>. -In general, type constructors "('a1, ..., 'an) k" allow recursion on a subset -of their type arguments. +\noindent +The issue is that the function arrow @{text "\"} allows recursion +only through its right-hand side. This issue is inherited by polymorphic +datatypes defined in terms of~@{text "\"}: +*} + + datatype_new ('a, 'b) fn = Fn "'a \ 'b" + datatype_new 'a also_wrong = Also_Wrong (*<*)'a + typ (*>*)"('a also_wrong, 'a also_wrong) fn" + +text {* +\noindent +In general, type constructors @{text "('a\<^sub>1, \, 'a\<^sub>m) t"} +allow recursion on a subset of their type arguments @{text 'a\<^sub>1}, \ldots, +@{text 'a\<^sub>m}. These type arguments are called \emph{live}; the remaining +type arguments are called \emph{dead}. In @{typ "'a \ 'b"} and +@{typ "('a, 'b) fn"}, the type variable @{typ 'a} is dead and @{typ 'b} is live. 
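Accordingly, nested recursion through @{text fn} is restricted to its second argument: a constructor argument of type @{text "('a, 'a ok) fn"} should be acceptable for a hypothetical type @{text "'a ok"}, since the recursive occurrence sits only in the live position, in contrast to @{text "('a also_wrong, 'a also_wrong) fn"} above.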
*} subsubsection {* Auxiliary Constants and Syntaxes *} text {* -The @{command datatype_new} command introduces various constants in addition to the -constructors. Given a type @{text "('a1,\,'aM) t"} with constructors -@{text "t.C\<^sub>1"}, \ldots, @{text "t.C\<^sub>m"}, the following auxiliary -constants are introduced (among others): +The @{command datatype_new} command introduces various constants in addition to +the constructors. Given a type @{text "('a\<^sub>1, \, 'a\<^sub>m) t"} +with $m > 0$ live type variables and $n$ constructors +@{text "t.C\<^sub>1"}, \ldots, @{text "t.C\<^sub>n"}, the +following auxiliary constants are introduced (among others): \begin{itemize} \setlength{\itemsep}{0pt} -\item \emph{Set functions} (\emph{natural transformations}): -@{text t_set1}, \ldots, @{text t_setM} +\item \relax{Set functions} (or \relax{natural transformations}): +@{text t_set1}, \ldots, @{text t_setm} -\item \emph{Map function} (\emph{functorial action}): @{text t_map} +\item \relax{Map function} (or \relax{functorial action}): @{text t_map} -\item \emph{Relator}: @{text t_rel} +\item \relax{Relator}: @{text t_rel} -\item \emph{Iterator}: @{text t_fold} +\item \relax{Iterator}: @{text t_fold} -\item \emph{Recursor}: @{text t_rec} +\item \relax{Recursor}: @{text t_rec} + +\item \relax{Discriminators}: @{text "t.is_C\<^sub>1"}, \ldots, +@{text "t.is_C\<^sub>n"} -\item \emph{Discriminators}: @{text "t.is_C\<^sub>1"}, \ldots, -@{text "t.is_C\<^sub>m"} - -\item \emph{Selectors}: -@{text t.un_C}$_{11}$, \ldots, @{text t.un_C}$_{1n_1}$, \ldots, -@{text t.un_C}$_{m1}$, \ldots, @{text t.un_C}$_{mn_m}$ +\item \relax{Selectors}: +@{text t.un_C11}$, \ldots, @{text t.un_C1k\<^sub>1}, \\ +\phantom{\relax{Selectors:}} \quad\vdots \\ +\phantom{\relax{Selectors:}} @{text t.un_Cn1}$, \ldots, @{text t.un_Cnk\<^sub>n}. \end{itemize} +\noindent The discriminators and selectors are collectively called \emph{destructors}. The -@{text "t."} prefix is an optional component of the name and is normally hidden. - -The set functions, map function, relator, discriminators, and selectors can be -given custom names, as in the example below: +prefix ``@{text "t."}'' is an optional component of the name and is normally +hidden. The set functions, map function, relator, discriminators, and selectors +can be given custom names, as in the example below: *} (*<*) @@ -380,26 +390,26 @@ \qquad @{thm list.collapse(2)[of xs, no_vars]}\] % For two-constructor datatypes, a single discriminator constant suffices. The -discriminator associated with @{const Cons} is simply @{text "\ null"}. +discriminator associated with @{const Cons} is simply +@{term "\xs. \ null xs"}. -The \keyw{defaults} keyword following the @{const Nil} constructor specifies a -default value for selectors associated with other constructors. Here, it is -used to specify that the tail of the empty list is the empty list (instead of -being unspecified). +The @{text "defaults"} keyword following the @{const Nil} constructor specifies +a default value for selectors associated with other constructors. Here, it is +used to ensure that the tail of the empty list is the empty list (instead of +being left unspecified). -Because @{const Nil} is a nullary constructor, it is also possible to use @{text -"= Nil"} as a discriminator. This is specified by specifying @{text "="} instead -of the identifier @{const null} in the declaration above. 
Although this may look -appealing, the mixture of constructors and selectors in the resulting -characteristic theorems can lead Isabelle's automation to switch between the -constructor and the destructor view in surprising ways. +Because @{const Nil} is a nullary constructor, it is also possible to use +@{term "\xs. xs = Nil"} as a discriminator. This is specified by +entering ``@{text "="}'' instead of the identifier @{const null} in the +declaration above. Although this may look appealing, the mixture of constructors +and selectors in the resulting characteristic theorems can lead Isabelle's +automation to switch between the constructor and the destructor view in +surprising ways. *} text {* -The usual mixfix syntaxes are available for both types and constructors. For example: - -%%% FIXME: remove trailing underscore and use locale trick instead once this is -%%% supported. +The usual mixfix syntaxes are available for both types and constructors. For +example: *} (*<*) @@ -645,9 +655,16 @@ Zero \ a | Suc j' \ at as j')" +(*<*) + context dummy_tlist + begin +(*>*) primrec_new tfold :: "('a \ 'b \ 'b) \ ('a, 'b) tlist \ 'b" where "tfold _ (TNil b) = b" | "tfold f (TCons a as) = f a (tfold f as)" +(*<*) + end +(*>*) text {* Show one example where fun is needed. @@ -855,14 +872,14 @@ consts termi\<^sub>0 :: 'a - datatype_new ('a, 'b) tlist_ = + datatype_new ('a, 'b) tlist = TNil (termi: 'b) (defaults ttl: TNil) - | TCons (thd: 'a) (ttl : "('a, 'b) tlist_") (defaults termi: "\_ xs. termi\<^sub>0 xs") + | TCons (thd: 'a) (ttl : "('a, 'b) tlist") (defaults termi: "\_ xs. termi\<^sub>0 xs") overloading - termi\<^sub>0 \ "termi\<^sub>0 \ ('a, 'b) tlist_ \ 'b" + termi\<^sub>0 \ "termi\<^sub>0 \ ('a, 'b) tlist \ 'b" begin - primrec_new termi\<^sub>0 :: "('a, 'b) tlist_ \ 'b" where + primrec_new termi\<^sub>0 :: "('a, 'b) tlist \ 'b" where "termi\<^sub>0 (TNil y) = y" | "termi\<^sub>0 (TCons x xs) = termi\<^sub>0 xs" end diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Datatypes/document/build --- a/src/Doc/Datatypes/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Datatypes/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Datatypes/document/root.tex --- a/src/Doc/Datatypes/document/root.tex Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Datatypes/document/root.tex Tue Sep 10 20:11:01 2013 +0200 @@ -19,7 +19,9 @@ \newcommand{\keyw}[1]{\isacommand{#1}} -\renewcommand{\isactrlsub}[1]{\/$\sb{\mathrm{#1}}$} +%\renewcommand{\isactrlsub}[1]{\/$\sb{\mathrm{#1}}$} +\renewcommand{\isactrlsub}[1]{\/$\sb{#1}$} +\renewcommand{\isadigit}[1]{\/\ensuremath{\mathrm{#1}}} \renewcommand{\isacharprime}{\isamath{{'}\mskip-2mu}} \renewcommand{\isacharunderscore}{\mbox{\_}} \renewcommand{\isacharunderscorekeyword}{\mbox{\_}} diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Functions/document/build --- a/src/Doc/Functions/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Functions/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Intro/document/build --- a/src/Doc/Intro/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Intro/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/IsarImplementation/document/build --- a/src/Doc/IsarImplementation/document/build Tue Sep 10 20:09:53 2013 +0200 +++ 
b/src/Doc/IsarImplementation/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/IsarRef/Misc.thy --- a/src/Doc/IsarRef/Misc.thy Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/IsarRef/Misc.thy Tue Sep 10 20:11:01 2013 +0200 @@ -46,7 +46,7 @@ \begin{description} \item @{command "print_theory"} prints the main logical content of - the theory context; the ``@{text "!"}'' option indicates extra + the background theory; the ``@{text "!"}'' option indicates extra verbosity. \item @{command "print_methods"} prints all proof methods @@ -55,8 +55,9 @@ \item @{command "print_attributes"} prints all attributes available in the current theory context. - \item @{command "print_theorems"} prints theorems resulting from the - last command; the ``@{text "!"}'' option indicates extra verbosity. + \item @{command "print_theorems"} prints theorems of the background + theory resulting from the last command; the ``@{text "!"}'' option + indicates extra verbosity. \item @{command "find_theorems"}~@{text criteria} retrieves facts from the theory or proof context matching all of given search diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/IsarRef/document/build --- a/src/Doc/IsarRef/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/IsarRef/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/LaTeXsugar/document/build --- a/src/Doc/LaTeXsugar/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/LaTeXsugar/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Locales/document/build --- a/src/Doc/Locales/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Locales/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Logics/document/build --- a/src/Doc/Logics/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Logics/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Main/document/build --- a/src/Doc/Main/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Main/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Nitpick/document/build --- a/src/Doc/Nitpick/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Nitpick/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/ProgProve/document/build --- a/src/Doc/ProgProve/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/ProgProve/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/Sledgehammer/document/build --- a/src/Doc/Sledgehammer/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Sledgehammer/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/System/document/build --- a/src/Doc/System/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/System/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 
437c0a63bb16 -r c9d6f6285e1d src/Doc/Tutorial/document/build --- a/src/Doc/Tutorial/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/Tutorial/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/ZF/document/build --- a/src/Doc/ZF/document/build Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/ZF/document/build Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/fixbookmarks --- a/src/Doc/fixbookmarks Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/fixbookmarks Tue Sep 10 20:11:01 2013 +0200 @@ -1,3 +1,3 @@ -#!/bin/bash +#!/usr/bin/env bash perl -pi -e 's/\\([a-zA-Z]+)\s*/$1/g; s/\$//g; s/^BOOKMARK/\\BOOKMARK/g;' "$@" diff -r 437c0a63bb16 -r c9d6f6285e1d src/Doc/prepare_document --- a/src/Doc/prepare_document Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Doc/prepare_document Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -e diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/ATP.thy --- a/src/HOL/ATP.thy Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/ATP.thy Tue Sep 10 20:11:01 2013 +0200 @@ -11,7 +11,6 @@ ML_file "Tools/lambda_lifting.ML" ML_file "Tools/monomorph.ML" -ML_file "Tools/legacy_monomorph.ML" ML_file "Tools/ATP/atp_util.ML" ML_file "Tools/ATP/atp_problem.ML" ML_file "Tools/ATP/atp_proof.ML" diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/BNF/Tools/bnf_ctr_sugar.ML --- a/src/HOL/BNF/Tools/bnf_ctr_sugar.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/BNF/Tools/bnf_ctr_sugar.ML Tue Sep 10 20:11:01 2013 +0200 @@ -23,7 +23,11 @@ split_asm: thm, disc_thmss: thm list list, discIs: thm list, - sel_thmss: thm list list}; + sel_thmss: thm list list, + disc_exhausts: thm list, + collapses: thm list, + expands: thm list, + case_convs: thm list}; val morph_ctr_sugar: morphism -> ctr_sugar -> ctr_sugar @@ -68,10 +72,15 @@ split_asm: thm, disc_thmss: thm list list, discIs: thm list, - sel_thmss: thm list list}; + sel_thmss: thm list list, + disc_exhausts: thm list, + collapses: thm list, + expands: thm list, + case_convs: thm list}; fun morph_ctr_sugar phi {ctrs, casex, discs, selss, exhaust, nchotomy, injects, distincts, - case_thms, case_cong, weak_case_cong, split, split_asm, disc_thmss, discIs, sel_thmss} = + case_thms, case_cong, weak_case_cong, split, split_asm, disc_thmss, discIs, sel_thmss, + disc_exhausts, collapses, expands, case_convs} = {ctrs = map (Morphism.term phi) ctrs, casex = Morphism.term phi casex, discs = map (Morphism.term phi) discs, @@ -87,7 +96,11 @@ split_asm = Morphism.thm phi split_asm, disc_thmss = map (map (Morphism.thm phi)) disc_thmss, discIs = map (Morphism.thm phi) discIs, - sel_thmss = map (map (Morphism.thm phi)) sel_thmss}; + sel_thmss = map (map (Morphism.thm phi)) sel_thmss, + disc_exhausts = map (Morphism.thm phi) disc_exhausts, + collapses = map (Morphism.thm phi) collapses, + expands = map (Morphism.thm phi) expands, + case_convs = map (Morphism.thm phi) case_convs}; val rep_compat_prefix = "new"; @@ -762,7 +775,8 @@ nchotomy = nchotomy_thm, injects = inject_thms, distincts = distinct_thms, case_thms = case_thms, case_cong = case_cong_thm, weak_case_cong = weak_case_cong_thm, split = split_thm, split_asm = split_asm_thm, disc_thmss = disc_thmss, - discIs = discI_thms, sel_thmss = sel_thmss}, + discIs = discI_thms, sel_thmss = sel_thmss, disc_exhausts = disc_exhaust_thms, + collapses = collapse_thms, expands = expand_thms, case_convs = case_conv_thms}, 
lthy |> not rep_compat ? (Local_Theory.declaration {syntax = false, pervasive = true} diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/BNF/Tools/bnf_fp_def_sugar.ML --- a/src/HOL/BNF/Tools/bnf_fp_def_sugar.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/BNF/Tools/bnf_fp_def_sugar.ML Tue Sep 10 20:11:01 2013 +0200 @@ -18,8 +18,11 @@ ctr_defss: thm list list, ctr_sugars: BNF_Ctr_Sugar.ctr_sugar list, co_iterss: term list list, + mapss: thm list list, co_inducts: thm list, - co_iter_thmsss: thm list list list}; + co_iter_thmsss: thm list list list, + disc_co_itersss: thm list list list, + sel_co_iterssss: thm list list list list}; val of_fp_sugar: (fp_sugar -> 'a list) -> fp_sugar -> 'a val morph_fp_sugar: morphism -> fp_sugar -> fp_sugar @@ -80,7 +83,7 @@ * (thm list list * thm list list * Args.src list) * (thm list list * thm list list) * (thm list list * thm list list * Args.src list) * (thm list list * thm list list * Args.src list) - * (thm list list * thm list list * Args.src list) + * (thm list list list * thm list list list * Args.src list) val co_datatypes: BNF_FP_Util.fp_kind -> (mixfix list -> binding list -> binding list -> binding list list -> binding list -> (string * sort) list -> typ list * typ list list -> @@ -118,8 +121,11 @@ ctr_defss: thm list list, ctr_sugars: ctr_sugar list, co_iterss: term list list, + mapss: thm list list, co_inducts: thm list, - co_iter_thmsss: thm list list list}; + co_iter_thmsss: thm list list list, + disc_co_itersss: thm list list list, + sel_co_iterssss: thm list list list list}; fun of_fp_sugar f (fp_sugar as {index, ...}) = nth (f fp_sugar) index; @@ -128,15 +134,18 @@ T1 = T2 andalso fp1 = fp2 andalso index1 = index2 andalso eq_fp_result (fp_res1, fp_res2); fun morph_fp_sugar phi {T, fp, index, pre_bnfs, nested_bnfs, nesting_bnfs, fp_res, ctr_defss, - ctr_sugars, co_iterss, co_inducts, co_iter_thmsss} = + ctr_sugars, co_iterss, mapss, co_inducts, co_iter_thmsss, disc_co_itersss, sel_co_iterssss} = {T = Morphism.typ phi T, fp = fp, index = index, pre_bnfs = map (morph_bnf phi) pre_bnfs, nested_bnfs = map (morph_bnf phi) nested_bnfs, nesting_bnfs = map (morph_bnf phi) nesting_bnfs, fp_res = morph_fp_result phi fp_res, ctr_defss = map (map (Morphism.thm phi)) ctr_defss, ctr_sugars = map (morph_ctr_sugar phi) ctr_sugars, co_iterss = map (map (Morphism.term phi)) co_iterss, + mapss = map (map (Morphism.thm phi)) mapss, co_inducts = map (Morphism.thm phi) co_inducts, - co_iter_thmsss = map (map (map (Morphism.thm phi))) co_iter_thmsss}; + co_iter_thmsss = map (map (map (Morphism.thm phi))) co_iter_thmsss, + disc_co_itersss = map (map (map (Morphism.thm phi))) disc_co_itersss, + sel_co_iterssss = map (map (map (map (Morphism.thm phi)))) sel_co_iterssss}; structure Data = Generic_Data ( @@ -161,13 +170,14 @@ (fn phi => Data.map (Symtab.default (key, morph_fp_sugar phi fp_sugar))); fun register_fp_sugars fp pre_bnfs nested_bnfs nesting_bnfs (fp_res as {Ts, ...}) ctr_defss - ctr_sugars co_iterss co_inducts co_iter_thmsss lthy = + ctr_sugars co_iterss mapss co_inducts co_iter_thmsss disc_co_itersss sel_co_iterssss lthy = (0, lthy) |> fold (fn T as Type (s, _) => fn (kk, lthy) => (kk + 1, register_fp_sugar s {T = T, fp = fp, index = kk, pre_bnfs = pre_bnfs, nested_bnfs = nested_bnfs, nesting_bnfs = nesting_bnfs, fp_res = fp_res, - ctr_defss = ctr_defss, ctr_sugars = ctr_sugars, co_iterss = co_iterss, - co_inducts = co_inducts, co_iter_thmsss = co_iter_thmsss} + ctr_defss = ctr_defss, ctr_sugars = ctr_sugars, co_iterss = co_iterss, mapss = mapss, + co_inducts 
= co_inducts, co_iter_thmsss = co_iter_thmsss, disc_co_itersss = disc_co_itersss, + sel_co_iterssss = sel_co_iterssss} lthy)) Ts |> snd; @@ -999,10 +1009,10 @@ end; fun mk_sel_coiter_thms coiter_thmss = - map3 (map3 (map2 o mk_sel_coiter_thm)) coiter_thmss selsss sel_thmsss |> map flat; + map3 (map3 (map2 o mk_sel_coiter_thm)) coiter_thmss selsss sel_thmsss; - val sel_unfold_thmss = mk_sel_coiter_thms unfold_thmss; - val sel_corec_thmss = mk_sel_coiter_thms corec_thmss; + val sel_unfold_thmsss = mk_sel_coiter_thms unfold_thmss; + val sel_corec_thmsss = mk_sel_coiter_thms corec_thmss; val coinduct_consumes_attr = Attrib.internal (K (Rule_Cases.consumes nn)); val coinduct_case_names_attr = Attrib.internal (K (Rule_Cases.case_names coinduct_cases)); @@ -1018,7 +1028,7 @@ (safe_unfold_thmss, safe_corec_thmss), (disc_unfold_thmss, disc_corec_thmss, simp_attrs), (disc_unfold_iff_thmss, disc_corec_iff_thmss, simp_attrs), - (sel_unfold_thmss, sel_corec_thmss, simp_attrs)) + (sel_unfold_thmsss, sel_corec_thmsss, simp_attrs)) end; fun define_co_datatypes prepare_constraint prepare_typ prepare_term fp construct_fp @@ -1407,7 +1417,7 @@ @ rel_distincts @ flat setss); fun derive_and_note_induct_iters_thms_for_types - ((((mapsx, rel_injects, rel_distincts, setss), (ctrss, _, ctr_defss, ctr_sugars)), + ((((mapss, rel_injects, rel_distincts, setss), (ctrss, _, ctr_defss, ctr_sugars)), (iterss, iter_defss)), lthy) = let val ((induct_thms, induct_thm, induct_attrs), (fold_thmss, fold_attrs), @@ -1419,7 +1429,7 @@ val induct_type_attr = Attrib.internal o K o Induct.induct_type; val simp_thmss = - mk_simp_thmss ctr_sugars fold_thmss rec_thmss mapsx rel_injects rel_distincts setss; + mk_simp_thmss ctr_sugars fold_thmss rec_thmss mapss rel_injects rel_distincts setss; val common_notes = (if nn > 1 then [(inductN, [induct_thm], induct_attrs)] else []) @@ -1435,11 +1445,11 @@ lthy |> Local_Theory.notes (common_notes @ notes) |> snd |> register_fp_sugars Least_FP pre_bnfs nested_bnfs nesting_bnfs fp_res ctr_defss ctr_sugars - iterss [induct_thm] (transpose [fold_thmss, rec_thmss]) + iterss mapss [induct_thm] (transpose [fold_thmss, rec_thmss]) [] [] end; fun derive_and_note_coinduct_coiters_thms_for_types - ((((mapsx, rel_injects, rel_distincts, setss), (_, _, ctr_defss, ctr_sugars)), + ((((mapss, rel_injects, rel_distincts, setss), (_, _, ctr_defss, ctr_sugars)), (coiterss, coiter_defss)), lthy) = let val (([(coinduct_thms, coinduct_thm), (strong_coinduct_thms, strong_coinduct_thm)], @@ -1448,11 +1458,14 @@ (safe_unfold_thmss, safe_corec_thmss), (disc_unfold_thmss, disc_corec_thmss, disc_coiter_attrs), (disc_unfold_iff_thmss, disc_corec_iff_thmss, disc_coiter_iff_attrs), - (sel_unfold_thmss, sel_corec_thmss, sel_coiter_attrs)) = + (sel_unfold_thmsss, sel_corec_thmsss, sel_coiter_attrs)) = derive_coinduct_coiters_thms_for_types pre_bnfs (the coiters_args_types) xtor_co_induct dtor_injects dtor_ctors xtor_co_iter_thmss nesting_bnfs fpTs Cs kss mss ns ctr_defss ctr_sugars coiterss coiter_defss (Proof_Context.export lthy' no_defs_lthy) lthy; + val sel_unfold_thmss = map flat sel_unfold_thmsss; + val sel_corec_thmss = map flat sel_corec_thmsss; + val coinduct_type_attr = Attrib.internal o K o Induct.coinduct_type; fun flat_coiter_thms coiters disc_coiters sel_coiters = @@ -1462,7 +1475,7 @@ mk_simp_thmss ctr_sugars (map3 flat_coiter_thms safe_unfold_thmss disc_unfold_thmss sel_unfold_thmss) (map3 flat_coiter_thms safe_corec_thmss disc_corec_thmss sel_corec_thmss) - mapsx rel_injects rel_distincts setss; + mapss 
rel_injects rel_distincts setss; val anonymous_notes = [(flat safe_unfold_thmss @ flat safe_corec_thmss, simp_attrs)] @@ -1494,8 +1507,9 @@ lthy |> Local_Theory.notes (anonymous_notes @ common_notes @ notes) |> snd |> register_fp_sugars Greatest_FP pre_bnfs nested_bnfs nesting_bnfs fp_res ctr_defss - ctr_sugars coiterss [coinduct_thm, strong_coinduct_thm] - (transpose [unfold_thmss, corec_thmss]) + ctr_sugars coiterss mapss [coinduct_thm, strong_coinduct_thm] + (transpose [unfold_thmss, corec_thmss]) (transpose [disc_unfold_thmss, disc_corec_thmss]) + (transpose [sel_unfold_thmsss, sel_corec_thmsss]) end; val lthy'' = lthy' diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/BNF/Tools/bnf_fp_n2m_sugar.ML --- a/src/HOL/BNF/Tools/bnf_fp_n2m_sugar.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/BNF/Tools/bnf_fp_n2m_sugar.ML Tue Sep 10 20:11:01 2013 +0200 @@ -59,6 +59,7 @@ end; val ctr_defss = map (of_fp_sugar #ctr_defss) fp_sugars0; + val mapss = map (of_fp_sugar #mapss) fp_sugars0; val ctr_sugars0 = map2 target_ctr_sugar_of_fp_sugar fpTs fp_sugars0; val ctrss = map #ctrs ctr_sugars0; @@ -145,27 +146,33 @@ val ctr_sugars = map inst_ctr_sugar ctr_sugars0; - val (co_inducts, un_fold_thmss, co_rec_thmss) = + val (co_inducts, un_fold_thmss, co_rec_thmss, disc_unfold_thmss, disc_corec_thmss, + sel_unfold_thmsss, sel_corec_thmsss) = if fp = Least_FP then derive_induct_iters_thms_for_types pre_bnfs (the iters_args_types) xtor_co_induct xtor_co_iter_thmss nesting_bnfs nested_bnfs fpTs Cs Xs ctrXs_Tsss ctrss ctr_defss co_iterss co_iter_defss lthy |> (fn ((_, induct, _), (fold_thmss, _), (rec_thmss, _)) => - ([induct], fold_thmss, rec_thmss)) + ([induct], fold_thmss, rec_thmss, [], [], [], [])) else derive_coinduct_coiters_thms_for_types pre_bnfs (the coiters_args_types) xtor_co_induct dtor_injects dtor_ctors xtor_co_iter_thmss nesting_bnfs fpTs Cs kss mss ns ctr_defss ctr_sugars co_iterss co_iter_defss (Proof_Context.export lthy no_defs_lthy) lthy - |> (fn ((coinduct_thms_pairs, _), (unfold_thmss, corec_thmss, _), _, _, _, _) => - (map snd coinduct_thms_pairs, unfold_thmss, corec_thmss)); + |> (fn ((coinduct_thms_pairs, _), (unfold_thmss, corec_thmss, _), _, _, + (disc_unfold_thmss, disc_corec_thmss, _), + (sel_unfold_thmsss, sel_corec_thmsss, _)) => + (map snd coinduct_thms_pairs, unfold_thmss, corec_thmss, disc_unfold_thmss, + disc_corec_thmss, sel_unfold_thmsss, sel_corec_thmsss)); val phi = Proof_Context.export_morphism no_defs_lthy no_defs_lthy0; fun mk_target_fp_sugar (kk, T) = {T = T, fp = fp, index = kk, pre_bnfs = pre_bnfs, nested_bnfs = nested_bnfs, nesting_bnfs = nesting_bnfs, fp_res = fp_res, ctr_defss = ctr_defss, - ctr_sugars = ctr_sugars, co_inducts = co_inducts, co_iterss = co_iterss, - co_iter_thmsss = transpose [un_fold_thmss, co_rec_thmss]} + ctr_sugars = ctr_sugars, co_iterss = co_iterss, mapss = mapss, co_inducts = co_inducts, + co_iter_thmsss = transpose [un_fold_thmss, co_rec_thmss], + disc_co_itersss = transpose [disc_unfold_thmss, disc_corec_thmss], + sel_co_iterssss = transpose [sel_unfold_thmsss, sel_corec_thmsss]} |> morph_fp_sugar phi; in ((true, map_index mk_target_fp_sugar fpTs), lthy) diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/BNF/Tools/bnf_fp_rec_sugar_tactics.ML --- a/src/HOL/BNF/Tools/bnf_fp_rec_sugar_tactics.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/BNF/Tools/bnf_fp_rec_sugar_tactics.ML Tue Sep 10 20:11:01 2013 +0200 @@ -54,8 +54,8 @@ fun mk_primcorec_disc_tac ctxt defs disc k m exclsss = mk_primcorec_prelude ctxt defs disc THEN mk_primcorec_cases_tac ctxt k m 
exclsss; -fun mk_primcorec_eq_tac ctxt defs sel k m exclsss maps map_idents map_comps = - mk_primcorec_prelude ctxt defs (sel RS trans) THEN mk_primcorec_cases_tac ctxt k m exclsss THEN +fun mk_primcorec_eq_tac ctxt defs eq_thm k m exclsss maps map_idents map_comps = + mk_primcorec_prelude ctxt defs (eq_thm RS trans) THEN mk_primcorec_cases_tac ctxt k m exclsss THEN unfold_thms_tac ctxt (@{thms if_if_True if_if_False if_True if_False o_def split_def sum.cases} @ maps @ map_comps @ map_idents) THEN HEADGOAL (rtac refl); diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/BNF/Tools/bnf_fp_rec_sugar_util.ML --- a/src/HOL/BNF/Tools/bnf_fp_rec_sugar_util.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/BNF/Tools/bnf_fp_rec_sugar_util.ML Tue Sep 10 20:11:01 2013 +0200 @@ -31,7 +31,10 @@ sels: term list, pred: int option, calls: corec_call list, - corec_thm: thm} + collapse: thm, + corec_thm: thm, + disc_corec: thm, + sel_corecs: thm list} type rec_spec = {recx: term, @@ -41,6 +44,9 @@ type corec_spec = {corec: term, + nested_maps: thm list, + nested_map_idents: thm list, + nested_map_comps: thm list, ctr_specs: corec_ctr_spec list} val massage_indirect_rec_call: Proof.context -> (term -> bool) -> (typ -> typ -> term -> term) -> @@ -90,7 +96,10 @@ sels: term list, pred: int option, calls: corec_call list, - corec_thm: thm}; + collapse: thm, + corec_thm: thm, + disc_corec: thm, + sel_corecs: thm list}; type rec_spec = {recx: term, @@ -100,6 +109,9 @@ type corec_spec = {corec: term, + nested_maps: thm list, + nested_map_idents: thm list, + nested_map_comps: thm list, ctr_specs: corec_ctr_spec list}; val id_def = @{thm id_def}; @@ -260,6 +272,18 @@ fun find_index_eq hs h = find_index (curry (op =) h) hs; +(*FIXME: remove special cases for products and sum once they are registered as datatypes*) +fun map_thms_of_typ ctxt (Type (s, _)) = + if s = @{type_name prod} then + @{thms map_pair_simp} + else if s = @{type_name sum} then + @{thms sum_map.simps} + else + (case fp_sugar_of ctxt s of + SOME {index, mapss, ...} => nth mapss index + | NONE => []) + | map_thms_of_typ _ _ = []; + val lose_co_rec = false (*FIXME: try true?*); fun rec_specs_of bs arg_Ts res_Ts get_indices callssss0 lthy = @@ -348,8 +372,8 @@ val thy = Proof_Context.theory_of lthy; val ((nontriv, missing_res_Ts, perm0_kks, - fp_sugars as {fp_res = {xtor_co_iterss = dtor_coiters1 :: _, ...}, - co_inducts = coinduct_thms, ...} :: _), lthy') = + fp_sugars as {nested_bnfs, fp_res = {xtor_co_iterss = dtor_coiters1 :: _, ...}, + co_inducts = coinduct_thms, ...} :: _), lthy') = nested_to_mutual_fps lose_co_rec Greatest_FP bs res_Ts get_indices callssss0 lthy; val perm_fp_sugars = sort (int_ord o pairself #index) fp_sugars; @@ -407,27 +431,38 @@ else No_Corec) g_i | call_of _ [q_i] [g_i, g_i'] _ = Direct_Corec (q_i, g_i, g_i'); - fun mk_ctr_spec ctr disc sels p_ho q_iss f_iss f_Tss corec_thm = + fun mk_ctr_spec ctr disc sels p_ho q_iss f_iss f_Tss collapse corec_thm disc_corec sel_corecs = let val nullary = not (can dest_funT (fastype_of ctr)) in {ctr = substA ctr, disc = substA disc, sels = map substA sels, pred = p_ho, - calls = map3 (call_of nullary) q_iss f_iss f_Tss, corec_thm = corec_thm} + calls = map3 (call_of nullary) q_iss f_iss f_Tss, collapse = collapse, + corec_thm = corec_thm, disc_corec = disc_corec, sel_corecs = sel_corecs} end; - fun mk_ctr_specs index ctr_sugars p_is q_isss f_isss f_Tsss coiter_thmsss = + fun mk_ctr_specs index ctr_sugars p_is q_isss f_isss f_Tsss coiter_thmsss disc_coitersss + sel_coiterssss = let val ctrs = #ctrs 
(nth ctr_sugars index); val discs = #discs (nth ctr_sugars index); val selss = #selss (nth ctr_sugars index); val p_ios = map SOME p_is @ [NONE]; - val corec_thmss = co_rec_of (nth coiter_thmsss index); + val collapses = #collapses (nth ctr_sugars index); + val corec_thms = co_rec_of (nth coiter_thmsss index); + val disc_corecs = co_rec_of (nth disc_coitersss index); + val sel_corecss = co_rec_of (nth sel_coiterssss index); in - map8 mk_ctr_spec ctrs discs selss p_ios q_isss f_isss f_Tsss corec_thmss + map11 mk_ctr_spec ctrs discs selss p_ios q_isss f_isss f_Tsss collapses corec_thms + disc_corecs sel_corecss end; - fun mk_spec {T, index, ctr_sugars, co_iterss = coiterss, co_iter_thmsss = coiter_thmsss, ...} + fun mk_spec {T, index, ctr_sugars, co_iterss = coiterss, co_iter_thmsss = coiter_thmsss, + disc_co_itersss = disc_coitersss, sel_co_iterssss = sel_coiterssss, ...} p_is q_isss f_isss f_Tsss = {corec = mk_co_iter thy Greatest_FP (substAT T) perm_Cs' (co_rec_of (nth coiterss index)), - ctr_specs = mk_ctr_specs index ctr_sugars p_is q_isss f_isss f_Tsss coiter_thmsss}; + nested_maps = maps (map_thms_of_typ lthy o T_of_bnf) nested_bnfs, + nested_map_idents = map (unfold_thms lthy [id_def] o map_id0_of_bnf) nested_bnfs, + nested_map_comps = map map_comp_of_bnf nested_bnfs, + ctr_specs = mk_ctr_specs index ctr_sugars p_is q_isss f_isss f_Tsss coiter_thmsss + disc_coitersss sel_coiterssss}; in ((nontriv, map5 mk_spec fp_sugars p_iss q_issss f_issss f_Tssss, missing_res_Ts, co_induct_of coinduct_thms, strong_co_induct_of coinduct_thms, co_induct_of coinduct_thmss, diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/IMP/export.sh --- a/src/HOL/IMP/export.sh Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/IMP/export.sh Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Author: Gerwin Klein # diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Multivariate_Analysis/Integration.thy --- a/src/HOL/Multivariate_Analysis/Integration.thy Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Multivariate_Analysis/Integration.thy Tue Sep 10 20:11:01 2013 +0200 @@ -2999,7 +2999,7 @@ done lemma integrable_setsum: - "finite t \ \a \ t.(f a) integrable_on s \ (\x. setsum (\a. f a x) t) integrable_on s" + "finite t \ \a \ t. (f a) integrable_on s \ (\x. setsum (\a. f a x) t) integrable_on s" unfolding integrable_on_def apply (drule bchoice) using has_integral_setsum[of t] @@ -3141,7 +3141,12 @@ proof (rule, rule) case goal1 then have "e/2 > 0" by auto - then guess d apply- apply(drule y[rule_format]) by(erule exE,erule conjE) note d=this[rule_format] + then guess d + apply - + apply (drule y[rule_format]) + apply (elim exE conjE) + done + note d=this[rule_format] show ?case apply (rule_tac x=d in exI) apply rule @@ -3215,7 +3220,8 @@ by auto guess N2 using y[OF *] .. note N2=this show "\d. gauge d \ - (\p. p tagged_division_of {a..b} \ d fine p \ norm ((\(x, k)\p. content k *\<^sub>R f x) - y) < e)" + (\p. p tagged_division_of {a..b} \ d fine p \ + norm ((\(x, k)\p. content k *\<^sub>R f x) - y) < e)" apply (rule_tac x="d (N1 + N2)" in exI) apply rule defer @@ -3315,7 +3321,8 @@ shows "content(k1 \ {x. x\k \ c}) = 0" proof - note d=division_ofD[OF assms(1)] - have *: "\a b::'a. \ c. (content({a..b} \ {x. x\k \ c}) = 0 \ interior({a..b} \ {x. x\k \ c}) = {})" + have *: "\(a::'a) b c. content ({a..b} \ {x. x\k \ c}) = 0 \ + interior({a..b} \ {x. 
x\k \ c}) = {}" unfolding interval_split[OF k] content_eq_0_interior by auto guess u1 v1 using d(4)[OF assms(2)] apply-by(erule exE)+ note uv1=this guess u2 v2 using d(4)[OF assms(3)] apply-by(erule exE)+ note uv2=this @@ -3344,8 +3351,8 @@ have *: "\a b::'a. \c. content({a..b} \ {x. x\k \ c}) = 0 \ interior({a..b} \ {x. x\k \ c}) = {}" unfolding interval_split[OF k] content_eq_0_interior by auto - guess u1 v1 using d(4)[OF assms(2)] apply-by(erule exE)+ note uv1=this - guess u2 v2 using d(4)[OF assms(3)] apply-by(erule exE)+ note uv2=this + guess u1 v1 using d(4)[OF assms(2)] by (elim exE) note uv1=this + guess u2 v2 using d(4)[OF assms(3)] by (elim exE) note uv2=this have **: "\s t u. s \ t = {} \ u \ s \ u \ t \ u = {}" by auto show ?thesis @@ -3361,7 +3368,7 @@ lemma tagged_division_split_left_inj: fixes x1 :: "'a::ordered_euclidean_space" assumes "d tagged_division_of i" - and "(x1,k1) \ d" + and "(x1, k1) \ d" and "(x2, k2) \ d" and "k1 \ k2" and "k1 \ {x. x\k \ c} = k2 \ {x. x\k \ c}" @@ -3389,7 +3396,7 @@ and "k1 \ k2" and "k1 \ {x. x\k \ c} = k2 \ {x. x\k \ c}" and k: "k \ Basis" - shows "content(k1 \ {x. x\k \ c}) = 0" + shows "content (k1 \ {x. x\k \ c}) = 0" proof - have *: "\a b c. (a,b) \ c \ b \ snd ` c" unfolding image_iff @@ -3472,8 +3479,10 @@ case goal1 then have e: "e/2 > 0" by auto - guess d1 using has_integralD[OF assms(1)[unfolded interval_split[OF k]] e] . note d1=this[unfolded interval_split[symmetric,OF k]] - guess d2 using has_integralD[OF assms(2)[unfolded interval_split[OF k]] e] . note d2=this[unfolded interval_split[symmetric,OF k]] + guess d1 using has_integralD[OF assms(1)[unfolded interval_split[OF k]] e] . + note d1=this[unfolded interval_split[symmetric,OF k]] + guess d2 using has_integralD[OF assms(2)[unfolded interval_split[OF k]] e] . + note d2=this[unfolded interval_split[symmetric,OF k]] let ?d = "\x. if x\k = c then (d1 x \ d2 x) else ball x (abs(x\k - c)) \ d1 x \ d2 x" show ?case apply (rule_tac x="?d" in exI) @@ -3486,7 +3495,8 @@ show "gauge ?d" using d1(1) d2(1) unfolding gauge_def by auto fix p - assume "p tagged_division_of {a..b}" "?d fine p" note p = this tagged_division_ofD[OF this(1)] + assume "p tagged_division_of {a..b}" "?d fine p" + note p = this tagged_division_ofD[OF this(1)] have lem0: "\x kk. (x, kk) \ p \ kk \ {x. x\k \ c} \ {} \ x\k \ c" "\x kk. (x, kk) \ p \ kk \ {x. x\k \ c} \ {} \ x\k \ c" @@ -3701,7 +3711,8 @@ qed also note setsum_addf[symmetric] also have *: "\x. x \ p \ - (\(x,ka). content (ka \ {x. x \ k \ c}) *\<^sub>R f x) x + (\(x,ka). content (ka \ {x. c \ x \ k}) *\<^sub>R f x) x = + (\(x,ka). content (ka \ {x. x \ k \ c}) *\<^sub>R f x) x + + (\(x,ka). content (ka \ {x. c \ x \ k}) *\<^sub>R f x) x = (\(x,ka). content ka *\<^sub>R f x) x" unfolding split_paired_all split_conv proof - @@ -3728,170 +3739,296 @@ subsection {* A sort of converse, integrability on subintervals. *} -lemma tagged_division_union_interval: fixes a::"'a::ordered_euclidean_space" - assumes "p1 tagged_division_of ({a..b} \ {x. x\k \ (c::real)})" "p2 tagged_division_of ({a..b} \ {x. x\k \ c})" - and k:"k\Basis" +lemma tagged_division_union_interval: + fixes a :: "'a::ordered_euclidean_space" + assumes "p1 tagged_division_of ({a..b} \ {x. x\k \ (c::real)})" + and "p2 tagged_division_of ({a..b} \ {x. x\k \ c})" + and k: "k \ Basis" shows "(p1 \ p2) tagged_division_of ({a..b})" -proof- have *:"{a..b} = ({a..b} \ {x. x\k \ c}) \ ({a..b} \ {x. 
x\k \ c})" by auto - show ?thesis apply(subst *) apply(rule tagged_division_union[OF assms(1-2)]) - unfolding interval_split[OF k] interior_closed_interval using k - by(auto simp add: eucl_less[where 'a='a] elim!: ballE[where x=k]) qed - -lemma has_integral_separate_sides: fixes f::"'a::ordered_euclidean_space \ 'b::real_normed_vector" - assumes "(f has_integral i) ({a..b})" "e>0" and k:"k\Basis" - obtains d where "gauge d" "(\p1 p2. p1 tagged_division_of ({a..b} \ {x. x\k \ c}) \ d fine p1 \ - p2 tagged_division_of ({a..b} \ {x. x\k \ c}) \ d fine p2 - \ norm((setsum (\(x,k). content k *\<^sub>R f x) p1 + - setsum (\(x,k). content k *\<^sub>R f x) p2) - i) < e)" -proof- guess d using has_integralD[OF assms(1-2)] . note d=this - show ?thesis apply(rule that[of d]) apply(rule d) apply(rule,rule,rule,(erule conjE)+) - proof- fix p1 p2 assume "p1 tagged_division_of {a..b} \ {x. x \ k \ c}" "d fine p1" note p1=tagged_division_ofD[OF this(1)] this - assume "p2 tagged_division_of {a..b} \ {x. c \ x \ k}" "d fine p2" note p2=tagged_division_ofD[OF this(1)] this +proof - + have *: "{a..b} = ({a..b} \ {x. x\k \ c}) \ ({a..b} \ {x. x\k \ c})" + by auto + show ?thesis + apply (subst *) + apply (rule tagged_division_union[OF assms(1-2)]) + unfolding interval_split[OF k] interior_closed_interval + using k + apply (auto simp add: eucl_less[where 'a='a] elim!: ballE[where x=k]) + done +qed + +lemma has_integral_separate_sides: + fixes f :: "'a::ordered_euclidean_space \ 'b::real_normed_vector" + assumes "(f has_integral i) ({a..b})" + and "e > 0" + and k: "k \ Basis" + obtains d where "gauge d" + "\p1 p2. p1 tagged_division_of ({a..b} \ {x. x\k \ c}) \ d fine p1 \ + p2 tagged_division_of ({a..b} \ {x. x\k \ c}) \ d fine p2 \ + norm ((setsum (\(x,k). content k *\<^sub>R f x) p1 + setsum (\(x,k). content k *\<^sub>R f x) p2) - i) < e" +proof - + guess d using has_integralD[OF assms(1-2)] . note d=this + show ?thesis + apply (rule that[of d]) + apply (rule d) + apply rule + apply rule + apply rule + apply (elim conjE) + proof - + fix p1 p2 + assume "p1 tagged_division_of {a..b} \ {x. x \ k \ c}" "d fine p1" + note p1=tagged_division_ofD[OF this(1)] this + assume "p2 tagged_division_of {a..b} \ {x. c \ x \ k}" "d fine p2" + note p2=tagged_division_ofD[OF this(1)] this note tagged_division_union_interval[OF p1(7) p2(7)] note p12 = tagged_division_ofD[OF this] this - have "norm ((\(x, k)\p1. content k *\<^sub>R f x) + (\(x, k)\p2. content k *\<^sub>R f x) - i) = norm ((\(x, k)\p1 \ p2. content k *\<^sub>R f x) - i)" - apply(subst setsum_Un_zero) apply(rule p1 p2)+ apply(rule) unfolding split_paired_all split_conv - proof- fix a b assume ab:"(a,b) \ p1 \ p2" - have "(a,b) \ p1" using ab by auto from p1(4)[OF this] guess u v apply-by(erule exE)+ note uv =this - have "b \ {x. x\k = c}" using ab p1(3)[of a b] p2(3)[of a b] by fastforce - moreover have "interior {x::'a. x \ k = c} = {}" - proof(rule ccontr) case goal1 then obtain x where x:"x\interior {x::'a. x\k = c}" by auto + have "norm ((\(x, k)\p1. content k *\<^sub>R f x) + (\(x, k)\p2. content k *\<^sub>R f x) - i) = + norm ((\(x, k)\p1 \ p2. content k *\<^sub>R f x) - i)" + apply (subst setsum_Un_zero) + apply (rule p1 p2)+ + apply rule + unfolding split_paired_all split_conv + proof - + fix a b + assume ab: "(a, b) \ p1 \ p2" + have "(a, b) \ p1" + using ab by auto + from p1(4)[OF this] guess u v by (elim exE) note uv = this + have "b \ {x. x\k = c}" + using ab p1(3)[of a b] p2(3)[of a b] by fastforce + moreover + have "interior {x::'a. 
x \ k = c} = {}" + proof (rule ccontr) + assume "\ ?thesis" + then obtain x where x: "x \ interior {x::'a. x\k = c}" + by auto then guess e unfolding mem_interior .. note e=this - have x:"x\k = c" using x interior_subset by fastforce - have *:"\i. i\Basis \ \(x - (x + (e / 2) *\<^sub>R k)) \ i\ - = (if i = k then e/2 else 0)" using e k by (auto simp: inner_simps inner_not_same_Basis) + have x: "x\k = c" + using x interior_subset by fastforce + have *: "\i. i \ Basis \ \(x - (x + (e / 2) *\<^sub>R k)) \ i\ = (if i = k then e/2 else 0)" + using e k by (auto simp: inner_simps inner_not_same_Basis) have "(\i\Basis. \(x - (x + (e / 2 ) *\<^sub>R k)) \ i\) = - (\i\Basis. (if i = k then e / 2 else 0))" apply(rule setsum_cong2) apply(subst *) by auto - also have "... < e" apply(subst setsum_delta) using e by auto + (\i\Basis. (if i = k then e / 2 else 0))" + apply (rule setsum_cong2) + apply (subst *) + apply auto + done + also have "\ < e" + apply (subst setsum_delta) + using e + apply auto + done finally have "x + (e/2) *\<^sub>R k \ ball x e" unfolding mem_ball dist_norm by(rule le_less_trans[OF norm_le_l1]) - hence "x + (e/2) *\<^sub>R k \ {x. x\k = c}" using e by auto - thus False unfolding mem_Collect_eq using e x k by (auto simp: inner_simps) - qed ultimately have "content b = 0" unfolding uv content_eq_0_interior apply-apply(drule interior_mono) by auto - thus "content b *\<^sub>R f a = 0" by auto + then have "x + (e/2) *\<^sub>R k \ {x. x\k = c}" + using e by auto + then show False + unfolding mem_Collect_eq using e x k by (auto simp: inner_simps) + qed + ultimately have "content b = 0" + unfolding uv content_eq_0_interior + apply - + apply (drule interior_mono) + apply auto + done + then show "content b *\<^sub>R f a = 0" + by auto qed auto - also have "\ < e" by(rule k d(2) p12 fine_union p1 p2)+ - finally show "norm ((\(x, k)\p1. content k *\<^sub>R f x) + (\(x, k)\p2. content k *\<^sub>R f x) - i) < e" . qed qed + also have "\ < e" + by (rule k d(2) p12 fine_union p1 p2)+ + finally show "norm ((\(x, k)\p1. content k *\<^sub>R f x) + (\(x, k)\p2. content k *\<^sub>R f x) - i) < e" . + qed +qed lemma integrable_split[intro]: - fixes f::"'a::ordered_euclidean_space \ 'b::{real_normed_vector,complete_space}" - assumes "f integrable_on {a..b}" and k:"k\Basis" - shows "f integrable_on ({a..b} \ {x. x\k \ c})" (is ?t1) and "f integrable_on ({a..b} \ {x. x\k \ c})" (is ?t2) -proof- guess y using assms(1) unfolding integrable_on_def .. note y=this + fixes f :: "'a::ordered_euclidean_space \ 'b::{real_normed_vector,complete_space}" + assumes "f integrable_on {a..b}" + and k: "k \ Basis" + shows "f integrable_on ({a..b} \ {x. x\k \ c})" (is ?t1) + and "f integrable_on ({a..b} \ {x. x\k \ c})" (is ?t2) +proof - + guess y using assms(1) unfolding integrable_on_def .. note y=this def b' \ "\i\Basis. (if i = k then min (b\k) c else b\i)*\<^sub>R i::'a" def a' \ "\i\Basis. (if i = k then max (a\k) c else a\i)*\<^sub>R i::'a" - show ?t1 ?t2 unfolding interval_split[OF k] integrable_cauchy unfolding interval_split[symmetric,OF k] - proof(rule_tac[!] allI impI)+ fix e::real assume "e>0" hence "e/2>0" by auto + show ?t1 ?t2 + unfolding interval_split[OF k] integrable_cauchy + unfolding interval_split[symmetric,OF k] + proof (rule_tac[!] allI impI)+ + fix e :: real + assume "e > 0" + then have "e/2>0" + by auto from has_integral_separate_sides[OF y this k,of c] guess d . note d=this[rule_format] - let ?P = "\A. \d. gauge d \ (\p1 p2. 
p1 tagged_division_of {a..b} \ A \ d fine p1 - \ p2 tagged_division_of {a..b} \ A \ d fine p2 \ + let ?P = "\A. \d. gauge d \ (\p1 p2. p1 tagged_division_of {a..b} \ A \ d fine p1 \ + p2 tagged_division_of {a..b} \ A \ d fine p2 \ norm ((\(x, k)\p1. content k *\<^sub>R f x) - (\(x, k)\p2. content k *\<^sub>R f x)) < e)" - show "?P {x. x \ k \ c}" apply(rule_tac x=d in exI) apply(rule,rule d) apply(rule,rule,rule) - proof- fix p1 p2 assume as:"p1 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p1 - \ p2 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p2" + show "?P {x. x \ k \ c}" + apply (rule_tac x=d in exI) + apply rule + apply (rule d) + apply rule + apply rule + apply rule + proof - + fix p1 p2 + assume as: "p1 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p1 \ + p2 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p2" show "norm ((\(x, k)\p1. content k *\<^sub>R f x) - (\(x, k)\p2. content k *\<^sub>R f x)) < e" - proof- guess p using fine_division_exists[OF d(1), of a' b] . note p=this - show ?thesis using norm_triangle_half_l[OF d(2)[of p1 p] d(2)[of p2 p]] + proof - + guess p using fine_division_exists[OF d(1), of a' b] . note p=this + show ?thesis + using norm_triangle_half_l[OF d(2)[of p1 p] d(2)[of p2 p]] using as unfolding interval_split[OF k] b'_def[symmetric] a'_def[symmetric] - using p using assms by(auto simp add:algebra_simps) - qed qed - show "?P {x. x \ k \ c}" apply(rule_tac x=d in exI) apply(rule,rule d) apply(rule,rule,rule) - proof- fix p1 p2 assume as:"p1 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p1 - \ p2 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p2" + using p using assms + by (auto simp add: algebra_simps) + qed + qed + show "?P {x. x \ k \ c}" + apply (rule_tac x=d in exI) + apply rule + apply (rule d) + apply rule + apply rule + apply rule + proof - + fix p1 p2 + assume as: "p1 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p1 \ + p2 tagged_division_of {a..b} \ {x. x \ k \ c} \ d fine p2" show "norm ((\(x, k)\p1. content k *\<^sub>R f x) - (\(x, k)\p2. content k *\<^sub>R f x)) < e" - proof- guess p using fine_division_exists[OF d(1), of a b'] . note p=this - show ?thesis using norm_triangle_half_l[OF d(2)[of p p1] d(2)[of p p2]] - using as unfolding interval_split[OF k] b'_def[symmetric] a'_def[symmetric] - using p using assms by(auto simp add:algebra_simps) qed qed qed qed + proof - + guess p using fine_division_exists[OF d(1), of a b'] . note p=this + show ?thesis + using norm_triangle_half_l[OF d(2)[of p p1] d(2)[of p p2]] + using as + unfolding interval_split[OF k] b'_def[symmetric] a'_def[symmetric] + using p + using assms + by (auto simp add:algebra_simps) + qed + qed + qed +qed + subsection {* Generalized notion of additivity. *} definition "neutral opp = (SOME x. \y. opp x y = y \ opp y x = y)" -definition operative :: "('a \ 'a \ 'a) \ (('b::ordered_euclidean_space) set \ 'a) \ bool" where - "operative opp f \ - (\a b. content {a..b} = 0 \ f {a..b} = neutral(opp)) \ - (\a b c. \k\Basis. f({a..b}) = - opp (f({a..b} \ {x. x\k \ c})) - (f({a..b} \ {x. x\k \ c})))" - -lemma operativeD[dest]: fixes type::"'a::ordered_euclidean_space" assumes "operative opp f" - shows "\a b. content {a..b} = 0 \ f {a..b::'a} = neutral(opp)" - "\a b c k. k\Basis \ f({a..b}) = opp (f({a..b} \ {x. x\k \ c})) (f({a..b} \ {x. x\k \ c}))" +definition operative :: "('a \ 'a \ 'a) \ (('b::ordered_euclidean_space) set \ 'a) \ bool" + where "operative opp f \ + (\a b. content {a..b} = 0 \ f {a..b} = neutral opp) \ + (\a b c. \k\Basis. 
f {a..b} = opp (f({a..b} \ {x. x\k \ c})) (f ({a..b} \ {x. x\k \ c})))" + +lemma operativeD[dest]: + fixes type :: "'a::ordered_euclidean_space" + assumes "operative opp f" + shows "\a b::'a. content {a..b} = 0 \ f {a..b} = neutral opp" + and "\a b c k. k \ Basis \ f {a..b} = + opp (f ({a..b} \ {x. x\k \ c})) (f ({a..b} \ {x. x\k \ c}))" using assms unfolding operative_def by auto -lemma operative_trivial: - "operative opp f \ content({a..b}) = 0 \ f({a..b}) = neutral opp" +lemma operative_trivial: "operative opp f \ content({a..b}) = 0 \ f({a..b}) = neutral opp" unfolding operative_def by auto -lemma property_empty_interval: - "(\a b. content({a..b}) = 0 \ P({a..b})) \ P {}" +lemma property_empty_interval: "\a b. content {a..b} = 0 \ P {a..b} \ P {}" using content_empty unfolding empty_as_interval by auto lemma operative_empty: "operative opp f \ f {} = neutral opp" - unfolding operative_def apply(rule property_empty_interval) by auto + unfolding operative_def by (rule property_empty_interval) auto + subsection {* Using additivity of lifted function to encode definedness. *} -lemma forall_option: "(\x. P x) \ P None \ (\x. P(Some x))" - by (metis option.nchotomy) - -lemma exists_option: "(\x. P x) \ P None \ (\x. P(Some x))" +lemma forall_option: "(\x. P x) \ P None \ (\x. P (Some x))" by (metis option.nchotomy) -fun lifted -where - "lifted (opp::'a\'a\'b) (Some x) (Some y) = Some (opp x y)" +lemma exists_option: "(\x. P x) \ P None \ (\x. P (Some x))" + by (metis option.nchotomy) + +fun lifted where + "lifted (opp :: 'a \ 'a \ 'b) (Some x) (Some y) = Some (opp x y)" | "lifted opp None _ = (None::'b option)" | "lifted opp _ None = None" lemma lifted_simp_1[simp]: "lifted opp v None = None" by (induct v) auto -definition "monoidal opp \ (\x y. opp x y = opp y x) \ - (\x y z. opp x (opp y z) = opp (opp x y) z) \ - (\x. opp (neutral opp) x = x)" +definition "monoidal opp \ + (\x y. opp x y = opp y x) \ + (\x y z. opp x (opp y z) = opp (opp x y) z) \ + (\x. opp (neutral opp) x = x)" lemma monoidalI: assumes "\x y. opp x y = opp y x" - "\x y z. opp x (opp y z) = opp (opp x y) z" - "\x. opp (neutral opp) x = x" shows "monoidal opp" + and "\x y z. opp x (opp y z) = opp (opp x y) z" + and "\x. opp (neutral opp) x = x" + shows "monoidal opp" unfolding monoidal_def using assms by fastforce lemma monoidal_ac: assumes "monoidal opp" - shows "opp (neutral opp) a = a" "opp a (neutral opp) = a" "opp a b = opp b a" - "opp (opp a b) c = opp a (opp b c)" "opp a (opp b c) = opp b (opp a c)" + shows "opp (neutral opp) a = a" + and "opp a (neutral opp) = a" + and "opp a b = opp b a" + and "opp (opp a b) c = opp a (opp b c)" + and "opp a (opp b c) = opp b (opp a c)" using assms unfolding monoidal_def by metis+ -lemma monoidal_simps[simp]: assumes "monoidal opp" - shows "opp (neutral opp) a = a" "opp a (neutral opp) = a" +lemma monoidal_simps[simp]: + assumes "monoidal opp" + shows "opp (neutral opp) a = a" + and "opp a (neutral opp) = a" using monoidal_ac[OF assms] by auto -lemma neutral_lifted[cong]: assumes "monoidal opp" - shows "neutral (lifted opp) = Some(neutral opp)" - apply(subst neutral_def) apply(rule some_equality) apply(rule,induct_tac y) prefer 3 +lemma neutral_lifted[cong]: + assumes "monoidal opp" + shows "neutral (lifted opp) = Some (neutral opp)" + apply (subst neutral_def) + apply (rule some_equality) + apply rule + apply (induct_tac y) + prefer 3 proof - - fix x assume "\y. 
lifted opp x y = y \ lifted opp y x = y" - thus "x = Some (neutral opp)" - apply(induct x) defer - apply rule apply(subst neutral_def) apply(subst eq_commute,rule some_equality) - apply(rule,erule_tac x="Some y" in allE) defer apply(erule_tac x="Some x" in allE) + fix x + assume "\y. lifted opp x y = y \ lifted opp y x = y" + then show "x = Some (neutral opp)" + apply (induct x) + defer + apply rule + apply (subst neutral_def) + apply (subst eq_commute) + apply(rule some_equality) + apply rule + apply (erule_tac x="Some y" in allE) + defer + apply (erule_tac x="Some x" in allE) apply auto done -qed(auto simp add:monoidal_ac[OF assms]) - -lemma monoidal_lifted[intro]: assumes "monoidal opp" shows "monoidal(lifted opp)" - unfolding monoidal_def forall_option neutral_lifted[OF assms] using monoidal_ac[OF assms] by auto +qed (auto simp add:monoidal_ac[OF assms]) + +lemma monoidal_lifted[intro]: + assumes "monoidal opp" + shows "monoidal (lifted opp)" + unfolding monoidal_def forall_option neutral_lifted[OF assms] + using monoidal_ac[OF assms] + by auto definition "support opp f s = {x. x\s \ f x \ neutral opp}" -definition "fold' opp e s \ (if finite s then Finite_Set.fold opp e s else e)" -definition "iterate opp s f \ fold' (\x a. opp (f x) a) (neutral opp) (support opp f s)" - -lemma support_subset[intro]:"support opp f s \ s" unfolding support_def by auto -lemma support_empty[simp]:"support opp f {} = {}" using support_subset[of opp f "{}"] by auto - -lemma comp_fun_commute_monoidal[intro]: assumes "monoidal opp" shows "comp_fun_commute opp" - unfolding comp_fun_commute_def using monoidal_ac[OF assms] by auto +definition "fold' opp e s = (if finite s then Finite_Set.fold opp e s else e)" +definition "iterate opp s f = fold' (\x a. opp (f x) a) (neutral opp) (support opp f s)" + +lemma support_subset[intro]: "support opp f s \ s" + unfolding support_def by auto + +lemma support_empty[simp]: "support opp f {} = {}" + using support_subset[of opp f "{}"] by auto + +lemma comp_fun_commute_monoidal[intro]: + assumes "monoidal opp" + shows "comp_fun_commute opp" + unfolding comp_fun_commute_def + using monoidal_ac[OF assms] + by auto lemma support_clauses: "\f g s. support opp f {} = {}" @@ -3902,98 +4039,190 @@ "\f g s. support opp f (s \ t) = (support opp f s) \ (support opp f t)" "\f g s. support opp f (s - t) = (support opp f s) - (support opp f t)" "\f g s. 
support opp g (f ` s) = f ` (support opp (g o f) s)" -unfolding support_def by auto - -lemma finite_support[intro]:"finite s \ finite (support opp f s)" unfolding support_def by auto -lemma iterate_empty[simp]:"iterate opp {} f = neutral opp" +lemma finite_support[intro]: "finite s \ finite (support opp f s)" + unfolding support_def by auto + +lemma iterate_empty[simp]: "iterate opp {} f = neutral opp" unfolding iterate_def fold'_def by auto -lemma iterate_insert[simp]: assumes "monoidal opp" "finite s" - shows "iterate opp (insert x s) f = (if x \ s then iterate opp s f else opp (f x) (iterate opp s f))" -proof(cases "x\s") case True hence *:"insert x s = s" by auto +lemma iterate_insert[simp]: + assumes "monoidal opp" + and "finite s" + shows "iterate opp (insert x s) f = + (if x \ s then iterate opp s f else opp (f x) (iterate opp s f))" +proof (cases "x \ s") + case True + then have *: "insert x s = s" + by auto show ?thesis unfolding iterate_def if_P[OF True] * by auto -next case False note x=this +next + case False + note x = this note * = comp_fun_commute.comp_comp_fun_commute [OF comp_fun_commute_monoidal[OF assms(1)]] - show ?thesis proof(cases "f x = neutral opp") - case True show ?thesis unfolding iterate_def if_not_P[OF x] support_clauses if_P[OF True] - unfolding True monoidal_simps[OF assms(1)] by auto - next case False show ?thesis unfolding iterate_def fold'_def if_not_P[OF x] support_clauses if_not_P[OF False] - apply(subst comp_fun_commute.fold_insert[OF * finite_support, simplified comp_def]) - using `finite s` unfolding support_def using False x by auto qed qed + show ?thesis + proof (cases "f x = neutral opp") + case True + show ?thesis + unfolding iterate_def if_not_P[OF x] support_clauses if_P[OF True] + unfolding True monoidal_simps[OF assms(1)] + by auto + next + case False + show ?thesis + unfolding iterate_def fold'_def if_not_P[OF x] support_clauses if_not_P[OF False] + apply (subst comp_fun_commute.fold_insert[OF * finite_support, simplified comp_def]) + using `finite s` + unfolding support_def + using False x + apply auto + done + qed +qed lemma iterate_some: - assumes "monoidal opp" "finite s" - shows "iterate (lifted opp) s (\x. Some(f x)) = Some (iterate opp s f)" using assms(2) -proof(induct s) case empty thus ?case using assms by auto -next case (insert x F) show ?case apply(subst iterate_insert) prefer 3 apply(subst if_not_P) - defer unfolding insert(3) lifted.simps apply rule using assms insert by auto qed + assumes "monoidal opp" + and "finite s" + shows "iterate (lifted opp) s (\x. Some(f x)) = Some (iterate opp s f)" + using assms(2) +proof (induct s) + case empty + then show ?case + using assms by auto +next + case (insert x F) + show ?case + apply (subst iterate_insert) + prefer 3 + apply (subst if_not_P) + defer + unfolding insert(3) lifted.simps + apply rule + using assms insert + apply auto + done +qed + + subsection {* Two key instances of additivity. *} -lemma neutral_add[simp]: - "neutral op + = (0::_::comm_monoid_add)" unfolding neutral_def - apply(rule some_equality) defer apply(erule_tac x=0 in allE) by auto +lemma neutral_add[simp]: "neutral op + = (0::'a::comm_monoid_add)" + unfolding neutral_def + apply (rule some_equality) + defer + apply (erule_tac x=0 in allE) + apply auto + done lemma operative_content[intro]: "operative (op +) content" - unfolding operative_def neutral_add apply safe - unfolding content_split[symmetric] .. 
+ unfolding operative_def neutral_add + apply safe + unfolding content_split[symmetric] + apply rule + done lemma neutral_monoid: "neutral ((op +)::('a::comm_monoid_add) \ 'a \ 'a) = 0" by (rule neutral_add) (* FIXME: duplicate *) -lemma monoidal_monoid[intro]: - shows "monoidal ((op +)::('a::comm_monoid_add) \ 'a \ 'a)" - unfolding monoidal_def neutral_monoid by(auto simp add: algebra_simps) - -lemma operative_integral: fixes f::"'a::ordered_euclidean_space \ 'b::banach" +lemma monoidal_monoid[intro]: "monoidal ((op +)::('a::comm_monoid_add) \ 'a \ 'a)" + unfolding monoidal_def neutral_monoid + by (auto simp add: algebra_simps) + +lemma operative_integral: + fixes f :: "'a::ordered_euclidean_space \ 'b::banach" shows "operative (lifted(op +)) (\i. if f integrable_on i then Some(integral i f) else None)" - unfolding operative_def unfolding neutral_lifted[OF monoidal_monoid] neutral_add - apply(rule,rule,rule,rule) defer apply(rule allI ballI)+ -proof- - fix a b c and k :: 'a assume k:"k\Basis" + unfolding operative_def + unfolding neutral_lifted[OF monoidal_monoid] neutral_add + apply rule + apply rule + apply rule + apply rule + defer + apply (rule allI ballI)+ +proof - + fix a b c + fix k :: 'a + assume k: "k \ Basis" show "(if f integrable_on {a..b} then Some (integral {a..b} f) else None) = lifted op + (if f integrable_on {a..b} \ {x. x \ k \ c} then Some (integral ({a..b} \ {x. x \ k \ c}) f) else None) (if f integrable_on {a..b} \ {x. c \ x \ k} then Some (integral ({a..b} \ {x. c \ x \ k}) f) else None)" - proof(cases "f integrable_on {a..b}") - case True show ?thesis unfolding if_P[OF True] using k apply- - unfolding if_P[OF integrable_split(1)[OF True]] unfolding if_P[OF integrable_split(2)[OF True]] - unfolding lifted.simps option.inject apply(rule integral_unique) apply(rule has_integral_split[OF _ _ k]) - apply(rule_tac[!] integrable_integral integrable_split)+ using True k by auto - next case False have "(\ (f integrable_on {a..b} \ {x. x \ k \ c})) \ (\ ( f integrable_on {a..b} \ {x. c \ x \ k}))" - proof(rule ccontr) case goal1 hence "f integrable_on {a..b}" apply- unfolding integrable_on_def - apply(rule_tac x="integral ({a..b} \ {x. x \ k \ c}) f + integral ({a..b} \ {x. x \ k \ c}) f" in exI) - apply(rule has_integral_split[OF _ _ k]) apply(rule_tac[!] integrable_integral) by auto - thus False using False by auto - qed thus ?thesis using False by auto - qed next - fix a b assume as:"content {a..b::'a} = 0" - thus "(if f integrable_on {a..b} then Some (integral {a..b} f) else None) = Some 0" - unfolding if_P[OF integrable_on_null[OF as]] using has_integral_null_eq[OF as] by auto qed + proof (cases "f integrable_on {a..b}") + case True + show ?thesis + unfolding if_P[OF True] + using k + apply - + unfolding if_P[OF integrable_split(1)[OF True]] + unfolding if_P[OF integrable_split(2)[OF True]] + unfolding lifted.simps option.inject + apply (rule integral_unique) + apply (rule has_integral_split[OF _ _ k]) + apply (rule_tac[!] integrable_integral integrable_split)+ + using True k + apply auto + done + next + case False + have "\ (f integrable_on {a..b} \ {x. x \ k \ c}) \ \ ( f integrable_on {a..b} \ {x. c \ x \ k})" + proof (rule ccontr) + assume "\ ?thesis" + then have "f integrable_on {a..b}" + apply - + unfolding integrable_on_def + apply (rule_tac x="integral ({a..b} \ {x. x \ k \ c}) f + integral ({a..b} \ {x. x \ k \ c}) f" in exI) + apply (rule has_integral_split[OF _ _ k]) + apply (rule_tac[!] 
integrable_integral) + apply auto + done + then show False + using False by auto + qed + then show ?thesis + using False by auto + qed +next + fix a b :: 'a + assume as: "content {a..b} = 0" + then show "(if f integrable_on {a..b} then Some (integral {a..b} f) else None) = Some 0" + unfolding if_P[OF integrable_on_null[OF as]] + using has_integral_null_eq[OF as] + by auto +qed + subsection {* Points of division of a partition. *} definition "division_points (k::('a::ordered_euclidean_space) set) d = - {(j,x). j\Basis \ (interval_lowerbound k)\j < x \ x < (interval_upperbound k)\j \ - (\i\d. (interval_lowerbound i)\j = x \ (interval_upperbound i)\j = x)}" - -lemma division_points_finite: fixes i::"('a::ordered_euclidean_space) set" - assumes "d division_of i" shows "finite (division_points i d)" -proof- note assm = division_ofD[OF assms] + {(j,x). j \ Basis \ (interval_lowerbound k)\j < x \ x < (interval_upperbound k)\j \ + (\i\d. (interval_lowerbound i)\j = x \ (interval_upperbound i)\j = x)}" + +lemma division_points_finite: + fixes i :: "'a::ordered_euclidean_space set" + assumes "d division_of i" + shows "finite (division_points i d)" +proof - + note assm = division_ofD[OF assms] let ?M = "\j. {(j,x)|x. (interval_lowerbound i)\j < x \ x < (interval_upperbound i)\j \ - (\i\d. (interval_lowerbound i)\j = x \ (interval_upperbound i)\j = x)}" - have *:"division_points i d = \(?M ` Basis)" + (\i\d. (interval_lowerbound i)\j = x \ (interval_upperbound i)\j = x)}" + have *: "division_points i d = \(?M ` Basis)" unfolding division_points_def by auto - show ?thesis unfolding * using assm by auto qed - -lemma division_points_subset: fixes a::"'a::ordered_euclidean_space" - assumes "d division_of {a..b}" "\i\Basis. a\i < b\i" "a\k < c" "c < b\k" and k:"k\Basis" - shows "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l . l \ d \ ~(l \ {x. x\k \ c} = {})} - \ division_points ({a..b}) d" (is ?t1) and - "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l . l \ d \ ~(l \ {x. x\k \ c} = {})} - \ division_points ({a..b}) d" (is ?t2) -proof- note assm = division_ofD[OF assms(1)] - have *:"\i\Basis. a\i \ b\i" + show ?thesis + unfolding * using assm by auto +qed + +lemma division_points_subset: + fixes a :: "'a::ordered_euclidean_space" + assumes "d division_of {a..b}" + and "\i\Basis. a\i < b\i" "a\k < c" "c < b\k" + and k: "k \ Basis" + shows "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l . l \ d \ l \ {x. x\k \ c} \ {}} \ + division_points ({a..b}) d" (is ?t1) + and "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l . l \ d \ ~(l \ {x. x\k \ c} = {})} \ + division_points ({a..b}) d" (is ?t2) +proof - + note assm = division_ofD[OF assms(1)] + have *: "\i\Basis. a\i \ b\i" "\i\Basis. a\i \ (\i\Basis. (if i = k then min (b \ k) c else b \ i) *\<^sub>R i) \ i" "\i\Basis. (\i\Basis. 
(if i = k then max (a \ k) c else a \ i) *\<^sub>R i) \ i \ b\i" "min (b \ k) c = c" "max (a \ k) c = c" @@ -4003,83 +4232,148 @@ unfolding interval_bounds[OF *(1)] interval_bounds[OF *(2)] interval_bounds[OF *(3)] unfolding * unfolding subset_eq - apply(rule) + apply rule unfolding mem_Collect_eq split_beta - apply(erule bexE conjE)+ - apply(simp only: mem_Collect_eq inner_setsum_left_Basis simp_thms) - apply(erule exE conjE)+ + apply (erule bexE conjE)+ + apply (simp only: mem_Collect_eq inner_setsum_left_Basis simp_thms) + apply (erule exE conjE)+ proof - fix i l x assume as:"a \ fst x < snd x" "snd x < (if fst x = k then c else b \ fst x)" + fix i l x + assume as: + "a \ fst x < snd x" "snd x < (if fst x = k then c else b \ fst x)" "interval_lowerbound i \ fst x = snd x \ interval_upperbound i \ fst x = snd x" - "i = l \ {x. x \ k \ c}" "l \ d" "l \ {x. x \ k \ c} \ {}" and fstx:"fst x \Basis" + "i = l \ {x. x \ k \ c}" "l \ d" "l \ {x. x \ k \ c} \ {}" + and fstx: "fst x \ Basis" from assm(4)[OF this(5)] guess u v apply-by(erule exE)+ note l=this - have *:"\i\Basis. u \ i \ (\i\Basis. (if i = k then min (v \ k) c else v \ i) *\<^sub>R i) \ i" + have *: "\i\Basis. u \ i \ (\i\Basis. (if i = k then min (v \ k) c else v \ i) *\<^sub>R i) \ i" using as(6) unfolding l interval_split[OF k] interval_ne_empty as . - have **:"\i\Basis. u\i \ v\i" using l using as(6) unfolding interval_ne_empty[symmetric] by auto + have **: "\i\Basis. u\i \ v\i" + using l using as(6) unfolding interval_ne_empty[symmetric] by auto show "\i\d. interval_lowerbound i \ fst x = snd x \ interval_upperbound i \ fst x = snd x" apply (rule bexI[OF _ `l \ d`]) using as(1-3,5) fstx unfolding l interval_bounds[OF **] interval_bounds[OF *] interval_split[OF k] as - by (auto split: split_if_asm) + apply (auto split: split_if_asm) + done show "snd x < b \ fst x" using as(2) `c < b\k` by (auto split: split_if_asm) qed show ?t2 unfolding division_points_def interval_split[OF k, of a b] - unfolding interval_bounds[OF *(1)] interval_bounds[OF *(2)] interval_bounds[OF *(3)] unfolding * - unfolding subset_eq apply(rule) unfolding mem_Collect_eq split_beta - apply(erule bexE conjE)+ - apply(simp only: mem_Collect_eq inner_setsum_left_Basis simp_thms) - apply(erule exE conjE)+ + unfolding interval_bounds[OF *(1)] interval_bounds[OF *(2)] interval_bounds[OF *(3)] + unfolding * + unfolding subset_eq + apply rule + unfolding mem_Collect_eq split_beta + apply (erule bexE conjE)+ + apply (simp only: mem_Collect_eq inner_setsum_left_Basis simp_thms) + apply (erule exE conjE)+ proof - fix i l x assume as:"(if fst x = k then c else a \ fst x) < snd x" "snd x < b \ fst x" + fix i l x + assume as: + "(if fst x = k then c else a \ fst x) < snd x" "snd x < b \ fst x" "interval_lowerbound i \ fst x = snd x \ interval_upperbound i \ fst x = snd x" - "i = l \ {x. c \ x \ k}" "l \ d" "l \ {x. c \ x \ k} \ {}" and fstx:"fst x \ Basis" - from assm(4)[OF this(5)] guess u v apply-by(erule exE)+ note l=this - have *:"\i\Basis. (\i\Basis. (if i = k then max (u \ k) c else u \ i) *\<^sub>R i) \ i \ v \ i" + "i = l \ {x. c \ x \ k}" "l \ d" "l \ {x. c \ x \ k} \ {}" + and fstx: "fst x \ Basis" + from assm(4)[OF this(5)] guess u v by (elim exE) note l=this + have *: "\i\Basis. (\i\Basis. (if i = k then max (u \ k) c else u \ i) *\<^sub>R i) \ i \ v \ i" using as(6) unfolding l interval_split[OF k] interval_ne_empty as . - have **:"\i\Basis. u\i \ v\i" using l using as(6) unfolding interval_ne_empty[symmetric] by auto + have **: "\i\Basis. 
u\i \ v\i" + using l using as(6) unfolding interval_ne_empty[symmetric] by auto show "\i\d. interval_lowerbound i \ fst x = snd x \ interval_upperbound i \ fst x = snd x" apply (rule bexI[OF _ `l \ d`]) using as(1-3,5) fstx unfolding l interval_bounds[OF **] interval_bounds[OF *] interval_split[OF k] as - by (auto split: split_if_asm) + apply (auto split: split_if_asm) + done show "a \ fst x < snd x" using as(1) `a\k < c` by (auto split: split_if_asm) qed qed -lemma division_points_psubset: fixes a::"'a::ordered_euclidean_space" - assumes "d division_of {a..b}" "\i\Basis. a\i < b\i" "a\k < c" "c < b\k" - "l \ d" "interval_lowerbound l\k = c \ interval_upperbound l\k = c" and k:"k\Basis" - shows "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l. l\d \ l \ {x. x\k \ c} \ {}} - \ division_points ({a..b}) d" (is "?D1 \ ?D") - "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l. l\d \ l \ {x. x\k \ c} \ {}} - \ division_points ({a..b}) d" (is "?D2 \ ?D") -proof- have ab:"\i\Basis. a\i \ b\i" using assms(2) by(auto intro!:less_imp_le) - guess u v using division_ofD(4)[OF assms(1,5)] apply-by(erule exE)+ note l=this - have uv:"\i\Basis. u\i \ v\i" "\i\Basis. a\i \ u\i \ v\i \ b\i" +lemma division_points_psubset: + fixes a :: "'a::ordered_euclidean_space" + assumes "d division_of {a..b}" + and "\i\Basis. a\i < b\i" "a\k < c" "c < b\k" + and "l \ d" + and "interval_lowerbound l\k = c \ interval_upperbound l\k = c" + and k: "k \ Basis" + shows "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l. l\d \ l \ {x. x\k \ c} \ {}} \ + division_points ({a..b}) d" (is "?D1 \ ?D") + and "division_points ({a..b} \ {x. x\k \ c}) {l \ {x. x\k \ c} | l. l\d \ l \ {x. x\k \ c} \ {}} \ + division_points ({a..b}) d" (is "?D2 \ ?D") +proof - + have ab: "\i\Basis. a\i \ b\i" + using assms(2) by (auto intro!:less_imp_le) + guess u v using division_ofD(4)[OF assms(1,5)] by (elim exE) note l=this + have uv: "\i\Basis. u\i \ v\i" "\i\Basis. a\i \ u\i \ v\i \ b\i" using division_ofD(2,2,3)[OF assms(1,5)] unfolding l interval_ne_empty - unfolding subset_eq apply- defer apply(erule_tac x=u in ballE, erule_tac x=v in ballE) unfolding mem_interval by auto - have *:"interval_upperbound ({a..b} \ {x. x \ k \ interval_upperbound l \ k}) \ k = interval_upperbound l \ k" - "interval_upperbound ({a..b} \ {x. x \ k \ interval_lowerbound l \ k}) \ k = interval_lowerbound l \ k" - unfolding interval_split[OF k] apply(subst interval_bounds) prefer 3 apply(subst interval_bounds) - unfolding l interval_bounds[OF uv(1)] using uv[rule_format,of k] ab k by auto - have "\x. x \ ?D - ?D1" using assms(2-) apply-apply(erule disjE) - apply(rule_tac x="(k,(interval_lowerbound l)\k)" in exI) defer - apply(rule_tac x="(k,(interval_upperbound l)\k)" in exI) - unfolding division_points_def unfolding interval_bounds[OF ab] by(auto simp add:*) - thus "?D1 \ ?D" apply-apply(rule,rule division_points_subset[OF assms(1-4)]) using k by auto - - have *:"interval_lowerbound ({a..b} \ {x. x \ k \ interval_lowerbound l \ k}) \ k = interval_lowerbound l \ k" - "interval_lowerbound ({a..b} \ {x. x \ k \ interval_upperbound l \ k}) \ k = interval_upperbound l \ k" - unfolding interval_split[OF k] apply(subst interval_bounds) prefer 3 apply(subst interval_bounds) - unfolding l interval_bounds[OF uv(1)] using uv[rule_format,of k] ab k by auto - have "\x. 
x \ ?D - ?D2" using assms(2-) apply-apply(erule disjE) - apply(rule_tac x="(k,(interval_lowerbound l)\k)" in exI) defer - apply(rule_tac x="(k,(interval_upperbound l)\k)" in exI) - unfolding division_points_def unfolding interval_bounds[OF ab] by(auto simp add:*) - thus "?D2 \ ?D" apply-apply(rule,rule division_points_subset[OF assms(1-4) k]) by auto qed + unfolding subset_eq + apply - + defer + apply (erule_tac x=u in ballE) + apply (erule_tac x=v in ballE) + unfolding mem_interval + apply auto + done + have *: "interval_upperbound ({a..b} \ {x. x \ k \ interval_upperbound l \ k}) \ k = interval_upperbound l \ k" + "interval_upperbound ({a..b} \ {x. x \ k \ interval_lowerbound l \ k}) \ k = interval_lowerbound l \ k" + unfolding interval_split[OF k] + apply (subst interval_bounds) + prefer 3 + apply (subst interval_bounds) + unfolding l interval_bounds[OF uv(1)] + using uv[rule_format,of k] ab k + apply auto + done + have "\x. x \ ?D - ?D1" + using assms(2-) + apply - + apply (erule disjE) + apply (rule_tac x="(k,(interval_lowerbound l)\k)" in exI) + defer + apply (rule_tac x="(k,(interval_upperbound l)\k)" in exI) + unfolding division_points_def + unfolding interval_bounds[OF ab] + apply (auto simp add:*) + done + then show "?D1 \ ?D" + apply - + apply rule + apply (rule division_points_subset[OF assms(1-4)]) + using k + apply auto + done + + have *: "interval_lowerbound ({a..b} \ {x. x \ k \ interval_lowerbound l \ k}) \ k = interval_lowerbound l \ k" + "interval_lowerbound ({a..b} \ {x. x \ k \ interval_upperbound l \ k}) \ k = interval_upperbound l \ k" + unfolding interval_split[OF k] + apply (subst interval_bounds) + prefer 3 + apply (subst interval_bounds) + unfolding l interval_bounds[OF uv(1)] + using uv[rule_format,of k] ab k + apply auto + done + have "\x. x \ ?D - ?D2" + using assms(2-) + apply - + apply (erule disjE) + apply (rule_tac x="(k,(interval_lowerbound l)\k)" in exI) + defer + apply (rule_tac x="(k,(interval_upperbound l)\k)" in exI) + unfolding division_points_def + unfolding interval_bounds[OF ab] + apply (auto simp add:*) + done + then show "?D2 \ ?D" + apply - + apply rule + apply (rule division_points_subset[OF assms(1-4) k]) + apply auto + done +qed + subsection {* Preservation by divisions and tagged divisions. *} @@ -4091,138 +4385,308 @@ lemma iterate_expand_cases: "iterate opp s f = (if finite(support opp f s) then iterate opp (support opp f s) f else neutral opp)" - apply(cases) apply(subst if_P,assumption) unfolding iterate_def support_support fold'_def by auto - -lemma iterate_image: assumes "monoidal opp" "inj_on f s" + apply cases + apply (subst if_P, assumption) + unfolding iterate_def support_support fold'_def + apply auto + done + +lemma iterate_image: + assumes "monoidal opp" + and "inj_on f s" shows "iterate opp (f ` s) g = iterate opp s (g \ f)" -proof- have *:"\s. finite s \ \x\s. \y\s. f x = f y \ x = y \ - iterate opp (f ` s) g = iterate opp s (g \ f)" - proof- case goal1 show ?case using goal1 - proof(induct s) case empty thus ?case using assms(1) by auto - next case (insert x s) show ?case unfolding iterate_insert[OF assms(1) insert(1)] - unfolding if_not_P[OF insert(2)] apply(subst insert(3)[symmetric]) - unfolding image_insert defer apply(subst iterate_insert[OF assms(1)]) - apply(rule finite_imageI insert)+ apply(subst if_not_P) - unfolding image_iff o_def using insert(2,4) by auto - qed qed +proof - + have *: "\s. finite s \ \x\s. \y\s. 
f x = f y \ x = y \ + iterate opp (f ` s) g = iterate opp s (g \ f)" + proof - + case goal1 + then show ?case + proof (induct s) + case empty + then show ?case + using assms(1) by auto + next + case (insert x s) + show ?case + unfolding iterate_insert[OF assms(1) insert(1)] + unfolding if_not_P[OF insert(2)] + apply (subst insert(3)[symmetric]) + unfolding image_insert + defer + apply (subst iterate_insert[OF assms(1)]) + apply (rule finite_imageI insert)+ + apply (subst if_not_P) + unfolding image_iff o_def + using insert(2,4) + apply auto + done + qed + qed show ?thesis - apply(cases "finite (support opp g (f ` s))") - apply(subst (1) iterate_support[symmetric],subst (2) iterate_support[symmetric]) - unfolding support_clauses apply(rule *)apply(rule finite_imageD,assumption) unfolding inj_on_def[symmetric] - apply(rule subset_inj_on[OF assms(2) support_subset])+ - apply(subst iterate_expand_cases) unfolding support_clauses apply(simp only: if_False) - apply(subst iterate_expand_cases) apply(subst if_not_P) by auto qed - - -(* This lemma about iterations comes up in a few places. *) + apply (cases "finite (support opp g (f ` s))") + apply (subst (1) iterate_support[symmetric],subst (2) iterate_support[symmetric]) + unfolding support_clauses + apply (rule *) + apply (rule finite_imageD,assumption) + unfolding inj_on_def[symmetric] + apply (rule subset_inj_on[OF assms(2) support_subset])+ + apply (subst iterate_expand_cases) + unfolding support_clauses + apply (simp only: if_False) + apply (subst iterate_expand_cases) + apply (subst if_not_P) + apply auto + done +qed + + +(* This lemma about iterations comes up in a few places. *) lemma iterate_nonzero_image_lemma: - assumes "monoidal opp" "finite s" "g(a) = neutral opp" - "\x\s. \y\s. f x = f y \ x \ y \ g(f x) = neutral opp" + assumes "monoidal opp" + and "finite s" "g(a) = neutral opp" + and "\x\s. \y\s. f x = f y \ x \ y \ g(f x) = neutral opp" shows "iterate opp {f x | x. x \ s \ f x \ a} g = iterate opp s (g \ f)" -proof- have *:"{f x |x. x \ s \ ~(f x = a)} = f ` {x. x \ s \ ~(f x = a)}" by auto - have **:"support opp (g \ f) {x \ s. f x \ a} = support opp (g \ f) s" +proof - + have *: "{f x |x. x \ s \ f x \ a} = f ` {x. x \ s \ f x \ a}" + by auto + have **: "support opp (g \ f) {x \ s. f x \ a} = support opp (g \ f) s" unfolding support_def using assms(3) by auto - show ?thesis unfolding * - apply(subst iterate_support[symmetric]) unfolding support_clauses - apply(subst iterate_image[OF assms(1)]) defer - apply(subst(2) iterate_support[symmetric]) apply(subst **) - unfolding inj_on_def using assms(3,4) unfolding support_def by auto qed + show ?thesis + unfolding * + apply (subst iterate_support[symmetric]) + unfolding support_clauses + apply (subst iterate_image[OF assms(1)]) + defer + apply (subst(2) iterate_support[symmetric]) + apply (subst **) + unfolding inj_on_def + using assms(3,4) + unfolding support_def + apply auto + done +qed lemma iterate_eq_neutral: - assumes "monoidal opp" "\x \ s. (f(x) = neutral opp)" - shows "(iterate opp s f = neutral opp)" -proof- have *:"support opp f s = {}" unfolding support_def using assms(2) by auto - show ?thesis apply(subst iterate_support[symmetric]) - unfolding * using assms(1) by auto qed - -lemma iterate_op: assumes "monoidal opp" "finite s" - shows "iterate opp s (\x. 
opp (f x) (g x)) = opp (iterate opp s f) (iterate opp s g)" using assms(2) -proof(induct s) case empty thus ?case unfolding iterate_insert[OF assms(1)] using assms(1) by auto -next case (insert x F) show ?case unfolding iterate_insert[OF assms(1) insert(1)] if_not_P[OF insert(2)] insert(3) - unfolding monoidal_ac[OF assms(1)] by(rule refl) qed - -lemma iterate_eq: assumes "monoidal opp" "\x. x \ s \ f x = g x" + assumes "monoidal opp" + and "\x \ s. f x = neutral opp" + shows "iterate opp s f = neutral opp" +proof - + have *: "support opp f s = {}" + unfolding support_def using assms(2) by auto + show ?thesis + apply (subst iterate_support[symmetric]) + unfolding * + using assms(1) + apply auto + done +qed + +lemma iterate_op: + assumes "monoidal opp" + and "finite s" + shows "iterate opp s (\x. opp (f x) (g x)) = opp (iterate opp s f) (iterate opp s g)" + using assms(2) +proof (induct s) + case empty + then show ?case + unfolding iterate_insert[OF assms(1)] using assms(1) by auto +next + case (insert x F) + show ?case + unfolding iterate_insert[OF assms(1) insert(1)] if_not_P[OF insert(2)] insert(3) + by (simp add: monoidal_ac[OF assms(1)]) +qed + +lemma iterate_eq: + assumes "monoidal opp" + and "\x. x \ s \ f x = g x" shows "iterate opp s f = iterate opp s g" -proof- have *:"support opp g s = support opp f s" +proof - + have *: "support opp g s = support opp f s" unfolding support_def using assms(2) by auto show ?thesis - proof(cases "finite (support opp f s)") - case False thus ?thesis apply(subst iterate_expand_cases,subst(2) iterate_expand_cases) - unfolding * by auto - next def su \ "support opp f s" + proof (cases "finite (support opp f s)") + case False + then show ?thesis + apply (subst iterate_expand_cases) + apply (subst(2) iterate_expand_cases) + unfolding * + apply auto + done + next + def su \ "support opp f s" case True note support_subset[of opp f s] - thus ?thesis apply- apply(subst iterate_support[symmetric],subst(2) iterate_support[symmetric]) unfolding * using True + then show ?thesis + apply - + apply (subst iterate_support[symmetric]) + apply (subst(2) iterate_support[symmetric]) + unfolding * + using True unfolding su_def[symmetric] - proof(induct su) case empty show ?case by auto - next case (insert x s) show ?case unfolding iterate_insert[OF assms(1) insert(1)] - unfolding if_not_P[OF insert(2)] apply(subst insert(3)) - defer apply(subst assms(2)[of x]) using insert by auto qed qed qed - -lemma nonempty_witness: assumes "s \ {}" obtains x where "x \ s" using assms by auto - -lemma operative_division: fixes f::"('a::ordered_euclidean_space) set \ 'b" - assumes "monoidal opp" "operative opp f" "d division_of {a..b}" + proof (induct su) + case empty + show ?case by auto + next + case (insert x s) + show ?case + unfolding iterate_insert[OF assms(1) insert(1)] + unfolding if_not_P[OF insert(2)] + apply (subst insert(3)) + defer + apply (subst assms(2)[of x]) + using insert + apply auto + done + qed + qed +qed + +lemma nonempty_witness: + assumes "s \ {}" + obtains x where "x \ s" + using assms by auto + +lemma operative_division: + fixes f :: "'a::ordered_euclidean_space set \ 'b" + assumes "monoidal opp" + and "operative opp f" + and "d division_of {a..b}" shows "iterate opp d f = f {a..b}" -proof- def C \ "card (division_points {a..b} d)" thus ?thesis using assms - proof(induct C arbitrary:a b d rule:full_nat_induct) +proof - + def C \ "card (division_points {a..b} d)" + then show ?thesis + using assms + proof (induct C arbitrary: a b d rule: full_nat_induct) case 
goal1 - { presume *:"content {a..b} \ 0 \ ?case" - thus ?case apply-apply(cases) defer apply assumption - proof- assume as:"content {a..b} = 0" - show ?case unfolding operativeD(1)[OF assms(2) as] apply(rule iterate_eq_neutral[OF goal1(2)]) - proof fix x assume x:"x\d" - then guess u v apply(drule_tac division_ofD(4)[OF goal1(4)]) by(erule exE)+ - thus "f x = neutral opp" using division_of_content_0[OF as goal1(4)] - using operativeD(1)[OF assms(2)] x by auto - qed qed } + { presume *: "content {a..b} \ 0 \ ?case" + then show ?case + apply - + apply cases + defer + apply assumption + proof - + assume as: "content {a..b} = 0" + show ?case + unfolding operativeD(1)[OF assms(2) as] + apply(rule iterate_eq_neutral[OF goal1(2)]) + proof + fix x + assume x: "x \ d" + then guess u v + apply (drule_tac division_ofD(4)[OF goal1(4)]) + apply (elim exE) + done + then show "f x = neutral opp" + using division_of_content_0[OF as goal1(4)] + using operativeD(1)[OF assms(2)] x + by auto + qed + qed + } assume "content {a..b} \ 0" note ab = this[unfolded content_lt_nz[symmetric] content_pos_lt_eq] - hence ab':"\i\Basis. a\i \ b\i" by (auto intro!: less_imp_le) show ?case - proof(cases "division_points {a..b} d = {}") - case True have d':"\i\d. \u v. i = {u..v} \ + then have ab': "\i\Basis. a\i \ b\i" + by (auto intro!: less_imp_le) + show ?case + proof (cases "division_points {a..b} d = {}") + case True + have d': "\i\d. \u v. i = {u..v} \ (\j\Basis. u\j = a\j \ v\j = a\j \ u\j = b\j \ v\j = b\j \ u\j = a\j \ v\j = b\j)" - unfolding forall_in_division[OF goal1(4)] apply(rule,rule,rule) - apply(rule_tac x=a in exI,rule_tac x=b in exI) apply(rule,rule refl) + unfolding forall_in_division[OF goal1(4)] + apply rule + apply rule + apply rule + apply (rule_tac x=a in exI) + apply (rule_tac x=b in exI) + apply rule + apply (rule refl) proof - fix u v and j :: 'a assume j:"j\Basis" assume as:"{u..v} \ d" note division_ofD(3)[OF goal1(4) this] - hence uv:"\i\Basis. u\i \ v\i" "u\j \ v\j" using j unfolding interval_ne_empty by auto - have *:"\p r Q. \ j\Basis \ p \ r \ (\x\d. Q x) \ p \ r \ (Q {u..v})" using as j by auto + fix u v + fix j :: 'a + assume j: "j \ Basis" + assume as: "{u..v} \ d" + note division_ofD(3)[OF goal1(4) this] + then have uv: "\i\Basis. u\i \ v\i" "u\j \ v\j" + using j unfolding interval_ne_empty by auto + have *: "\p r Q. \ j\Basis \ p \ r \ (\x\d. 
Q x) \ p \ r \ Q {u..v}" + using as j by auto have "(j, u\j) \ division_points {a..b} d" "(j, v\j) \ division_points {a..b} d" using True by auto note this[unfolded de_Morgan_conj division_points_def mem_Collect_eq split_conv interval_bounds[OF ab'] bex_simps] note *[OF this(1)] *[OF this(2)] note this[unfolded interval_bounds[OF uv(1)]] - moreover have "a\j \ u\j" "v\j \ b\j" using division_ofD(2,2,3)[OF goal1(4) as] - unfolding subset_eq apply- apply(erule_tac x=u in ballE,erule_tac[3] x=v in ballE) - unfolding interval_ne_empty mem_interval using j by auto + moreover + have "a\j \ u\j" "v\j \ b\j" + using division_ofD(2,2,3)[OF goal1(4) as] + unfolding subset_eq + apply - + apply (erule_tac x=u in ballE,erule_tac[3] x=v in ballE) + unfolding interval_ne_empty mem_interval + using j + apply auto + done ultimately show "u\j = a\j \ v\j = a\j \ u\j = b\j \ v\j = b\j \ u\j = a\j \ v\j = b\j" unfolding not_less de_Morgan_disj using ab[rule_format,of j] uv(2) j by auto qed have "(1/2) *\<^sub>R (a+b) \ {a..b}" unfolding mem_interval using ab by(auto intro!: less_imp_le simp: inner_simps) note this[unfolded division_ofD(6)[OF goal1(4),symmetric] Union_iff] - then guess i .. note i=this guess u v using d'[rule_format,OF i(1)] apply-by(erule exE conjE)+ note uv=this + then guess i .. note i=this + guess u v using d'[rule_format,OF i(1)] by (elim exE conjE) note uv=this have "{a..b} \ d" - proof- { presume "i = {a..b}" thus ?thesis using i by auto } - { presume "u = a" "v = b" thus "i = {a..b}" using uv by auto } - show "u = a" "v = b" unfolding euclidean_eq_iff[where 'a='a] - proof(safe) - fix j :: 'a assume j:"j\Basis" + proof - + { presume "i = {a..b}" then show ?thesis using i by auto } + { presume "u = a" "v = b" then show "i = {a..b}" using uv by auto } + show "u = a" "v = b" + unfolding euclidean_eq_iff[where 'a='a] + proof safe + fix j :: 'a + assume j: "j \ Basis" note i(2)[unfolded uv mem_interval,rule_format,of j] - thus "u \ j = a \ j" "v \ j = b \ j" using uv(2)[rule_format,of j] j by (auto simp: inner_simps) - qed qed - hence *:"d = insert {a..b} (d - {{a..b}})" by auto - have "iterate opp (d - {{a..b}}) f = neutral opp" apply(rule iterate_eq_neutral[OF goal1(2)]) - proof fix x assume x:"x \ d - {{a..b}}" hence "x\d" by auto note d'[rule_format,OF this] - then guess u v apply-by(erule exE conjE)+ note uv=this - have "u\a \ v\b" using x[unfolded uv] by auto - then obtain j where "u\j \ a\j \ v\j \ b\j" and j:"j\Basis" unfolding euclidean_eq_iff[where 'a='a] by auto - hence "u\j = v\j" using uv(2)[rule_format,OF j] by auto - hence "content {u..v} = 0" unfolding content_eq_0 apply(rule_tac x=j in bexI) using j by auto - thus "f x = neutral opp" unfolding uv(1) by(rule operativeD(1)[OF goal1(3)]) - qed thus "iterate opp d f = f {a..b}" apply-apply(subst *) - apply(subst iterate_insert[OF goal1(2)]) using goal1(2,4) by auto - next case False hence "\x. 
x\division_points {a..b} d" by auto - then guess k c unfolding split_paired_Ex apply- unfolding division_points_def mem_Collect_eq split_conv - by(erule exE conjE)+ note this(2-4,1) note kc=this[unfolded interval_bounds[OF ab']] + then show "u \ j = a \ j" and "v \ j = b \ j" + using uv(2)[rule_format,of j] j by (auto simp: inner_simps) + qed + qed + then have *: "d = insert {a..b} (d - {{a..b}})" + by auto + have "iterate opp (d - {{a..b}}) f = neutral opp" + apply (rule iterate_eq_neutral[OF goal1(2)]) + proof + fix x + assume x: "x \ d - {{a..b}}" + then have "x\d" + by auto note d'[rule_format,OF this] + then guess u v by (elim exE conjE) note uv=this + have "u \ a \ v \ b" + using x[unfolded uv] by auto + then obtain j where "u\j \ a\j \ v\j \ b\j" and j: "j \ Basis" + unfolding euclidean_eq_iff[where 'a='a] by auto + then have "u\j = v\j" + using uv(2)[rule_format,OF j] by auto + then have "content {u..v} = 0" + unfolding content_eq_0 + apply (rule_tac x=j in bexI) + using j + apply auto + done + then show "f x = neutral opp" + unfolding uv(1) by (rule operativeD(1)[OF goal1(3)]) + qed + then show "iterate opp d f = f {a..b}" + apply - + apply (subst *) + apply (subst iterate_insert[OF goal1(2)]) + using goal1(2,4) + apply auto + done + next + case False + then have "\x. x \ division_points {a..b} d" + by auto + then guess k c + unfolding split_paired_Ex + unfolding division_points_def mem_Collect_eq split_conv + apply (elim exE conjE) + done + note this(2-4,1) note kc=this[unfolded interval_bounds[OF ab']] from this(3) guess j .. note j=this def d1 \ "{l \ {x. x\k \ c} | l. l \ d \ l \ {x. x\k \ c} \ {}}" def d2 \ "{l \ {x. x\k \ c} | l. l \ d \ l \ {x. x\k \ c} \ {}}" @@ -4230,392 +4694,839 @@ def ca \ "(\i\Basis. (if i = k then c else a\i) *\<^sub>R i)::'a" note division_points_psubset[OF goal1(4) ab kc(1-2) j] note psubset_card_mono[OF _ this(1)] psubset_card_mono[OF _ this(2)] - hence *:"(iterate opp d1 f) = f ({a..b} \ {x. x\k \ c})" "(iterate opp d2 f) = f ({a..b} \ {x. x\k \ c})" - apply- unfolding interval_split[OF kc(4)] apply(rule_tac[!] goal1(1)[rule_format]) + then have *: "(iterate opp d1 f) = f ({a..b} \ {x. x\k \ c})" + "(iterate opp d2 f) = f ({a..b} \ {x. x\k \ c})" + unfolding interval_split[OF kc(4)] + apply (rule_tac[!] goal1(1)[rule_format]) using division_split[OF goal1(4), where k=k and c=c] - unfolding interval_split[OF kc(4)] d1_def[symmetric] d2_def[symmetric] unfolding goal1(2) Suc_le_mono - using goal1(2-3) using division_points_finite[OF goal1(4)] using kc(4) by auto + unfolding interval_split[OF kc(4)] d1_def[symmetric] d2_def[symmetric] + unfolding goal1(2) Suc_le_mono + using goal1(2-3) + using division_points_finite[OF goal1(4)] + using kc(4) + apply auto + done have "f {a..b} = opp (iterate opp d1 f) (iterate opp d2 f)" (is "_ = ?prev") - unfolding * apply(rule operativeD(2)) using goal1(3) using kc(4) by auto + unfolding * + apply (rule operativeD(2)) + using goal1(3) + using kc(4) + apply auto + done also have "iterate opp d1 f = iterate opp d (\l. f(l \ {x. x\k \ c}))" - unfolding d1_def apply(rule iterate_nonzero_image_lemma[unfolded o_def]) - unfolding empty_as_interval apply(rule goal1 division_of_finite operativeD[OF goal1(3)])+ - unfolding empty_as_interval[symmetric] apply(rule content_empty) - proof(rule,rule,rule,erule conjE) fix l y assume as:"l \ d" "y \ d" "l \ {x. x \ k \ c} = y \ {x. x \ k \ c}" "l \ y" - from division_ofD(4)[OF goal1(4) this(1)] guess u v apply-by(erule exE)+ note l=this - show "f (l \ {x. 
x \ k \ c}) = neutral opp" unfolding l interval_split[OF kc(4)] - apply(rule operativeD(1) goal1)+ unfolding interval_split[symmetric,OF kc(4)] apply(rule division_split_left_inj) - apply(rule goal1) unfolding l[symmetric] apply(rule as(1),rule as(2)) by(rule kc(4) as)+ - qed also have "iterate opp d2 f = iterate opp d (\l. f(l \ {x. x\k \ c}))" - unfolding d2_def apply(rule iterate_nonzero_image_lemma[unfolded o_def]) - unfolding empty_as_interval apply(rule goal1 division_of_finite operativeD[OF goal1(3)])+ - unfolding empty_as_interval[symmetric] apply(rule content_empty) - proof(rule,rule,rule,erule conjE) fix l y assume as:"l \ d" "y \ d" "l \ {x. c \ x \ k} = y \ {x. c \ x \ k}" "l \ y" - from division_ofD(4)[OF goal1(4) this(1)] guess u v apply-by(erule exE)+ note l=this - show "f (l \ {x. x \ k \ c}) = neutral opp" unfolding l interval_split[OF kc(4)] - apply(rule operativeD(1) goal1)+ unfolding interval_split[symmetric,OF kc(4)] apply(rule division_split_right_inj) - apply(rule goal1) unfolding l[symmetric] apply(rule as(1),rule as(2)) by(rule as kc(4))+ - qed also have *:"\x\d. f x = opp (f (x \ {x. x \ k \ c})) (f (x \ {x. c \ x \ k}))" - unfolding forall_in_division[OF goal1(4)] apply(rule,rule,rule,rule operativeD(2)) using goal1(3) kc by auto - have "opp (iterate opp d (\l. f (l \ {x. x \ k \ c}))) (iterate opp d (\l. f (l \ {x. c \ x \ k}))) - = iterate opp d f" apply(subst(3) iterate_eq[OF _ *[rule_format]]) prefer 3 - apply(rule iterate_op[symmetric]) using goal1 by auto + unfolding d1_def + apply (rule iterate_nonzero_image_lemma[unfolded o_def]) + unfolding empty_as_interval + apply (rule goal1 division_of_finite operativeD[OF goal1(3)])+ + unfolding empty_as_interval[symmetric] + apply (rule content_empty) + proof (rule, rule, rule, erule conjE) + fix l y + assume as: "l \ d" "y \ d" "l \ {x. x \ k \ c} = y \ {x. x \ k \ c}" "l \ y" + from division_ofD(4)[OF goal1(4) this(1)] guess u v by (elim exE) note l=this + show "f (l \ {x. x \ k \ c}) = neutral opp" + unfolding l interval_split[OF kc(4)] + apply (rule operativeD(1) goal1)+ + unfolding interval_split[symmetric,OF kc(4)] + apply (rule division_split_left_inj) + apply (rule goal1) + unfolding l[symmetric] + apply (rule as(1), rule as(2)) + apply (rule kc(4) as)+ + done + qed + also have "iterate opp d2 f = iterate opp d (\l. f(l \ {x. x\k \ c}))" + unfolding d2_def + apply (rule iterate_nonzero_image_lemma[unfolded o_def]) + unfolding empty_as_interval + apply (rule goal1 division_of_finite operativeD[OF goal1(3)])+ + unfolding empty_as_interval[symmetric] + apply (rule content_empty) + proof (rule, rule, rule, erule conjE) + fix l y + assume as: "l \ d" "y \ d" "l \ {x. c \ x \ k} = y \ {x. c \ x \ k}" "l \ y" + from division_ofD(4)[OF goal1(4) this(1)] guess u v by (elim exE) note l=this + show "f (l \ {x. x \ k \ c}) = neutral opp" + unfolding l interval_split[OF kc(4)] + apply (rule operativeD(1) goal1)+ + unfolding interval_split[symmetric,OF kc(4)] + apply (rule division_split_right_inj) + apply (rule goal1) + unfolding l[symmetric] + apply (rule as(1)) + apply (rule as(2)) + apply (rule as kc(4))+ + done + qed also have *: "\x\d. f x = opp (f (x \ {x. x \ k \ c})) (f (x \ {x. c \ x \ k}))" + unfolding forall_in_division[OF goal1(4)] + apply (rule, rule, rule, rule operativeD(2)) + using goal1(3) kc + by auto + have "opp (iterate opp d (\l. f (l \ {x. x \ k \ c}))) (iterate opp d (\l. f (l \ {x. 
c \ x \ k}))) = + iterate opp d f" + apply (subst(3) iterate_eq[OF _ *[rule_format]]) + prefer 3 + apply (rule iterate_op[symmetric]) + using goal1 + apply auto + done finally show ?thesis by auto - qed qed qed - -lemma iterate_image_nonzero: assumes "monoidal opp" - "finite s" "\x\s. \y\s. ~(x = y) \ f x = f y \ g(f x) = neutral opp" - shows "iterate opp (f ` s) g = iterate opp s (g \ f)" using assms -proof(induct rule:finite_subset_induct[OF assms(2) subset_refl]) - case goal1 show ?case using assms(1) by auto -next case goal2 have *:"\x y. y = neutral opp \ x = opp y x" using assms(1) by auto - show ?case unfolding image_insert apply(subst iterate_insert[OF assms(1)]) - apply(rule finite_imageI goal2)+ - apply(cases "f a \ f ` F") unfolding if_P if_not_P apply(subst goal2(4)[OF assms(1) goal2(1)]) defer - apply(subst iterate_insert[OF assms(1) goal2(1)]) defer - apply(subst iterate_insert[OF assms(1) goal2(1)]) - unfolding if_not_P[OF goal2(3)] defer unfolding image_iff defer apply(erule bexE) - apply(rule *) unfolding o_def apply(rule_tac y=x in goal2(7)[rule_format]) - using goal2 unfolding o_def by auto qed - -lemma operative_tagged_division: assumes "monoidal opp" "operative opp f" "d tagged_division_of {a..b}" - shows "iterate(opp) d (\(x,l). f l) = f {a..b}" -proof- have *:"(\(x,l). f l) = (f o snd)" unfolding o_def by(rule,auto) note assm = tagged_division_ofD[OF assms(3)] - have "iterate(opp) d (\(x,l). f l) = iterate opp (snd ` d) f" unfolding * - apply(rule iterate_image_nonzero[symmetric,OF assms(1)]) apply(rule tagged_division_of_finite assms)+ - unfolding Ball_def split_paired_All snd_conv apply(rule,rule,rule,rule,rule,rule,rule,erule conjE) - proof- fix a b aa ba assume as:"(a, b) \ d" "(aa, ba) \ d" "(a, b) \ (aa, ba)" "b = ba" - guess u v using assm(4)[OF as(1)] apply-by(erule exE)+ note uv=this - show "f b = neutral opp" unfolding uv apply(rule operativeD(1)[OF assms(2)]) - unfolding content_eq_0_interior using tagged_division_ofD(5)[OF assms(3) as(1-3)] - unfolding as(4)[symmetric] uv by auto - qed also have "\ = f {a..b}" + qed + qed +qed + +lemma iterate_image_nonzero: + assumes "monoidal opp" + and "finite s" + and "\x\s. \y\s. x \ y \ f x = f y \ g (f x) = neutral opp" + shows "iterate opp (f ` s) g = iterate opp s (g \ f)" + using assms +proof (induct rule: finite_subset_induct[OF assms(2) subset_refl]) + case goal1 + show ?case + using assms(1) by auto +next + case goal2 + have *: "\x y. y = neutral opp \ x = opp y x" + using assms(1) by auto + show ?case + unfolding image_insert + apply (subst iterate_insert[OF assms(1)]) + apply (rule finite_imageI goal2)+ + apply (cases "f a \ f ` F") + unfolding if_P if_not_P + apply (subst goal2(4)[OF assms(1) goal2(1)]) + defer + apply (subst iterate_insert[OF assms(1) goal2(1)]) + defer + apply (subst iterate_insert[OF assms(1) goal2(1)]) + unfolding if_not_P[OF goal2(3)] + defer unfolding image_iff + defer + apply (erule bexE) + apply (rule *) + unfolding o_def + apply (rule_tac y=x in goal2(7)[rule_format]) + using goal2 + unfolding o_def + apply auto + done +qed + +lemma operative_tagged_division: + assumes "monoidal opp" + and "operative opp f" + and "d tagged_division_of {a..b}" + shows "iterate opp d (\(x,l). f l) = f {a..b}" +proof - + have *: "(\(x,l). f l) = f \ snd" + unfolding o_def by rule auto note assm = tagged_division_ofD[OF assms(3)] + have "iterate opp d (\(x,l). 
f l) = iterate opp (snd ` d) f" + unfolding * + apply (rule iterate_image_nonzero[symmetric,OF assms(1)]) + apply (rule tagged_division_of_finite assms)+ + unfolding Ball_def split_paired_All snd_conv + apply (rule, rule, rule, rule, rule, rule, rule, erule conjE) + proof - + fix a b aa ba + assume as: "(a, b) \ d" "(aa, ba) \ d" "(a, b) \ (aa, ba)" "b = ba" + guess u v using assm(4)[OF as(1)] by (elim exE) note uv=this + show "f b = neutral opp" + unfolding uv + apply (rule operativeD(1)[OF assms(2)]) + unfolding content_eq_0_interior + using tagged_division_ofD(5)[OF assms(3) as(1-3)] + unfolding as(4)[symmetric] uv + apply auto + done + qed + also have "\ = f {a..b}" using operative_division[OF assms(1-2) division_of_tagged_division[OF assms(3)]] . - finally show ?thesis . qed + finally show ?thesis . +qed + subsection {* Additivity of content. *} lemma setsum_iterate: - assumes "finite s" shows "setsum f s = iterate op + s f" + assumes "finite s" + shows "setsum f s = iterate op + s f" proof - have *: "setsum f s = setsum f (support op + f s)" apply (rule setsum_mono_zero_right) - unfolding support_def neutral_monoid using assms by auto + unfolding support_def neutral_monoid + using assms + apply auto + done then show ?thesis unfolding * iterate_def fold'_def setsum.eq_fold unfolding neutral_monoid by (simp add: comp_def) qed -lemma additive_content_division: assumes "d division_of {a..b}" - shows "setsum content d = content({a..b})" +lemma additive_content_division: + assumes "d division_of {a..b}" + shows "setsum content d = content {a..b}" unfolding operative_division[OF monoidal_monoid operative_content assms,symmetric] - apply(subst setsum_iterate) using assms by auto + apply (subst setsum_iterate) + using assms + apply auto + done lemma additive_content_tagged_division: assumes "d tagged_division_of {a..b}" - shows "setsum (\(x,l). content l) d = content({a..b})" + shows "setsum (\(x,l). content l) d = content {a..b}" unfolding operative_tagged_division[OF monoidal_monoid operative_content assms,symmetric] - apply(subst setsum_iterate) using assms by auto + apply (subst setsum_iterate) + using assms + apply auto + done + subsection {* Finally, the integral of a constant *} lemma has_integral_const[intro]: - "((\x. c) has_integral (content({a..b::'a::ordered_euclidean_space}) *\<^sub>R c)) ({a..b})" - unfolding has_integral apply(rule,rule,rule_tac x="\x. ball x 1" in exI) - apply(rule,rule gauge_trivial)apply(rule,rule,erule conjE) - unfolding split_def apply(subst scaleR_left.setsum[symmetric, unfolded o_def]) - defer apply(subst additive_content_tagged_division[unfolded split_def]) apply assumption by auto + fixes a b :: "'a::ordered_euclidean_space" + shows "((\x. c) has_integral (content {a..b} *\<^sub>R c)) {a..b}" + unfolding has_integral + apply rule + apply rule + apply (rule_tac x="\x. ball x 1" in exI) + apply rule + apply (rule gauge_trivial) + apply rule + apply rule + apply (erule conjE) + unfolding split_def + apply (subst scaleR_left.setsum[symmetric, unfolded o_def]) + defer + apply (subst additive_content_tagged_division[unfolded split_def]) + apply assumption + apply auto + done lemma integral_const[simp]: fixes a b :: "'a::ordered_euclidean_space" shows "integral {a .. b} (\x. c) = content {a .. b} *\<^sub>R c" by (rule integral_unique) (rule has_integral_const) + subsection {* Bounds on the norm of Riemann sums and the integral itself. *} -lemma dsum_bound: assumes "p division_of {a..b}" "norm(c) \ e" - shows "norm(setsum (\l. 
content l *\<^sub>R c) p) \ e * content({a..b})" (is "?l \ ?r") - apply(rule order_trans,rule norm_setsum) unfolding norm_scaleR setsum_left_distrib[symmetric] - apply(rule order_trans[OF mult_left_mono],rule assms,rule setsum_abs_ge_zero) - apply(subst mult_commute) apply(rule mult_left_mono) - apply(rule order_trans[of _ "setsum content p"]) apply(rule eq_refl,rule setsum_cong2) - apply(subst abs_of_nonneg) unfolding additive_content_division[OF assms(1)] -proof- from order_trans[OF norm_ge_zero[of c] assms(2)] show "0 \ e" . - fix x assume "x\p" from division_ofD(4)[OF assms(1) this] guess u v apply-by(erule exE)+ - thus "0 \ content x" using content_pos_le by auto -qed(insert assms,auto) - -lemma rsum_bound: assumes "p tagged_division_of {a..b}" "\x\{a..b}. norm(f x) \ e" - shows "norm(setsum (\(x,k). content k *\<^sub>R f x) p) \ e * content({a..b})" -proof(cases "{a..b} = {}") case True - show ?thesis using assms(1) unfolding True tagged_division_of_trivial by auto -next case False show ?thesis - apply(rule order_trans,rule norm_setsum) unfolding split_def norm_scaleR - apply(rule order_trans[OF setsum_mono]) apply(rule mult_left_mono[OF _ abs_ge_zero, of _ e]) defer - unfolding setsum_left_distrib[symmetric] apply(subst mult_commute) apply(rule mult_left_mono) - apply(rule order_trans[of _ "setsum (content \ snd) p"]) apply(rule eq_refl,rule setsum_cong2) - apply(subst o_def, rule abs_of_nonneg) - proof- show "setsum (content \ snd) p \ content {a..b}" apply(rule eq_refl) - unfolding additive_content_tagged_division[OF assms(1),symmetric] split_def by auto +lemma dsum_bound: + assumes "p division_of {a..b}" + and "norm c \ e" + shows "norm (setsum (\l. content l *\<^sub>R c) p) \ e * content({a..b})" + apply (rule order_trans) + apply (rule norm_setsum) + unfolding norm_scaleR setsum_left_distrib[symmetric] + apply (rule order_trans[OF mult_left_mono]) + apply (rule assms) + apply (rule setsum_abs_ge_zero) + apply (subst mult_commute) + apply (rule mult_left_mono) + apply (rule order_trans[of _ "setsum content p"]) + apply (rule eq_refl) + apply (rule setsum_cong2) + apply (subst abs_of_nonneg) + unfolding additive_content_division[OF assms(1)] +proof - + from order_trans[OF norm_ge_zero[of c] assms(2)] + show "0 \ e" . + fix x assume "x \ p" + from division_ofD(4)[OF assms(1) this] guess u v by (elim exE) + then show "0 \ content x" + using content_pos_le by auto +qed (insert assms, auto) + +lemma rsum_bound: + assumes "p tagged_division_of {a..b}" + and "\x\{a..b}. norm (f x) \ e" + shows "norm (setsum (\(x,k). content k *\<^sub>R f x) p) \ e * content {a..b}" +proof (cases "{a..b} = {}") + case True + show ?thesis + using assms(1) unfolding True tagged_division_of_trivial by auto +next + case False + show ?thesis + apply (rule order_trans) + apply (rule norm_setsum) + unfolding split_def norm_scaleR + apply (rule order_trans[OF setsum_mono]) + apply (rule mult_left_mono[OF _ abs_ge_zero, of _ e]) + defer + unfolding setsum_left_distrib[symmetric] + apply (subst mult_commute) + apply (rule mult_left_mono) + apply (rule order_trans[of _ "setsum (content \ snd) p"]) + apply (rule eq_refl) + apply (rule setsum_cong2) + apply (subst o_def) + apply (rule abs_of_nonneg) + proof - + show "setsum (content \ snd) p \ content {a..b}" + apply (rule eq_refl) + unfolding additive_content_tagged_division[OF assms(1),symmetric] split_def + apply auto + done guess w using nonempty_witness[OF False] . 
- thus "e\0" apply-apply(rule order_trans) defer apply(rule assms(2)[rule_format],assumption) by auto - fix xk assume *:"xk\p" guess x k using surj_pair[of xk] apply-by(erule exE)+ note xk = this *[unfolded this] - from tagged_division_ofD(4)[OF assms(1) xk(2)] guess u v apply-by(erule exE)+ note uv=this - show "0\ content (snd xk)" unfolding xk snd_conv uv by(rule content_pos_le) - show "norm (f (fst xk)) \ e" unfolding xk fst_conv using tagged_division_ofD(2,3)[OF assms(1) xk(2)] assms(2) by auto - qed qed + then show "e \ 0" + apply - + apply (rule order_trans) + defer + apply (rule assms(2)[rule_format]) + apply assumption + apply auto + done + fix xk + assume *: "xk \ p" + guess x k using surj_pair[of xk] by (elim exE) note xk = this *[unfolded this] + from tagged_division_ofD(4)[OF assms(1) xk(2)] guess u v by (elim exE) note uv=this + show "0 \ content (snd xk)" + unfolding xk snd_conv uv by(rule content_pos_le) + show "norm (f (fst xk)) \ e" + unfolding xk fst_conv using tagged_division_ofD(2,3)[OF assms(1) xk(2)] assms(2) by auto + qed +qed lemma rsum_diff_bound: - assumes "p tagged_division_of {a..b}" "\x\{a..b}. norm(f x - g x) \ e" - shows "norm(setsum (\(x,k). content k *\<^sub>R f x) p - setsum (\(x,k). content k *\<^sub>R g x) p) \ e * content({a..b})" - apply(rule order_trans[OF _ rsum_bound[OF assms]]) apply(rule eq_refl) apply(rule arg_cong[where f=norm]) - unfolding setsum_subtractf[symmetric] apply(rule setsum_cong2) unfolding scaleR_diff_right by auto - -lemma has_integral_bound: fixes f::"'a::ordered_euclidean_space \ 'b::real_normed_vector" - assumes "0 \ B" "(f has_integral i) ({a..b})" "\x\{a..b}. norm(f x) \ B" + assumes "p tagged_division_of {a..b}" + and "\x\{a..b}. norm (f x - g x) \ e" + shows "norm (setsum (\(x,k). content k *\<^sub>R f x) p - setsum (\(x,k). content k *\<^sub>R g x) p) \ + e * content {a..b}" + apply (rule order_trans[OF _ rsum_bound[OF assms]]) + apply (rule eq_refl) + apply (rule arg_cong[where f=norm]) + unfolding setsum_subtractf[symmetric] + apply (rule setsum_cong2) + unfolding scaleR_diff_right + apply auto + done + +lemma has_integral_bound: + fixes f :: "'a::ordered_euclidean_space \ 'b::real_normed_vector" + assumes "0 \ B" + and "(f has_integral i) {a..b}" + and "\x\{a..b}. 
norm (f x) \ B" shows "norm i \ B * content {a..b}" -proof- let ?P = "content {a..b} > 0" { presume "?P \ ?thesis" - thus ?thesis proof(cases ?P) case False - hence *:"content {a..b} = 0" using content_lt_nz by auto - hence **:"i = 0" using assms(2) apply(subst has_integral_null_eq[symmetric]) by auto - show ?thesis unfolding * ** using assms(1) by auto - qed auto } assume ab:?P - { presume "\ ?thesis \ False" thus ?thesis by auto } - assume "\ ?thesis" hence *:"norm i - B * content {a..b} > 0" by auto - from assms(2)[unfolded has_integral,rule_format,OF *] guess d apply-by(erule exE conjE)+ note d=this[rule_format] +proof - + let ?P = "content {a..b} > 0" + { + presume "?P \ ?thesis" + then show ?thesis + proof (cases ?P) + case False + then have *: "content {a..b} = 0" + using content_lt_nz by auto + hence **: "i = 0" + using assms(2) + apply (subst has_integral_null_eq[symmetric]) + apply auto + done + show ?thesis + unfolding * ** using assms(1) by auto + qed auto + } + assume ab: ?P + { presume "\ ?thesis \ False" then show ?thesis by auto } + assume "\ ?thesis" + then have *: "norm i - B * content {a..b} > 0" + by auto + from assms(2)[unfolded has_integral,rule_format,OF *] + guess d by (elim exE conjE) note d=this[rule_format] from fine_division_exists[OF this(1), of a b] guess p . note p=this - have *:"\s B. norm s \ B \ \ (norm (s - i) < norm i - B)" - proof- case goal1 thus ?case unfolding not_less - using norm_triangle_sub[of i s] unfolding norm_minus_commute by auto - qed show False using d(2)[OF conjI[OF p]] *[OF rsum_bound[OF p(1) assms(3)]] by auto qed + have *: "\s B. norm s \ B \ \ norm (s - i) < norm i - B" + proof - + case goal1 + then show ?case + unfolding not_less + using norm_triangle_sub[of i s] + unfolding norm_minus_commute + by auto + qed + show False + using d(2)[OF conjI[OF p]] *[OF rsum_bound[OF p(1) assms(3)]] by auto +qed + subsection {* Similar theorems about relationship among components. *} -lemma rsum_component_le: fixes f::"'a::ordered_euclidean_space \ 'b::euclidean_space" - assumes "p tagged_division_of {a..b}" "\x\{a..b}. (f x)\i \ (g x)\i" +lemma rsum_component_le: + fixes f :: "'a::ordered_euclidean_space \ 'b::euclidean_space" + assumes "p tagged_division_of {a..b}" + and "\x\{a..b}. (f x)\i \ (g x)\i" shows "(setsum (\(x,k). content k *\<^sub>R f x) p)\i \ (setsum (\(x,k). 
content k *\<^sub>R g x) p)\i" - unfolding inner_setsum_left apply(rule setsum_mono) apply safe -proof- fix a b assume ab:"(a,b) \ p" note assm = tagged_division_ofD(2-4)[OF assms(1) ab] - from this(3) guess u v apply-by(erule exE)+ note b=this - show "(content b *\<^sub>R f a) \ i \ (content b *\<^sub>R g a) \ i" unfolding b - unfolding inner_simps real_scaleR_def apply(rule mult_left_mono) - defer apply(rule content_pos_le,rule assms(2)[rule_format]) using assm by auto qed + unfolding inner_setsum_left + apply (rule setsum_mono) + apply safe +proof - + fix a b + assume ab: "(a, b) \ p" + note assm = tagged_division_ofD(2-4)[OF assms(1) ab] + from this(3) guess u v by (elim exE) note b=this + show "(content b *\<^sub>R f a) \ i \ (content b *\<^sub>R g a) \ i" + unfolding b + unfolding inner_simps real_scaleR_def + apply (rule mult_left_mono) + defer + apply (rule content_pos_le,rule assms(2)[rule_format]) + using assm + apply auto + done +qed lemma has_integral_component_le: - fixes f g::"'a::ordered_euclidean_space \ 'b::euclidean_space" + fixes f g :: "'a::ordered_euclidean_space \ 'b::euclidean_space" assumes k: "k \ Basis" - assumes "(f has_integral i) s" "(g has_integral j) s" "\x\s. (f x)\k \ (g x)\k" + assumes "(f has_integral i) s" "(g has_integral j) s" + and "\x\s. (f x)\k \ (g x)\k" shows "i\k \ j\k" proof - have lem:"\a b i (j::'b). \g f::'a \ 'b. (f has_integral i) ({a..b}) \ (g has_integral j) ({a..b}) \ \x\{a..b}. (f x)\k \ (g x)\k \ i\k \ j\k" proof (rule ccontr) case goal1 - then have *: "0 < (i\k - j\k) / 3" by auto - guess d1 using goal1(1)[unfolded has_integral,rule_format,OF *] apply-by(erule exE conjE)+ note d1=this[rule_format] - guess d2 using goal1(2)[unfolded has_integral,rule_format,OF *] apply-by(erule exE conjE)+ note d2=this[rule_format] + then have *: "0 < (i\k - j\k) / 3" + by auto + guess d1 using goal1(1)[unfolded has_integral,rule_format,OF *] by (elim exE conjE) note d1=this[rule_format] + guess d2 using goal1(2)[unfolded has_integral,rule_format,OF *] by (elim exE conjE) note d2=this[rule_format] guess p using fine_division_exists[OF gauge_inter[OF d1(1) d2(1)], of a b] unfolding fine_inter . note p = this(1) conjunctD2[OF this(2)] note le_less_trans[OF Basis_le_norm[OF k]] note this[OF d1(2)[OF conjI[OF p(1,2)]]] this[OF d2(2)[OF conjI[OF p(1,3)]]] - thus False + then show False unfolding inner_simps using rsum_component_le[OF p(1) goal1(3)] by (simp add: abs_real_def split: split_if_asm) - qed let ?P = "\a b. s = {a..b}" - { presume "\ ?P \ ?thesis" thus ?thesis proof(cases ?P) - case True then guess a b apply-by(erule exE)+ note s=this - show ?thesis apply(rule lem) using assms[unfolded s] by auto - qed auto } assume as:"\ ?P" - { presume "\ ?thesis \ False" thus ?thesis by auto } - assume "\ i\k \ j\k" hence ij:"(i\k - j\k) / 3 > 0" by auto + qed + let ?P = "\a b. s = {a..b}" + { + presume "\ ?P \ ?thesis" + then show ?thesis + proof (cases ?P) + case True + then guess a b by (elim exE) note s=this + show ?thesis + apply (rule lem) + using assms[unfolded s] + apply auto + done + qed auto + } + assume as: "\ ?P" + { presume "\ ?thesis \ False" then show ?thesis by auto } + assume "\ i\k \ j\k" + then have ij: "(i\k - j\k) / 3 > 0" + by auto note has_integral_altD[OF _ as this] from this[OF assms(2)] this[OF assms(3)] guess B1 B2 . 
note B=this[rule_format] - have "bounded (ball 0 B1 \ ball (0::'a) B2)" unfolding bounded_Un by(rule conjI bounded_ball)+ - from bounded_subset_closed_interval[OF this] guess a b apply- by(erule exE)+ + have "bounded (ball 0 B1 \ ball (0::'a) B2)" + unfolding bounded_Un by(rule conjI bounded_ball)+ + from bounded_subset_closed_interval[OF this] guess a b by (elim exE) note ab = conjunctD2[OF this[unfolded Un_subset_iff]] guess w1 using B(2)[OF ab(1)] .. note w1=conjunctD2[OF this] guess w2 using B(4)[OF ab(2)] .. note w2=conjunctD2[OF this] - have *:"\w1 w2 j i::real .\w1 - i\ < (i - j) / 3 \ \w2 - j\ < (i - j) / 3 \ w1 \ w2 \ False" + have *: "\w1 w2 j i::real .\w1 - i\ < (i - j) / 3 \ \w2 - j\ < (i - j) / 3 \ w1 \ w2 \ False" by (simp add: abs_real_def split: split_if_asm) - note le_less_trans[OF Basis_le_norm[OF k]] note this[OF w1(2)] this[OF w2(2)] moreover - have "w1\k \ w2\k" apply(rule lem[OF w1(1) w2(1)]) using assms by auto ultimately - show False unfolding inner_simps by(rule *) + note le_less_trans[OF Basis_le_norm[OF k]] + note this[OF w1(2)] this[OF w2(2)] + moreover + have "w1\k \ w2\k" + apply (rule lem[OF w1(1) w2(1)]) + using assms + apply auto + done + ultimately show False + unfolding inner_simps by(rule *) qed -lemma integral_component_le: fixes g f::"'a::ordered_euclidean_space \ 'b::euclidean_space" - assumes "k\Basis" "f integrable_on s" "g integrable_on s" "\x\s. (f x)\k \ (g x)\k" +lemma integral_component_le: + fixes g f :: "'a::ordered_euclidean_space \ 'b::euclidean_space" + assumes "k \ Basis" + and "f integrable_on s" "g integrable_on s" + and "\x\s. (f x)\k \ (g x)\k" shows "(integral s f)\k \ (integral s g)\k" - apply(rule has_integral_component_le) using integrable_integral assms by auto - -lemma has_integral_component_nonneg: fixes f::"'a::ordered_euclidean_space \ 'b::euclidean_space" - assumes "k\Basis" "(f has_integral i) s" "\x\s. 0 \ (f x)\k" shows "0 \ i\k" - using has_integral_component_le[OF assms(1) has_integral_0 assms(2)] using assms(3-) by auto - -lemma integral_component_nonneg: fixes f::"'a::ordered_euclidean_space \ 'b::euclidean_space" - assumes "k\Basis" "f integrable_on s" "\x\s. 0 \ (f x)\k" shows "0 \ (integral s f)\k" - apply(rule has_integral_component_nonneg) using assms by auto - -lemma has_integral_component_neg: fixes f::"'a::ordered_euclidean_space \ 'b::ordered_euclidean_space" - assumes "k\Basis" "(f has_integral i) s" "\x\s. (f x)\k \ 0"shows "i\k \ 0" - using has_integral_component_le[OF assms(1,2) has_integral_0] assms(2-) by auto + apply (rule has_integral_component_le) + using integrable_integral assms + apply auto + done + +lemma has_integral_component_nonneg: + fixes f :: "'a::ordered_euclidean_space \ 'b::euclidean_space" + assumes "k \ Basis" + and "(f has_integral i) s" + and "\x\s. 0 \ (f x)\k" + shows "0 \ i\k" + using has_integral_component_le[OF assms(1) has_integral_0 assms(2)] + using assms(3-) + by auto + +lemma integral_component_nonneg: + fixes f :: "'a::ordered_euclidean_space \ 'b::euclidean_space" + assumes "k \ Basis" + and "f integrable_on s" "\x\s. 0 \ (f x)\k" + shows "0 \ (integral s f)\k" + apply (rule has_integral_component_nonneg) + using assms + apply auto + done + +lemma has_integral_component_neg: + fixes f :: "'a::ordered_euclidean_space \ 'b::ordered_euclidean_space" + assumes "k \ Basis" + and "(f has_integral i) s" + and "\x\s. 
(f x)\k \ 0" + shows "i\k \ 0" + using has_integral_component_le[OF assms(1,2) has_integral_0] assms(2-) + by auto lemma has_integral_component_lbound: - fixes f::"'a::ordered_euclidean_space => 'b::ordered_euclidean_space" - assumes "(f has_integral i) {a..b}" "\x\{a..b}. B \ f(x)\k" "k\Basis" + fixes f :: "'a::ordered_euclidean_space => 'b::ordered_euclidean_space" + assumes "(f has_integral i) {a..b}" + and "\x\{a..b}. B \ f(x)\k" + and "k \ Basis" shows "B * content {a..b} \ i\k" using has_integral_component_le[OF assms(3) has_integral_const assms(1),of "(\i\Basis. B *\<^sub>R i)::'b"] assms(2-) - by (auto simp add:field_simps) + by (auto simp add: field_simps) lemma has_integral_component_ubound: fixes f::"'a::ordered_euclidean_space => 'b::ordered_euclidean_space" - assumes "(f has_integral i) {a..b}" "\x\{a..b}. f x\k \ B" "k\Basis" - shows "i\k \ B * content({a..b})" - using has_integral_component_le[OF assms(3,1) has_integral_const, of "\i\Basis. B *\<^sub>R i"] assms(2-) - by(auto simp add:field_simps) - -lemma integral_component_lbound: fixes f::"'a::ordered_euclidean_space => 'b::ordered_euclidean_space" - assumes "f integrable_on {a..b}" "\x\{a..b}. B \ f(x)\k" "k\Basis" - shows "B * content({a..b}) \ (integral({a..b}) f)\k" - apply(rule has_integral_component_lbound) using assms unfolding has_integral_integral by auto - -lemma integral_component_ubound: fixes f::"'a::ordered_euclidean_space => 'b::ordered_euclidean_space" - assumes "f integrable_on {a..b}" "\x\{a..b}. f(x)\k \ B" "k\Basis" - shows "(integral({a..b}) f)\k \ B * content({a..b})" - apply(rule has_integral_component_ubound) using assms unfolding has_integral_integral by auto + assumes "(f has_integral i) {a..b}" + and "\x\{a..b}. f x\k \ B" + and "k \ Basis" + shows "i\k \ B * content {a..b}" + using has_integral_component_le[OF assms(3,1) has_integral_const, of "\i\Basis. B *\<^sub>R i"] assms(2-) + by (auto simp add: field_simps) + +lemma integral_component_lbound: + fixes f :: "'a::ordered_euclidean_space \ 'b::ordered_euclidean_space" + assumes "f integrable_on {a..b}" + and "\x\{a..b}. B \ f(x)\k" + and "k \ Basis" + shows "B * content {a..b} \ (integral({a..b}) f)\k" + apply (rule has_integral_component_lbound) + using assms + unfolding has_integral_integral + apply auto + done + +lemma integral_component_ubound: + fixes f :: "'a::ordered_euclidean_space \ 'b::ordered_euclidean_space" + assumes "f integrable_on {a..b}" + and "\x\{a..b}. f x\k \ B" + and "k \ Basis" + shows "(integral {a..b} f)\k \ B * content {a..b}" + apply (rule has_integral_component_ubound) + using assms + unfolding has_integral_integral + apply auto + done + subsection {* Uniform limit of integrable functions is integrable. *} -lemma integrable_uniform_limit: fixes f::"'a::ordered_euclidean_space \ 'b::banach" - assumes "\e>0. \g. (\x\{a..b}. norm(f x - g x) \ e) \ g integrable_on {a..b}" +lemma integrable_uniform_limit: + fixes f :: "'a::ordered_euclidean_space \ 'b::banach" + assumes "\e>0. \g. (\x\{a..b}. norm (f x - g x) \ e) \ g integrable_on {a..b}" shows "f integrable_on {a..b}" -proof- { presume *:"content {a..b} > 0 \ ?thesis" - show ?thesis apply cases apply(rule *,assumption) - unfolding content_lt_nz integrable_on_def using has_integral_null by auto } - assume as:"content {a..b} > 0" - have *:"\P. \e>(0::real). P e \ \n::nat. 
P (inverse (real n+1))" by auto +proof - + { + presume *: "content {a..b} > 0 \ ?thesis" + show ?thesis + apply cases + apply (rule *) + apply assumption + unfolding content_lt_nz integrable_on_def + using has_integral_null + apply auto + done + } + assume as: "content {a..b} > 0" + have *: "\P. \e>(0::real). P e \ \n::nat. P (inverse (real n + 1))" + by auto from choice[OF *[OF assms]] guess g .. note g=conjunctD2[OF this[rule_format],rule_format] from choice[OF allI[OF g(2)[unfolded integrable_on_def], of "\x. x"]] guess i .. note i=this[rule_format] - have "Cauchy i" unfolding Cauchy_def - proof(rule,rule) fix e::real assume "e>0" - hence "e / 4 / content {a..b} > 0" using as by(auto simp add:field_simps) - then guess M apply-apply(subst(asm) real_arch_inv) by(erule exE conjE)+ note M=this - show "\M. \m\M. \n\M. dist (i m) (i n) < e" apply(rule_tac x=M in exI,rule,rule,rule,rule) - proof- case goal1 have "e/4>0" using `e>0` by auto note * = i[unfolded has_integral,rule_format,OF this] - from *[of m] guess gm apply-by(erule conjE exE)+ note gm=this[rule_format] - from *[of n] guess gn apply-by(erule conjE exE)+ note gn=this[rule_format] + have "Cauchy i" + unfolding Cauchy_def + proof (rule, rule) + fix e :: real + assume "e>0" + then have "e / 4 / content {a..b} > 0" + using as by (auto simp add: field_simps) + then guess M + apply - + apply (subst(asm) real_arch_inv) + apply (elim exE conjE) + done + note M=this + show "\M. \m\M. \n\M. dist (i m) (i n) < e" + apply (rule_tac x=M in exI,rule,rule,rule,rule) + proof - + case goal1 + have "e/4>0" using `e>0` by auto + note * = i[unfolded has_integral,rule_format,OF this] + from *[of m] guess gm by (elim conjE exE) note gm=this[rule_format] + from *[of n] guess gn by (elim conjE exE) note gn=this[rule_format] from fine_division_exists[OF gauge_inter[OF gm(1) gn(1)], of a b] guess p . note p=this - have lem2:"\s1 s2 i1 i2. norm(s2 - s1) \ e/2 \ norm(s1 - i1) < e / 4 \ norm(s2 - i2) < e / 4 \norm(i1 - i2) < e" - proof- case goal1 have "norm (i1 - i2) \ norm (i1 - s1) + norm (s1 - s2) + norm (s2 - i2)" + have lem2: "\s1 s2 i1 i2. norm(s2 - s1) \ e/2 \ norm (s1 - i1) < e / 4 \ + norm (s2 - i2) < e / 4 \ norm (i1 - i2) < e" + proof - + case goal1 + have "norm (i1 - i2) \ norm (i1 - s1) + norm (s1 - s2) + norm (s2 - i2)" using norm_triangle_ineq[of "i1 - s1" "s1 - i2"] - using norm_triangle_ineq[of "s1 - s2" "s2 - i2"] by(auto simp add:algebra_simps) - also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:algebra_simps) + using norm_triangle_ineq[of "s1 - s2" "s2 - i2"] + by (auto simp add: algebra_simps) + also have "\ < e" + using goal1 + unfolding norm_minus_commute + by (auto simp add: algebra_simps) finally show ?case . 
qed - show ?case unfolding dist_norm apply(rule lem2) defer - apply(rule gm(2)[OF conjI[OF p(1)]],rule_tac[2] gn(2)[OF conjI[OF p(1)]]) - using conjunctD2[OF p(2)[unfolded fine_inter]] apply- apply assumption+ apply(rule order_trans) - apply(rule rsum_diff_bound[OF p(1), where e="2 / real M"]) - proof show "2 / real M * content {a..b} \ e / 2" unfolding divide_inverse - using M as by(auto simp add:field_simps) - fix x assume x:"x \ {a..b}" + show ?case + unfolding dist_norm + apply (rule lem2) + defer + apply (rule gm(2)[OF conjI[OF p(1)]],rule_tac[2] gn(2)[OF conjI[OF p(1)]]) + using conjunctD2[OF p(2)[unfolded fine_inter]] + apply - + apply assumption+ + apply (rule order_trans) + apply (rule rsum_diff_bound[OF p(1), where e="2 / real M"]) + proof + show "2 / real M * content {a..b} \ e / 2" + unfolding divide_inverse + using M as + by (auto simp add: field_simps) + fix x + assume x: "x \ {a..b}" have "norm (f x - g n x) + norm (f x - g m x) \ inverse (real n + 1) + inverse (real m + 1)" - using g(1)[OF x, of n] g(1)[OF x, of m] by auto - also have "\ \ inverse (real M) + inverse (real M)" apply(rule add_mono) - apply(rule_tac[!] le_imp_inverse_le) using goal1 M by auto - also have "\ = 2 / real M" unfolding divide_inverse by auto + using g(1)[OF x, of n] g(1)[OF x, of m] by auto + also have "\ \ inverse (real M) + inverse (real M)" + apply (rule add_mono) + apply (rule_tac[!] le_imp_inverse_le) + using goal1 M + apply auto + done + also have "\ = 2 / real M" + unfolding divide_inverse by auto finally show "norm (g n x - g m x) \ 2 / real M" using norm_triangle_le[of "g n x - f x" "f x - g m x" "2 / real M"] - by(auto simp add:algebra_simps simp add:norm_minus_commute) - qed qed qed + by (auto simp add: algebra_simps simp add: norm_minus_commute) + qed + qed + qed from this[unfolded convergent_eq_cauchy[symmetric]] guess s .. note s=this - show ?thesis unfolding integrable_on_def apply(rule_tac x=s in exI) unfolding has_integral - proof(rule,rule) - case goal1 hence *:"e/3 > 0" by auto + show ?thesis + unfolding integrable_on_def + apply (rule_tac x=s in exI) + unfolding has_integral + proof (rule, rule) + case goal1 + then have *: "e/3 > 0" by auto from LIMSEQ_D [OF s this] guess N1 .. note N1=this - from goal1 as have "e / 3 / content {a..b} > 0" by(auto simp add:field_simps) - from real_arch_invD[OF this] guess N2 apply-by(erule exE conjE)+ note N2=this + from goal1 as have "e / 3 / content {a..b} > 0" + by (auto simp add: field_simps) + from real_arch_invD[OF this] guess N2 by (elim exE conjE) note N2=this from i[of "N1 + N2",unfolded has_integral,rule_format,OF *] guess g' .. note g'=conjunctD2[OF this,rule_format] - have lem:"\sf sg i. norm(sf - sg) \ e / 3 \ norm(i - s) < e / 3 \ norm(sg - i) < e / 3 \ norm(sf - s) < e" - proof- case goal1 have "norm (sf - s) \ norm (sf - sg) + norm (sg - i) + norm (i - s)" + have lem: "\sf sg i. norm (sf - sg) \ e / 3 \ + norm(i - s) < e / 3 \ norm (sg - i) < e / 3 \ norm (sf - s) < e" + proof - + case goal1 + have "norm (sf - s) \ norm (sf - sg) + norm (sg - i) + norm (i - s)" using norm_triangle_ineq[of "sf - sg" "sg - s"] - using norm_triangle_ineq[of "sg - i" " i - s"] by(auto simp add:algebra_simps) - also have "\ < e" using goal1 unfolding norm_minus_commute by(auto simp add:algebra_simps) + using norm_triangle_ineq[of "sg - i" " i - s"] + by (auto simp add: algebra_simps) + also have "\ < e" + using goal1 + unfolding norm_minus_commute + by (auto simp add: algebra_simps) finally show ?case . 
qed - show ?case apply(rule_tac x=g' in exI) apply(rule,rule g') - proof(rule,rule) fix p assume p:"p tagged_division_of {a..b} \ g' fine p" note * = g'(2)[OF this] - show "norm ((\(x, k)\p. content k *\<^sub>R f x) - s) < e" apply-apply(rule lem[OF _ _ *]) - apply(rule order_trans,rule rsum_diff_bound[OF p[THEN conjunct1]]) apply(rule,rule g,assumption) - proof- have "content {a..b} < e / 3 * (real N2)" - using N2 unfolding inverse_eq_divide using as by(auto simp add:field_simps) - hence "content {a..b} < e / 3 * (real (N1 + N2) + 1)" - apply-apply(rule less_le_trans,assumption) using `e>0` by auto - thus "inverse (real (N1 + N2) + 1) * content {a..b} \ e / 3" - unfolding inverse_eq_divide by(auto simp add:field_simps) - show "norm (i (N1 + N2) - s) < e / 3" by(rule N1[rule_format],auto) - qed qed qed qed + show ?case + apply (rule_tac x=g' in exI) + apply rule + apply (rule g') + proof (rule, rule) + fix p + assume p: "p tagged_division_of {a..b} \ g' fine p" + note * = g'(2)[OF this] + show "norm ((\(x, k)\p. content k *\<^sub>R f x) - s) < e" + apply - + apply (rule lem[OF _ _ *]) + apply (rule order_trans) + apply (rule rsum_diff_bound[OF p[THEN conjunct1]]) + apply rule + apply (rule g) + apply assumption + proof - + have "content {a..b} < e / 3 * (real N2)" + using N2 unfolding inverse_eq_divide using as by (auto simp add: field_simps) + then have "content {a..b} < e / 3 * (real (N1 + N2) + 1)" + apply - + apply (rule less_le_trans,assumption) + using `e>0` + apply auto + done + then show "inverse (real (N1 + N2) + 1) * content {a..b} \ e / 3" + unfolding inverse_eq_divide + by (auto simp add: field_simps) + show "norm (i (N1 + N2) - s) < e / 3" + by (rule N1[rule_format]) auto + qed + qed + qed +qed + subsection {* Negligible sets. *} -definition "negligible (s::('a::ordered_euclidean_space) set) \ (\a b. ((indicator s :: 'a\real) has_integral 0) {a..b})" +definition "negligible (s:: 'a::ordered_euclidean_space set) \ + (\a b. ((indicator s :: 'a\real) has_integral 0) {a..b})" + subsection {* Negligibility of hyperplane. *} lemma vsum_nonzero_image_lemma: - assumes "finite s" "g(a) = 0" - "\x\s. \y\s. f x = f y \ x \ y \ g(f x) = 0" + assumes "finite s" + and "g a = 0" + and "\x\s. \y\s. f x = f y \ x \ y \ g (f x) = 0" shows "setsum g {f x |x. x \ s \ f x \ a} = setsum (g o f) s" - unfolding setsum_iterate[OF assms(1)] apply(subst setsum_iterate) defer - apply(rule iterate_nonzero_image_lemma) apply(rule assms monoidal_monoid)+ - unfolding assms using neutral_add unfolding neutral_add using assms by auto - -lemma interval_doublesplit: fixes a::"'a::ordered_euclidean_space" assumes "k\Basis" + unfolding setsum_iterate[OF assms(1)] + apply (subst setsum_iterate) + defer + apply (rule iterate_nonzero_image_lemma) + apply (rule assms monoidal_monoid)+ + unfolding assms + using neutral_add + unfolding neutral_add + using assms + apply auto + done + +lemma interval_doublesplit: + fixes a :: "'a::ordered_euclidean_space" + assumes "k \ Basis" shows "{a..b} \ {x . abs(x\k - c) \ (e::real)} = - {(\i\Basis. (if i = k then max (a\k) (c - e) else a\i) *\<^sub>R i) .. - (\i\Basis. (if i = k then min (b\k) (c + e) else b\i) *\<^sub>R i)}" -proof- have *:"\x c e::real. abs(x - c) \ e \ x \ c - e \ x \ c + e" by auto - have **:"\s P Q. s \ {x. P x \ Q x} = (s \ {x. Q x}) \ {x. P x}" by blast - show ?thesis unfolding * ** interval_split[OF assms] by(rule refl) qed - -lemma division_doublesplit: fixes a::"'a::ordered_euclidean_space" assumes "p division_of {a..b}" and k:"k\Basis" + {(\i\Basis. 
(if i = k then max (a\k) (c - e) else a\i) *\<^sub>R i) .. + (\i\Basis. (if i = k then min (b\k) (c + e) else b\i) *\<^sub>R i)}" +proof - + have *: "\x c e::real. abs(x - c) \ e \ x \ c - e \ x \ c + e" + by auto + have **: "\s P Q. s \ {x. P x \ Q x} = (s \ {x. Q x}) \ {x. P x}" + by blast + show ?thesis + unfolding * ** interval_split[OF assms] by (rule refl) +qed + +lemma division_doublesplit: + fixes a :: "'a::ordered_euclidean_space" + assumes "p division_of {a..b}" + and k: "k \ Basis" shows "{l \ {x. abs(x\k - c) \ e} |l. l \ p \ l \ {x. abs(x\k - c) \ e} \ {}} division_of ({a..b} \ {x. abs(x\k - c) \ e})" -proof- have *:"\x c. abs(x - c) \ e \ x \ c - e \ x \ c + e" by auto - have **:"\p q p' q'. p division_of q \ p = p' \ q = q' \ p' division_of q'" by auto +proof - + have *: "\x c. abs (x - c) \ e \ x \ c - e \ x \ c + e" + by auto + have **: "\p q p' q'. p division_of q \ p = p' \ q = q' \ p' division_of q'" + by auto note division_split(1)[OF assms, where c="c+e",unfolded interval_split[OF k]] note division_split(2)[OF this, where c="c-e" and k=k,OF k] - thus ?thesis apply(rule **) using k apply- unfolding interval_doublesplit unfolding * unfolding interval_split interval_doublesplit - apply(rule set_eqI) unfolding mem_Collect_eq apply rule apply(erule conjE exE)+ apply(rule_tac x=la in exI) defer - apply(erule conjE exE)+ apply(rule_tac x="l \ {x. c + e \ x \ k}" in exI) apply rule defer apply rule - apply(rule_tac x=l in exI) by blast+ qed - -lemma content_doublesplit: fixes a::"'a::ordered_euclidean_space" assumes "0 < e" and k:"k\Basis" - obtains d where "0 < d" "content({a..b} \ {x. abs(x\k - c) \ d}) < e" -proof(cases "content {a..b} = 0") - case True show ?thesis apply(rule that[of 1]) defer unfolding interval_doublesplit[OF k] - apply(rule le_less_trans[OF content_subset]) defer apply(subst True) - unfolding interval_doublesplit[symmetric,OF k] using assms by auto -next case False def d \ "e / 3 / setprod (\i. b\i - a\i) (Basis - {k})" + then show ?thesis + apply (rule **) + using k + apply - + unfolding interval_doublesplit + unfolding * + unfolding interval_split interval_doublesplit + apply (rule set_eqI) + unfolding mem_Collect_eq + apply rule + apply (erule conjE exE)+ + apply (rule_tac x=la in exI) + defer + apply (erule conjE exE)+ + apply (rule_tac x="l \ {x. c + e \ x \ k}" in exI) + apply rule + defer + apply rule + apply (rule_tac x=l in exI) + apply blast+ + done +qed + +lemma content_doublesplit: + fixes a :: "'a::ordered_euclidean_space" + assumes "0 < e" + and k: "k \ Basis" + obtains d where "0 < d" and "content ({a..b} \ {x. abs(x\k - c) \ d}) < e" +proof (cases "content {a..b} = 0") + case True + show ?thesis + apply (rule that[of 1]) + defer + unfolding interval_doublesplit[OF k] + apply (rule le_less_trans[OF content_subset]) + defer + apply (subst True) + unfolding interval_doublesplit[symmetric,OF k] + using assms + apply auto + done +next + case False + def d \ "e / 3 / setprod (\i. b\i - a\i) (Basis - {k})" note False[unfolded content_eq_0 not_ex not_le, rule_format] - hence "\x. x\Basis \ b\x > a\x" by(auto simp add:not_le) - hence prod0:"0 < setprod (\i. b\i - a\i) (Basis - {k})" apply-apply(rule setprod_pos) by(auto simp add:field_simps) - hence "d > 0" unfolding d_def using assms by(auto simp add:field_simps) thus ?thesis - proof(rule that[of d]) have *:"Basis = insert k (Basis - {k})" using k by auto - have **:"{a..b} \ {x. \x \ k - c\ \ d} \ {} \ - (\i\Basis - {k}. interval_upperbound ({a..b} \ {x. 
\x \ k - c\ \ d}) \ i - - interval_lowerbound ({a..b} \ {x. \x \ k - c\ \ d}) \ i) - = (\i\Basis - {k}. b\i - a\i)" apply(rule setprod_cong,rule refl) - unfolding interval_doublesplit[OF k] apply(subst interval_bounds) defer apply(subst interval_bounds) - unfolding interval_eq_empty not_ex not_less by auto - show "content ({a..b} \ {x. \x \ k - c\ \ d}) < e" apply(cases) unfolding content_def apply(subst if_P,assumption,rule assms) - unfolding if_not_P apply(subst *) apply(subst setprod_insert) unfolding ** - unfolding interval_doublesplit[OF k] interval_eq_empty not_ex not_less prefer 3 - apply(subst interval_bounds) defer apply(subst interval_bounds) + then have "\x. x \ Basis \ b\x > a\x" + by (auto simp add:not_le) + then have prod0: "0 < setprod (\i. b\i - a\i) (Basis - {k})" + apply - + apply (rule setprod_pos) + apply (auto simp add: field_simps) + done + then have "d > 0" + unfolding d_def + using assms + by (auto simp add:field_simps) + then show ?thesis + proof (rule that[of d]) + have *: "Basis = insert k (Basis - {k})" + using k by auto + have **: "{a..b} \ {x. \x \ k - c\ \ d} \ {} \ + (\i\Basis - {k}. interval_upperbound ({a..b} \ {x. \x \ k - c\ \ d}) \ i - + interval_lowerbound ({a..b} \ {x. \x \ k - c\ \ d}) \ i) = + (\i\Basis - {k}. b\i - a\i)" + apply (rule setprod_cong) + apply (rule refl) + unfolding interval_doublesplit[OF k] + apply (subst interval_bounds) + defer + apply (subst interval_bounds) + unfolding interval_eq_empty not_ex not_less + apply auto + done + show "content ({a..b} \ {x. \x \ k - c\ \ d}) < e" + apply cases + unfolding content_def + apply (subst if_P) + apply assumption + apply (rule assms) + unfolding if_not_P + apply (subst *) + apply (subst setprod_insert) + unfolding ** + unfolding interval_doublesplit[OF k] interval_eq_empty not_ex not_less + prefer 3 + apply (subst interval_bounds) + defer + apply (subst interval_bounds) apply (simp_all only: k inner_setsum_left_Basis simp_thms if_P cong: bex_cong ball_cong) proof - - have "(min (b \ k) (c + d) - max (a \ k) (c - d)) \ 2 * d" by auto - also have "... < e / (\i\Basis - {k}. b \ i - a \ i)" unfolding d_def using assms prod0 by(auto simp add:field_simps) + have "(min (b \ k) (c + d) - max (a \ k) (c - d)) \ 2 * d" + by auto + also have "\ < e / (\i\Basis - {k}. b \ i - a \ i)" + unfolding d_def + using assms prod0 + by (auto simp add: field_simps) finally show "(min (b \ k) (c + d) - max (a \ k) (c - d)) * (\i\Basis - {k}. b \ i - a \ i) < e" unfolding pos_less_divide_eq[OF prod0] . qed auto @@ -4626,261 +5537,707 @@ fixes k :: "'a::ordered_euclidean_space" assumes k: "k \ Basis" shows "negligible {x. x\k = c}" - unfolding negligible_def has_integral apply(rule,rule,rule,rule) -proof- - case goal1 from content_doublesplit[OF this k,of a b c] guess d . note d=this + unfolding negligible_def has_integral + apply (rule, rule, rule, rule) +proof - + case goal1 + from content_doublesplit[OF this k,of a b c] guess d . note d=this let ?i = "indicator {x::'a. x\k = c} :: 'a\real" - show ?case apply(rule_tac x="\x. ball x d" in exI) apply(rule,rule gauge_ball,rule d) - proof(rule,rule) fix p assume p:"p tagged_division_of {a..b} \ (\x. ball x d) fine p" - have *:"(\(x, ka)\p. content ka *\<^sub>R ?i x) = (\(x, ka)\p. content (ka \ {x. 
abs(x\k - c) \ d}) *\<^sub>R ?i x)" - apply(rule setsum_cong2) unfolding split_paired_all real_scaleR_def mult_cancel_right split_conv - apply(cases,rule disjI1,assumption,rule disjI2) - proof- fix x l assume as:"(x,l)\p" "?i x \ 0" hence xk:"x\k = c" unfolding indicator_def apply-by(rule ccontr,auto) - show "content l = content (l \ {x. \x \ k - c\ \ d})" apply(rule arg_cong[where f=content]) - apply(rule set_eqI,rule,rule) unfolding mem_Collect_eq - proof- fix y assume y:"y\l" note p[THEN conjunct2,unfolded fine_def,rule_format,OF as(1),unfolded split_conv] - note this[unfolded subset_eq mem_ball dist_norm,rule_format,OF y] note le_less_trans[OF Basis_le_norm[OF k] this] - thus "\y \ k - c\ \ d" unfolding inner_simps xk by auto - qed auto qed + show ?case + apply (rule_tac x="\x. ball x d" in exI) + apply rule + apply (rule gauge_ball) + apply (rule d) + proof (rule, rule) + fix p + assume p: "p tagged_division_of {a..b} \ (\x. ball x d) fine p" + have *: "(\(x, ka)\p. content ka *\<^sub>R ?i x) = + (\(x, ka)\p. content (ka \ {x. abs(x\k - c) \ d}) *\<^sub>R ?i x)" + apply (rule setsum_cong2) + unfolding split_paired_all real_scaleR_def mult_cancel_right split_conv + apply cases + apply (rule disjI1) + apply assumption + apply (rule disjI2) + proof - + fix x l + assume as: "(x, l) \ p" "?i x \ 0" + then have xk: "x\k = c" + unfolding indicator_def + apply - + apply (rule ccontr) + apply auto + done + show "content l = content (l \ {x. \x \ k - c\ \ d})" + apply (rule arg_cong[where f=content]) + apply (rule set_eqI) + apply rule + apply rule + unfolding mem_Collect_eq + proof - + fix y + assume y: "y \ l" + note p[THEN conjunct2,unfolded fine_def,rule_format,OF as(1),unfolded split_conv] + note this[unfolded subset_eq mem_ball dist_norm,rule_format,OF y] + note le_less_trans[OF Basis_le_norm[OF k] this] + then show "\y \ k - c\ \ d" + unfolding inner_simps xk by auto + qed auto + qed note p'= tagged_division_ofD[OF p[THEN conjunct1]] and p''=division_of_tagged_division[OF p[THEN conjunct1]] - show "norm ((\(x, ka)\p. content ka *\<^sub>R ?i x) - 0) < e" unfolding diff_0_right * unfolding real_scaleR_def real_norm_def - apply(subst abs_of_nonneg) apply(rule setsum_nonneg,rule) unfolding split_paired_all split_conv - apply(rule mult_nonneg_nonneg) apply(drule p'(4)) apply(erule exE)+ apply(rule_tac b=b in back_subst) - prefer 2 apply(subst(asm) eq_commute) apply assumption - apply(subst interval_doublesplit[OF k]) apply(rule content_pos_le) apply(rule indicator_pos_le) - proof- have "(\(x, ka)\p. content (ka \ {x. \x \ k - c\ \ d}) * ?i x) \ (\(x, ka)\p. content (ka \ {x. \x \ k - c\ \ d}))" - apply(rule setsum_mono) unfolding split_paired_all split_conv - apply(rule mult_right_le_one_le) apply(drule p'(4)) by(auto simp add:interval_doublesplit[OF k]) - also have "... < e" apply(subst setsum_over_tagged_division_lemma[OF p[THEN conjunct1]]) - proof- case goal1 have "content ({u..v} \ {x. \x \ k - c\ \ d}) \ content {u..v}" - unfolding interval_doublesplit[OF k] apply(rule content_subset) unfolding interval_doublesplit[symmetric,OF k] by auto - thus ?case unfolding goal1 unfolding interval_doublesplit[OF k] + show "norm ((\(x, ka)\p. 
content ka *\<^sub>R ?i x) - 0) < e" + unfolding diff_0_right * + unfolding real_scaleR_def real_norm_def + apply (subst abs_of_nonneg) + apply (rule setsum_nonneg) + apply rule + unfolding split_paired_all split_conv + apply (rule mult_nonneg_nonneg) + apply (drule p'(4)) + apply (erule exE)+ + apply(rule_tac b=b in back_subst) + prefer 2 + apply (subst(asm) eq_commute) + apply assumption + apply (subst interval_doublesplit[OF k]) + apply (rule content_pos_le) + apply (rule indicator_pos_le) + proof - + have "(\(x, ka)\p. content (ka \ {x. \x \ k - c\ \ d}) * ?i x) \ + (\(x, ka)\p. content (ka \ {x. \x \ k - c\ \ d}))" + apply (rule setsum_mono) + unfolding split_paired_all split_conv + apply (rule mult_right_le_one_le) + apply (drule p'(4)) + apply (auto simp add:interval_doublesplit[OF k]) + done + also have "\ < e" + apply (subst setsum_over_tagged_division_lemma[OF p[THEN conjunct1]]) + proof - + case goal1 + have "content ({u..v} \ {x. \x \ k - c\ \ d}) \ content {u..v}" + unfolding interval_doublesplit[OF k] + apply (rule content_subset) + unfolding interval_doublesplit[symmetric,OF k] + apply auto + done + then show ?case + unfolding goal1 + unfolding interval_doublesplit[OF k] by (blast intro: antisym) - next have *:"setsum content {l \ {x. \x \ k - c\ \ d} |l. l \ snd ` p \ l \ {x. \x \ k - c\ \ d} \ {}} \ 0" - apply(rule setsum_nonneg,rule) unfolding mem_Collect_eq image_iff apply(erule exE bexE conjE)+ unfolding split_paired_all - proof- fix x l a b assume as:"x = l \ {x. \x \ k - c\ \ d}" "(a, b) \ p" "l = snd (a, b)" - guess u v using p'(4)[OF as(2)] apply-by(erule exE)+ note * = this - show "content x \ 0" unfolding as snd_conv * interval_doublesplit[OF k] by(rule content_pos_le) - qed have **:"norm (1::real) \ 1" by auto note division_doublesplit[OF p'' k,unfolded interval_doublesplit[OF k]] + next + have *: "setsum content {l \ {x. \x \ k - c\ \ d} |l. l \ snd ` p \ l \ {x. \x \ k - c\ \ d} \ {}} \ 0" + apply (rule setsum_nonneg) + apply rule + unfolding mem_Collect_eq image_iff + apply (erule exE bexE conjE)+ + unfolding split_paired_all + proof - + fix x l a b + assume as: "x = l \ {x. \x \ k - c\ \ d}" "(a, b) \ p" "l = snd (a, b)" + guess u v using p'(4)[OF as(2)] by (elim exE) note * = this + show "content x \ 0" + unfolding as snd_conv * interval_doublesplit[OF k] + by (rule content_pos_le) + qed + have **: "norm (1::real) \ 1" + by auto + note division_doublesplit[OF p'' k,unfolded interval_doublesplit[OF k]] note dsum_bound[OF this **,unfolded interval_doublesplit[symmetric,OF k]] - note this[unfolded real_scaleR_def real_norm_def mult_1_right mult_1, of c d] note le_less_trans[OF this d(2)] - from this[unfolded abs_of_nonneg[OF *]] show "(\ka\snd ` p. content (ka \ {x. \x \ k - c\ \ d})) < e" - apply(subst vsum_nonzero_image_lemma[of "snd ` p" content "{}", unfolded o_def,symmetric]) - apply(rule finite_imageI p' content_empty)+ unfolding forall_in_division[OF p''] - proof(rule,rule,rule,rule,rule,rule,rule,erule conjE) fix m n u v - assume as:"{m..n} \ snd ` p" "{u..v} \ snd ` p" "{m..n} \ {u..v}" "{m..n} \ {x. \x \ k - c\ \ d} = {u..v} \ {x. \x \ k - c\ \ d}" - have "({m..n} \ {x. \x \ k - c\ \ d}) \ ({u..v} \ {x. \x \ k - c\ \ d}) \ {m..n} \ {u..v}" by blast + note this[unfolded real_scaleR_def real_norm_def mult_1_right mult_1, of c d] + note le_less_trans[OF this d(2)] + from this[unfolded abs_of_nonneg[OF *]] + show "(\ka\snd ` p. content (ka \ {x. 
\x \ k - c\ \ d})) < e" + apply (subst vsum_nonzero_image_lemma[of "snd ` p" content "{}", unfolded o_def,symmetric]) + apply (rule finite_imageI p' content_empty)+ + unfolding forall_in_division[OF p''] + proof (rule,rule,rule,rule,rule,rule,rule,erule conjE) + fix m n u v + assume as: + "{m..n} \ snd ` p" "{u..v} \ snd ` p" + "{m..n} \ {u..v}" + "{m..n} \ {x. \x \ k - c\ \ d} = {u..v} \ {x. \x \ k - c\ \ d}" + have "({m..n} \ {x. \x \ k - c\ \ d}) \ ({u..v} \ {x. \x \ k - c\ \ d}) \ {m..n} \ {u..v}" + by blast note interior_mono[OF this, unfolded division_ofD(5)[OF p'' as(1-3)] interior_inter[of "{m..n}"]] - hence "interior ({m..n} \ {x. \x \ k - c\ \ d}) = {}" unfolding as Int_absorb by auto - thus "content ({m..n} \ {x. \x \ k - c\ \ d}) = 0" unfolding interval_doublesplit[OF k] content_eq_0_interior[symmetric] . - qed qed + then have "interior ({m..n} \ {x. \x \ k - c\ \ d}) = {}" + unfolding as Int_absorb by auto + then show "content ({m..n} \ {x. \x \ k - c\ \ d}) = 0" + unfolding interval_doublesplit[OF k] content_eq_0_interior[symmetric] . + qed + qed finally show "(\(x, ka)\p. content (ka \ {x. \x \ k - c\ \ d}) * ?i x) < e" . - qed qed qed + qed + qed +qed + subsection {* A technical lemma about "refinement" of division. *} -lemma tagged_division_finer: fixes p::"(('a::ordered_euclidean_space) \ (('a::ordered_euclidean_space) set)) set" - assumes "p tagged_division_of {a..b}" "gauge d" - obtains q where "q tagged_division_of {a..b}" "d fine q" "\(x,k) \ p. k \ d(x) \ (x,k) \ q" -proof- +lemma tagged_division_finer: + fixes p :: "('a::ordered_euclidean_space \ ('a::ordered_euclidean_space set)) set" + assumes "p tagged_division_of {a..b}" + and "gauge d" + obtains q where "q tagged_division_of {a..b}" + and "d fine q" + and "\(x,k) \ p. k \ d(x) \ (x,k) \ q" +proof - let ?P = "\p. p tagged_partial_division_of {a..b} \ gauge d \ (\q. q tagged_division_of (\{k. \x. (x,k) \ p}) \ d fine q \ - (\(x,k) \ p. k \ d(x) \ (x,k) \ q))" - { have *:"finite p" "p tagged_partial_division_of {a..b}" using assms(1) unfolding tagged_division_of_def by auto - presume "\p. finite p \ ?P p" from this[rule_format,OF * assms(2)] guess q .. note q=this - thus ?thesis apply-apply(rule that[of q]) unfolding tagged_division_ofD[OF assms(1)] by auto - } fix p::"(('a::ordered_euclidean_space) \ (('a::ordered_euclidean_space) set)) set" assume as:"finite p" - show "?P p" apply(rule,rule) using as proof(induct p) - case empty show ?case apply(rule_tac x="{}" in exI) unfolding fine_def by auto - next case (insert xk p) guess x k using surj_pair[of xk] apply- by(erule exE)+ note xk=this + (\(x,k) \ p. k \ d(x) \ (x,k) \ q))" + { + have *: "finite p" "p tagged_partial_division_of {a..b}" + using assms(1) + unfolding tagged_division_of_def + by auto + presume "\p. finite p \ ?P p" + from this[rule_format,OF * assms(2)] guess q .. note q=this + then show ?thesis + apply - + apply (rule that[of q]) + unfolding tagged_division_ofD[OF assms(1)] + apply auto + done + } + fix p :: "('a::ordered_euclidean_space \ ('a::ordered_euclidean_space set)) set" + assume as: "finite p" + show "?P p" + apply rule + apply rule + using as + proof (induct p) + case empty + show ?case + apply (rule_tac x="{}" in exI) + unfolding fine_def + apply auto + done + next + case (insert xk p) + guess x k using surj_pair[of xk] by (elim exE) note xk=this note tagged_partial_division_subset[OF insert(4) subset_insertI] from insert(3)[OF this insert(5)] guess q1 .. note q1 = conjunctD3[OF this] - have *:"\{l. \y. (y,l) \ insert xk p} = k \ \{l. 
\y. (y,l) \ p}" unfolding xk by auto + have *: "\{l. \y. (y,l) \ insert xk p} = k \ \{l. \y. (y,l) \ p}" + unfolding xk by auto note p = tagged_partial_division_ofD[OF insert(4)] - from p(4)[unfolded xk, OF insertI1] guess u v apply-by(erule exE)+ note uv=this + from p(4)[unfolded xk, OF insertI1] guess u v by (elim exE) note uv=this have "finite {k. \x. (x, k) \ p}" - apply(rule finite_subset[of _ "snd ` p"],rule) unfolding subset_eq image_iff mem_Collect_eq - apply(erule exE,rule_tac x="(xa,x)" in bexI) using p by auto - hence int:"interior {u..v} \ interior (\{k. \x. (x, k) \ p}) = {}" - apply(rule inter_interior_unions_intervals) apply(rule open_interior) apply(rule_tac[!] ballI) - unfolding mem_Collect_eq apply(erule_tac[!] exE) apply(drule p(4)[OF insertI2],assumption) - apply(rule p(5)) unfolding uv xk apply(rule insertI1,rule insertI2) apply assumption - using insert(2) unfolding uv xk by auto - - show ?case proof(cases "{u..v} \ d x") - case True thus ?thesis apply(rule_tac x="{(x,{u..v})} \ q1" in exI) apply rule - unfolding * uv apply(rule tagged_division_union,rule tagged_division_of_self) - apply(rule p[unfolded xk uv] insertI1)+ apply(rule q1,rule int) - apply(rule,rule fine_union,subst fine_def) defer apply(rule q1) - unfolding Ball_def split_paired_All split_conv apply(rule,rule,rule,rule) - apply(erule insertE) defer apply(rule UnI2) apply(drule q1(3)[rule_format]) unfolding xk uv by auto - next case False from fine_division_exists[OF assms(2), of u v] guess q2 . note q2=this - show ?thesis apply(rule_tac x="q2 \ q1" in exI) - apply rule unfolding * uv apply(rule tagged_division_union q2 q1 int fine_union)+ - unfolding Ball_def split_paired_All split_conv apply rule apply(rule fine_union) - apply(rule q1 q2)+ apply(rule,rule,rule,rule) apply(erule insertE) - apply(rule UnI2) defer apply(drule q1(3)[rule_format])using False unfolding xk uv by auto - qed qed qed + apply (rule finite_subset[of _ "snd ` p"],rule) + unfolding subset_eq image_iff mem_Collect_eq + apply (erule exE) + apply (rule_tac x="(xa,x)" in bexI) + using p + apply auto + done + then have int: "interior {u..v} \ interior (\{k. \x. (x, k) \ p}) = {}" + apply (rule inter_interior_unions_intervals) + apply (rule open_interior) + apply (rule_tac[!] ballI) + unfolding mem_Collect_eq + apply (erule_tac[!] exE) + apply (drule p(4)[OF insertI2]) + apply assumption + apply (rule p(5)) + unfolding uv xk + apply (rule insertI1) + apply (rule insertI2) + apply assumption + using insert(2) + unfolding uv xk + apply auto + done + show ?case + proof (cases "{u..v} \ d x") + case True + then show ?thesis + apply (rule_tac x="{(x,{u..v})} \ q1" in exI) + apply rule + unfolding * uv + apply (rule tagged_division_union) + apply (rule tagged_division_of_self) + apply (rule p[unfolded xk uv] insertI1)+ + apply (rule q1) + apply (rule int) + apply rule + apply (rule fine_union) + apply (subst fine_def) + defer + apply (rule q1) + unfolding Ball_def split_paired_All split_conv + apply rule + apply rule + apply rule + apply rule + apply (erule insertE) + defer + apply (rule UnI2) + apply (drule q1(3)[rule_format]) + unfolding xk uv + apply auto + done + next + case False + from fine_division_exists[OF assms(2), of u v] guess q2 . 
note q2=this + show ?thesis + apply (rule_tac x="q2 \ q1" in exI) + apply rule + unfolding * uv + apply (rule tagged_division_union q2 q1 int fine_union)+ + unfolding Ball_def split_paired_All split_conv + apply rule + apply (rule fine_union) + apply (rule q1 q2)+ + apply rule + apply rule + apply rule + apply rule + apply (erule insertE) + apply (rule UnI2) + defer + apply (drule q1(3)[rule_format]) + using False + unfolding xk uv + apply auto + done + qed + qed +qed + subsection {* Hence the main theorem about negligible sets. *} -lemma finite_product_dependent: assumes "finite s" "\x. x\s\ finite (t x)" - shows "finite {(i, j) |i j. i \ s \ j \ t i}" using assms -proof(induct) case (insert x s) - have *:"{(i, j) |i j. i \ insert x s \ j \ t i} = (\y. (x,y)) ` (t x) \ {(i, j) |i j. i \ s \ j \ t i}" by auto - show ?case unfolding * apply(rule finite_UnI) using insert by auto qed auto - -lemma sum_sum_product: assumes "finite s" "\i\s. finite (t i)" - shows "setsum (\i. setsum (x i) (t i)::real) s = setsum (\(i,j). x i j) {(i,j) | i j. i \ s \ j \ t i}" using assms -proof(induct) case (insert a s) - have *:"{(i, j) |i j. i \ insert a s \ j \ t i} = (\y. (a,y)) ` (t a) \ {(i, j) |i j. i \ s \ j \ t i}" by auto - show ?case unfolding * apply(subst setsum_Un_disjoint) unfolding setsum_insert[OF insert(1-2)] - prefer 4 apply(subst insert(3)) unfolding add_right_cancel - proof- show "setsum (x a) (t a) = (\(xa, y)\Pair a ` t a. x xa y)" apply(subst setsum_reindex) unfolding inj_on_def by auto - show "finite {(i, j) |i j. i \ s \ j \ t i}" apply(rule finite_product_dependent) using insert by auto - qed(insert insert, auto) qed auto - -lemma has_integral_negligible: fixes f::"'b::ordered_euclidean_space \ 'a::real_normed_vector" - assumes "negligible s" "\x\(t - s). f x = 0" +lemma finite_product_dependent: + assumes "finite s" + and "\x. x \ s \ finite (t x)" + shows "finite {(i, j) |i j. i \ s \ j \ t i}" + using assms +proof induct + case (insert x s) + have *: "{(i, j) |i j. i \ insert x s \ j \ t i} = + (\y. (x,y)) ` (t x) \ {(i, j) |i j. i \ s \ j \ t i}" by auto + show ?case + unfolding * + apply (rule finite_UnI) + using insert + apply auto + done +qed auto + +lemma sum_sum_product: + assumes "finite s" + and "\i\s. finite (t i)" + shows "setsum (\i. setsum (x i) (t i)::real) s = + setsum (\(i,j). x i j) {(i,j) | i j. i \ s \ j \ t i}" + using assms +proof induct + case (insert a s) + have *: "{(i, j) |i j. i \ insert a s \ j \ t i} = + (\y. (a,y)) ` (t a) \ {(i, j) |i j. i \ s \ j \ t i}" by auto + show ?case + unfolding * + apply (subst setsum_Un_disjoint) + unfolding setsum_insert[OF insert(1-2)] + prefer 4 + apply (subst insert(3)) + unfolding add_right_cancel + proof - + show "setsum (x a) (t a) = (\(xa, y)\ Pair a ` t a. x xa y)" + apply (subst setsum_reindex) + unfolding inj_on_def + apply auto + done + show "finite {(i, j) |i j. i \ s \ j \ t i}" + apply (rule finite_product_dependent) + using insert + apply auto + done + qed (insert insert, auto) +qed auto + +lemma has_integral_negligible: + fixes f :: "'b::ordered_euclidean_space \ 'a::real_normed_vector" + assumes "negligible s" + and "\x\(t - s). f x = 0" shows "(f has_integral 0) t" -proof- presume P:"\f::'b::ordered_euclidean_space \ 'a. \a b. (\x. ~(x \ s) \ f x = 0) \ (f has_integral 0) ({a..b})" +proof - + presume P: "\f::'b::ordered_euclidean_space \ 'a. + \a b. \x. x \ s \ f x = 0 \ (f has_integral 0) {a..b}" let ?f = "(\x. 
if x \ t then f x else 0)" - show ?thesis apply(rule_tac f="?f" in has_integral_eq) apply(rule) unfolding if_P apply(rule refl) - apply(subst has_integral_alt) apply(cases,subst if_P,assumption) unfolding if_not_P - proof- assume "\a b. t = {a..b}" then guess a b apply-by(erule exE)+ note t = this - show "(?f has_integral 0) t" unfolding t apply(rule P) using assms(2) unfolding t by auto - next show "\e>0. \B>0. \a b. ball 0 B \ {a..b} \ (\z. ((\x. if x \ t then ?f x else 0) has_integral z) {a..b} \ norm (z - 0) < e)" - apply(safe,rule_tac x=1 in exI,rule) apply(rule zero_less_one,safe) apply(rule_tac x=0 in exI) - apply(rule,rule P) using assms(2) by auto + show ?thesis + apply (rule_tac f="?f" in has_integral_eq) + apply rule + unfolding if_P + apply (rule refl) + apply (subst has_integral_alt) + apply cases + apply (subst if_P, assumption) + unfolding if_not_P + proof - + assume "\a b. t = {a..b}" + then guess a b apply - by (erule exE)+ note t = this + show "(?f has_integral 0) t" + unfolding t + apply (rule P) + using assms(2) + unfolding t + apply auto + done + next + show "\e>0. \B>0. \a b. ball 0 B \ {a..b} \ + (\z. ((\x. if x \ t then ?f x else 0) has_integral z) {a..b} \ norm (z - 0) < e)" + apply safe + apply (rule_tac x=1 in exI) + apply rule + apply (rule zero_less_one) + apply safe + apply (rule_tac x=0 in exI) + apply rule + apply (rule P) + using assms(2) + apply auto + done qed -next fix f::"'b \ 'a" and a b::"'b" assume assm:"\x. x \ s \ f x = 0" - show "(f has_integral 0) {a..b}" unfolding has_integral - proof(safe) case goal1 - hence "\n. e / 2 / ((real n+1) * (2 ^ n)) > 0" - apply-apply(rule divide_pos_pos) defer apply(rule mult_pos_pos) by(auto simp add:field_simps) - note assms(1)[unfolded negligible_def has_integral,rule_format,OF this,of a b] note allI[OF this,of "\x. x"] +next + fix f :: "'b \ 'a" + fix a b :: 'b + assume assm: "\x. x \ s \ f x = 0" + show "(f has_integral 0) {a..b}" + unfolding has_integral + proof safe + case goal1 + then have "\n. e / 2 / ((real n+1) * (2 ^ n)) > 0" + apply - + apply (rule divide_pos_pos) + defer + apply (rule mult_pos_pos) + apply (auto simp add:field_simps) + done + note assms(1)[unfolded negligible_def has_integral,rule_format,OF this,of a b] + note allI[OF this,of "\x. x"] from choice[OF this] guess d .. note d=conjunctD2[OF this[rule_format]] - show ?case apply(rule_tac x="\x. d (nat \norm (f x)\) x" in exI) - proof safe show "gauge (\x. d (nat \norm (f x)\) x)" using d(1) unfolding gauge_def by auto - fix p assume as:"p tagged_division_of {a..b}" "(\x. d (nat \norm (f x)\) x) fine p" + show ?case + apply (rule_tac x="\x. d (nat \norm (f x)\) x" in exI) + proof safe + show "gauge (\x. d (nat \norm (f x)\) x)" + using d(1) unfolding gauge_def by auto + fix p + assume as: "p tagged_division_of {a..b}" "(\x. d (nat \norm (f x)\) x) fine p" let ?goal = "norm ((\(x, k)\p. content k *\<^sub>R f x) - 0) < e" - { presume "p\{} \ ?goal" thus ?goal apply(cases "p={}") using goal1 by auto } - assume as':"p \ {}" from real_arch_simple[of "Sup((\(x,k). norm(f x)) ` p)"] guess N .. - hence N:"\x\(\(x, k). norm (f x)) ` p. x \ real N" apply(subst(asm) cSup_finite_le_iff) using as as' by auto + { + presume "p \ {} \ ?goal" + then show ?goal + apply (cases "p = {}") + using goal1 + apply auto + done + } + assume as': "p \ {}" + from real_arch_simple[of "Sup((\(x,k). norm(f x)) ` p)"] guess N .. + then have N: "\x\(\(x, k). norm (f x)) ` p. x \ real N" + apply (subst(asm) cSup_finite_le_iff) + using as as' + apply auto + done have "\i. \q. 
q tagged_division_of {a..b} \ (d i) fine q \ (\(x, k)\p. k \ (d i) x \ (x, k) \ q)" - apply(rule,rule tagged_division_finer[OF as(1) d(1)]) by auto + apply rule + apply (rule tagged_division_finer[OF as(1) d(1)]) + apply auto + done from choice[OF this] guess q .. note q=conjunctD3[OF this[rule_format]] - have *:"\i. (\(x, k)\q i. content k *\<^sub>R indicator s x) \ (0::real)" apply(rule setsum_nonneg,safe) - unfolding real_scaleR_def apply(rule mult_nonneg_nonneg) apply(drule tagged_division_ofD(4)[OF q(1)]) by auto - have **:"\f g s t. finite s \ finite t \ (\(x,y) \ t. (0::real) \ g(x,y)) \ (\y\s. \x. (x,y) \ t \ f(y) \ g(x,y)) \ setsum f s \ setsum g t" - proof- case goal1 thus ?case apply-apply(rule setsum_le_included[of s t g snd f]) prefer 4 - apply safe apply(erule_tac x=x in ballE) apply(erule exE) apply(rule_tac x="(xa,x)" in bexI) by auto qed + have *: "\i. (\(x, k)\q i. content k *\<^sub>R indicator s x) \ (0::real)" + apply (rule setsum_nonneg) + apply safe + unfolding real_scaleR_def + apply (rule mult_nonneg_nonneg) + apply (drule tagged_division_ofD(4)[OF q(1)]) + apply auto + done + have **: "\f g s t. finite s \ finite t \ (\(x,y) \ t. (0::real) \ g(x,y)) \ + (\y\s. \x. (x,y) \ t \ f(y) \ g(x,y)) \ setsum f s \ setsum g t" + proof - + case goal1 + then show ?case + apply - + apply (rule setsum_le_included[of s t g snd f]) + prefer 4 + apply safe + apply (erule_tac x=x in ballE) + apply (erule exE) + apply (rule_tac x="(xa,x)" in bexI) + apply auto + done + qed have "norm ((\(x, k)\p. content k *\<^sub>R f x) - 0) \ setsum (\i. (real i + 1) * - norm(setsum (\(x,k). content k *\<^sub>R indicator s x :: real) (q i))) {0..N+1}" + norm (setsum (\(x,k). content k *\<^sub>R indicator s x :: real) (q i))) {0..N+1}" unfolding real_norm_def setsum_right_distrib abs_of_nonneg[OF *] diff_0_right - apply(rule order_trans,rule norm_setsum) apply(subst sum_sum_product) prefer 3 - proof(rule **,safe) show "finite {(i, j) |i j. i \ {0..N + 1} \ j \ q i}" apply(rule finite_product_dependent) using q by auto - fix i a b assume as'':"(a,b) \ q i" show "0 \ (real i + 1) * (content b *\<^sub>R indicator s a)" - unfolding real_scaleR_def apply(rule mult_nonneg_nonneg) defer apply(rule mult_nonneg_nonneg) - using tagged_division_ofD(4)[OF q(1) as''] by auto - next fix i::nat show "finite (q i)" using q by auto - next fix x k assume xk:"(x,k) \ p" def n \ "nat \norm (f x)\" - have *:"norm (f x) \ (\(x, k). norm (f x)) ` p" using xk by auto - have nfx:"real n \ norm(f x)" "norm(f x) \ real n + 1" unfolding n_def by auto - hence "n \ {0..N + 1}" using N[rule_format,OF *] by auto - moreover note as(2)[unfolded fine_def,rule_format,OF xk,unfolded split_conv] - note q(3)[rule_format,OF xk,unfolded split_conv,rule_format,OF this] note this[unfolded n_def[symmetric]] - moreover have "norm (content k *\<^sub>R f x) \ (real n + 1) * (content k * indicator s x)" - proof(cases "x\s") case False thus ?thesis using assm by auto - next case True have *:"content k \ 0" using tagged_division_ofD(4)[OF as(1) xk] by auto - moreover have "content k * norm (f x) \ content k * (real n + 1)" apply(rule mult_mono) using nfx * by auto - ultimately show ?thesis unfolding abs_mult using nfx True by(auto simp add:field_simps) - qed ultimately show "\y. (y, x, k) \ {(i, j) |i j. 
i \ {0..N + 1} \ j \ q i} \ norm (content k *\<^sub>R f x) \ (real y + 1) * (content k *\<^sub>R indicator s x)" - apply(rule_tac x=n in exI,safe) apply(rule_tac x=n in exI,rule_tac x="(x,k)" in exI,safe) by auto - qed(insert as, auto) - also have "... \ setsum (\i. e / 2 / 2 ^ i) {0..N+1}" apply(rule setsum_mono) - proof- case goal1 thus ?case apply(subst mult_commute, subst pos_le_divide_eq[symmetric]) - using d(2)[rule_format,of "q i" i] using q[rule_format] by(auto simp add:field_simps) - qed also have "... < e * inverse 2 * 2" unfolding divide_inverse setsum_right_distrib[symmetric] - apply(rule mult_strict_left_mono) unfolding power_inverse atLeastLessThanSuc_atLeastAtMost[symmetric] - apply(subst sumr_geometric) using goal1 by auto - finally show "?goal" by auto qed qed qed - -lemma has_integral_spike: fixes f::"'b::ordered_euclidean_space \ 'a::real_normed_vector" - assumes "negligible s" "(\x\(t - s). g x = f x)" "(f has_integral y) t" + apply (rule order_trans) + apply (rule norm_setsum) + apply (subst sum_sum_product) + prefer 3 + proof (rule **, safe) + show "finite {(i, j) |i j. i \ {0..N + 1} \ j \ q i}" + apply (rule finite_product_dependent) + using q + apply auto + done + fix i a b + assume as'': "(a, b) \ q i" + show "0 \ (real i + 1) * (content b *\<^sub>R indicator s a)" + unfolding real_scaleR_def + apply (rule mult_nonneg_nonneg) + defer + apply (rule mult_nonneg_nonneg) + using tagged_division_ofD(4)[OF q(1) as''] + apply auto + done + next + fix i :: nat + show "finite (q i)" + using q by auto + next + fix x k + assume xk: "(x, k) \ p" + def n \ "nat \norm (f x)\" + have *: "norm (f x) \ (\(x, k). norm (f x)) ` p" + using xk by auto + have nfx: "real n \ norm (f x)" "norm (f x) \ real n + 1" + unfolding n_def by auto + then have "n \ {0..N + 1}" + using N[rule_format,OF *] by auto + moreover + note as(2)[unfolded fine_def,rule_format,OF xk,unfolded split_conv] + note q(3)[rule_format,OF xk,unfolded split_conv,rule_format,OF this] + note this[unfolded n_def[symmetric]] + moreover + have "norm (content k *\<^sub>R f x) \ (real n + 1) * (content k * indicator s x)" + proof (cases "x \ s") + case False + then show ?thesis + using assm by auto + next + case True + have *: "content k \ 0" + using tagged_division_ofD(4)[OF as(1) xk] by auto + moreover + have "content k * norm (f x) \ content k * (real n + 1)" + apply (rule mult_mono) + using nfx * + apply auto + done + ultimately + show ?thesis + unfolding abs_mult + using nfx True + by (auto simp add: field_simps) + qed + ultimately show "\y. (y, x, k) \ {(i, j) |i j. i \ {0..N + 1} \ j \ q i} \ norm (content k *\<^sub>R f x) \ + (real y + 1) * (content k *\<^sub>R indicator s x)" + apply (rule_tac x=n in exI) + apply safe + apply (rule_tac x=n in exI) + apply (rule_tac x="(x,k)" in exI) + apply safe + apply auto + done + qed (insert as, auto) + also have "\ \ setsum (\i. 
e / 2 / 2 ^ i) {0..N+1}" + apply (rule setsum_mono) + proof - + case goal1 + then show ?case + apply (subst mult_commute, subst pos_le_divide_eq[symmetric]) + using d(2)[rule_format,of "q i" i] + using q[rule_format] + apply (auto simp add: field_simps) + done + qed + also have "\ < e * inverse 2 * 2" + unfolding divide_inverse setsum_right_distrib[symmetric] + apply (rule mult_strict_left_mono) + unfolding power_inverse atLeastLessThanSuc_atLeastAtMost[symmetric] + apply (subst sumr_geometric) + using goal1 + apply auto + done + finally show "?goal" by auto + qed + qed +qed + +lemma has_integral_spike: + fixes f :: "'b::ordered_euclidean_space \ 'a::real_normed_vector" + assumes "negligible s" + and "(\x\(t - s). g x = f x)" + and "(f has_integral y) t" shows "(g has_integral y) t" -proof- { fix a b::"'b" and f g ::"'b \ 'a" and y::'a - assume as:"\x \ {a..b} - s. g x = f x" "(f has_integral y) {a..b}" - have "((\x. f x + (g x - f x)) has_integral (y + 0)) {a..b}" apply(rule has_integral_add[OF as(2)]) - apply(rule has_integral_negligible[OF assms(1)]) using as by auto - hence "(g has_integral y) {a..b}" by auto } note * = this - show ?thesis apply(subst has_integral_alt) using assms(2-) apply-apply(rule cond_cases,safe) - apply(rule *, assumption+) apply(subst(asm) has_integral_alt) unfolding if_not_P - apply(erule_tac x=e in allE,safe,rule_tac x=B in exI,safe) apply(erule_tac x=a in allE,erule_tac x=b in allE,safe) - apply(rule_tac x=z in exI,safe) apply(rule *[where fa2="\x. if x\t then f x else 0"]) by auto qed +proof - + { + fix a b :: 'b + fix f g :: "'b \ 'a" + fix y :: 'a + assume as: "\x \ {a..b} - s. g x = f x" "(f has_integral y) {a..b}" + have "((\x. f x + (g x - f x)) has_integral (y + 0)) {a..b}" + apply (rule has_integral_add[OF as(2)]) + apply (rule has_integral_negligible[OF assms(1)]) + using as + apply auto + done + then have "(g has_integral y) {a..b}" + by auto + } note * = this + show ?thesis + apply (subst has_integral_alt) + using assms(2-) + apply - + apply (rule cond_cases) + apply safe + apply (rule *) + apply assumption+ + apply (subst(asm) has_integral_alt) + unfolding if_not_P + apply (erule_tac x=e in allE) + apply safe + apply (rule_tac x=B in exI) + apply safe + apply (erule_tac x=a in allE) + apply (erule_tac x=b in allE) + apply safe + apply (rule_tac x=z in exI) + apply safe + apply (rule *[where fa2="\x. if x\t then f x else 0"]) + apply auto + done +qed lemma has_integral_spike_eq: - assumes "negligible s" "\x\(t - s). g x = f x" + assumes "negligible s" + and "\x\(t - s). g x = f x" shows "((f has_integral y) t \ (g has_integral y) t)" - apply rule apply(rule_tac[!] has_integral_spike[OF assms(1)]) using assms(2) by auto - -lemma integrable_spike: assumes "negligible s" "\x\(t - s). g x = f x" "f integrable_on t" + apply rule + apply (rule_tac[!] has_integral_spike[OF assms(1)]) + using assms(2) + apply auto + done + +lemma integrable_spike: + assumes "negligible s" + and "\x\(t - s). g x = f x" + and "f integrable_on t" shows "g integrable_on t" - using assms unfolding integrable_on_def apply-apply(erule exE) - apply(rule,rule has_integral_spike) by fastforce+ - -lemma integral_spike: assumes "negligible s" "\x\(t - s). g x = f x" + using assms + unfolding integrable_on_def + apply - + apply (erule exE) + apply rule + apply (rule has_integral_spike) + apply fastforce+ + done + +lemma integral_spike: + assumes "negligible s" + and "\x\(t - s). 
g x = f x" shows "integral t f = integral t g" - unfolding integral_def using has_integral_spike_eq[OF assms] by auto + unfolding integral_def + using has_integral_spike_eq[OF assms] + by auto + subsection {* Some other trivialities about negligible sets. *} -lemma negligible_subset[intro]: assumes "negligible s" "t \ s" shows "negligible t" unfolding negligible_def -proof(safe) case goal1 show ?case using assms(1)[unfolded negligible_def,rule_format,of a b] - apply-apply(rule has_integral_spike[OF assms(1)]) defer apply assumption - using assms(2) unfolding indicator_def by auto qed - -lemma negligible_diff[intro?]: assumes "negligible s" shows "negligible(s - t)" using assms by auto - -lemma negligible_inter: assumes "negligible s \ negligible t" shows "negligible(s \ t)" using assms by auto - -lemma negligible_union: assumes "negligible s" "negligible t" shows "negligible (s \ t)" unfolding negligible_def -proof safe case goal1 note assm = assms[unfolded negligible_def,rule_format,of a b] - thus ?case apply(subst has_integral_spike_eq[OF assms(2)]) - defer apply assumption unfolding indicator_def by auto qed - -lemma negligible_union_eq[simp]: "negligible (s \ t) \ (negligible s \ negligible t)" +lemma negligible_subset[intro]: + assumes "negligible s" + and "t \ s" + shows "negligible t" + unfolding negligible_def +proof safe + case goal1 + show ?case + using assms(1)[unfolded negligible_def,rule_format,of a b] + apply - + apply (rule has_integral_spike[OF assms(1)]) + defer + apply assumption + using assms(2) + unfolding indicator_def + apply auto + done +qed + +lemma negligible_diff[intro?]: + assumes "negligible s" + shows "negligible (s - t)" + using assms by auto + +lemma negligible_inter: + assumes "negligible s \ negligible t" + shows "negligible (s \ t)" + using assms by auto + +lemma negligible_union: + assumes "negligible s" + and "negligible t" + shows "negligible (s \ t)" + unfolding negligible_def +proof safe + case goal1 + note assm = assms[unfolded negligible_def,rule_format,of a b] + then show ?case + apply (subst has_integral_spike_eq[OF assms(2)]) + defer + apply assumption + unfolding indicator_def + apply auto + done +qed + +lemma negligible_union_eq[simp]: "negligible (s \ t) \ negligible s \ negligible t" using negligible_union by auto -lemma negligible_sing[intro]: "negligible {a::_::ordered_euclidean_space}" +lemma negligible_sing[intro]: "negligible {a::'a::ordered_euclidean_space}" using negligible_standard_hyperplane[OF SOME_Basis, of "a \ (SOME i. i \ Basis)"] by auto -lemma negligible_insert[simp]: "negligible(insert a s) \ negligible s" - apply(subst insert_is_Un) unfolding negligible_union_eq by auto - -lemma negligible_empty[intro]: "negligible {}" by auto - -lemma negligible_finite[intro]: assumes "finite s" shows "negligible s" - using assms apply(induct s) by auto - -lemma negligible_unions[intro]: assumes "finite s" "\t\s. negligible t" shows "negligible(\s)" - using assms by(induct,auto) - -lemma negligible: "negligible s \ (\t::('a::ordered_euclidean_space) set. ((indicator s::'a\real) has_integral 0) t)" - apply safe defer apply(subst negligible_def) +lemma negligible_insert[simp]: "negligible (insert a s) \ negligible s" + apply (subst insert_is_Un) + unfolding negligible_union_eq + apply auto + done + +lemma negligible_empty[intro]: "negligible {}" + by auto + +lemma negligible_finite[intro]: + assumes "finite s" + shows "negligible s" + using assms by (induct s) auto + +lemma negligible_unions[intro]: + assumes "finite s" + and "\t\s. 
negligible t" + shows "negligible(\s)" + using assms by induct auto + +lemma negligible: + "negligible s \ (\t::('a::ordered_euclidean_space) set. ((indicator s::'a\real) has_integral 0) t)" + apply safe + defer + apply (subst negligible_def) proof - - fix t::"'a set" assume as:"negligible s" - have *:"(\x. if x \ s \ t then 1 else 0) = (\x. if x\t then if x\s then 1 else 0 else 0)" + fix t :: "'a set" + assume as: "negligible s" + have *: "(\x. if x \ s \ t then 1 else 0) = (\x. if x\t then if x\s then 1 else 0 else 0)" by auto show "((indicator s::'a\real) has_integral 0) t" - apply(subst has_integral_alt) - apply(cases,subst if_P,assumption) + apply (subst has_integral_alt) + apply cases + apply (subst if_P,assumption) unfolding if_not_P - apply(safe,rule as[unfolded negligible_def,rule_format]) - apply(rule_tac x=1 in exI) - apply(safe,rule zero_less_one) - apply(rule_tac x=0 in exI) + apply safe + apply (rule as[unfolded negligible_def,rule_format]) + apply (rule_tac x=1 in exI) + apply safe + apply (rule zero_less_one) + apply (rule_tac x=0 in exI) using negligible_subset[OF as,of "s \ t"] unfolding negligible_def indicator_def [abs_def] unfolding * @@ -4888,63 +6245,114 @@ done qed auto + subsection {* Finite case of the spike theorem is quite commonly needed. *} -lemma has_integral_spike_finite: assumes "finite s" "\x\t-s. g x = f x" - "(f has_integral y) t" shows "(g has_integral y) t" - apply(rule has_integral_spike) using assms by auto - -lemma has_integral_spike_finite_eq: assumes "finite s" "\x\t-s. g x = f x" +lemma has_integral_spike_finite: + assumes "finite s" + and "\x\t-s. g x = f x" + and "(f has_integral y) t" + shows "(g has_integral y) t" + apply (rule has_integral_spike) + using assms + apply auto + done + +lemma has_integral_spike_finite_eq: + assumes "finite s" + and "\x\t-s. g x = f x" shows "((f has_integral y) t \ (g has_integral y) t)" - apply rule apply(rule_tac[!] has_integral_spike_finite) using assms by auto + apply rule + apply (rule_tac[!] has_integral_spike_finite) + using assms + apply auto + done lemma integrable_spike_finite: - assumes "finite s" "\x\t-s. g x = f x" "f integrable_on t" shows "g integrable_on t" - using assms unfolding integrable_on_def apply safe apply(rule_tac x=y in exI) - apply(rule has_integral_spike_finite) by auto + assumes "finite s" + and "\x\t-s. g x = f x" + and "f integrable_on t" + shows "g integrable_on t" + using assms + unfolding integrable_on_def + apply safe + apply (rule_tac x=y in exI) + apply (rule has_integral_spike_finite) + apply auto + done + subsection {* In particular, the boundary of an interval is negligible. *} lemma negligible_frontier_interval: "negligible({a::'a::ordered_euclidean_space..b} - {a<.. ?A" apply rule unfolding Diff_iff mem_interval apply simp apply(erule conjE bexE)+ apply(rule_tac x=i in bexI) - by auto - thus ?thesis - apply- - apply(rule negligible_subset[of ?A]) - apply(rule negligible_unions[OF finite_imageI]) - by auto + apply auto + done + then show ?thesis + apply - + apply (rule negligible_subset[of ?A]) + apply (rule negligible_unions[OF finite_imageI]) + apply auto + done qed lemma has_integral_spike_interior: - assumes "\x\{a<..x\{a<..x\{a<.. (g has_integral y) ({a..b}))" - apply rule apply(rule_tac[!] has_integral_spike_interior) using assms by auto - -lemma integrable_spike_interior: assumes "\x\{a<..x\{a<.. (g has_integral y) {a..b}" + apply rule + apply (rule_tac[!] 
has_integral_spike_interior) + using assms + apply auto + done + +lemma integrable_spike_interior: + assumes "\x\{a<.. = True" - unfolding neutral_def apply(rule some_equality) by auto - -lemma monoidal_and[intro]: "monoidal op \" unfolding monoidal_def by auto - -lemma iterate_and[simp]: assumes "finite s" shows "(iterate op \) s p \ (\x\s. p x)" using assms -apply induct unfolding iterate_insert[OF monoidal_and] by auto - -lemma operative_division_and: assumes "operative op \ P" "d division_of {a..b}" + unfolding neutral_def by (rule some_equality) auto + +lemma monoidal_and[intro]: "monoidal op \" + unfolding monoidal_def by auto + +lemma iterate_and[simp]: + assumes "finite s" + shows "(iterate op \) s p \ (\x\s. p x)" + using assms + apply induct + unfolding iterate_insert[OF monoidal_and] + apply auto + done + +lemma operative_division_and: + assumes "operative op \ P" + and "d division_of {a..b}" shows "(\i\d. P i) \ P {a..b}" - using operative_division[OF monoidal_and assms] division_of_finite[OF assms(2)] by auto + using operative_division[OF monoidal_and assms] division_of_finite[OF assms(2)] + by auto lemma operative_approximable: assumes "0 \ e" fixes f::"'b::ordered_euclidean_space \ 'a::banach" shows "operative op \ (\i. \g. (\x\i. norm (f x - g (x::'b)) \ e) \ g integrable_on i)" unfolding operative_def neutral_and diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/TPTP/TPTP_Parser/make_mlyacclib --- a/src/HOL/TPTP/TPTP_Parser/make_mlyacclib Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/TPTP/TPTP_Parser/make_mlyacclib Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # make_mlyacclib - Generates Isabelle-friendly version of ML-Yacc's library. # diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/TPTP/TPTP_Parser/make_tptp_parser --- a/src/HOL/TPTP/TPTP_Parser/make_tptp_parser Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/TPTP/TPTP_Parser/make_tptp_parser Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # make_tptp_parser - Runs ML-Yacc to generate TPTP parser and makes it # Isabelle-friendly. 
diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/ATP/atp_util.ML --- a/src/HOL/Tools/ATP/atp_util.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/ATP/atp_util.ML Tue Sep 10 20:11:01 2013 +0200 @@ -7,8 +7,9 @@ signature ATP_UTIL = sig val timestamp : unit -> string + val hashw : word * word -> word + val hashw_string : string * word -> word val hash_string : string -> int - val hash_term : term -> int val chunk_list : int -> 'a list -> 'a list list val stringN_of_int : int -> int -> string val strip_spaces : bool -> (char -> bool) -> string -> string @@ -63,13 +64,7 @@ fun hashw (u, w) = Word.+ (u, Word.* (0w65599, w)) fun hashw_char (c, w) = hashw (Word.fromInt (Char.ord c), w) fun hashw_string (s : string, w) = CharVector.foldl hashw_char w s -fun hashw_term (t1 $ t2) = hashw (hashw_term t1, hashw_term t2) - | hashw_term (Const (s, _)) = hashw_string (s, 0w0) - | hashw_term (Free (s, _)) = hashw_string (s, 0w0) - | hashw_term _ = 0w0 - fun hash_string s = Word.toInt (hashw_string (s, 0w0)) -val hash_term = Word.toInt o hashw_term fun chunk_list _ [] = [] | chunk_list k xs = diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Metis/metis_generate.ML --- a/src/HOL/Tools/Metis/metis_generate.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Metis/metis_generate.ML Tue Sep 10 20:11:01 2013 +0200 @@ -215,9 +215,8 @@ else conj_clauses @ fact_clauses |> map (pair 0) - |> rpair (ctxt |> Config.put Legacy_Monomorph.keep_partial_instances false) - |-> Legacy_Monomorph.monomorph atp_schematic_consts_of - |> fst |> chop (length conj_clauses) + |> Monomorph.monomorph atp_schematic_consts_of ctxt + |> chop (length conj_clauses) |> pairself (maps (map (zero_var_indexes o snd))) val num_conjs = length conj_clauses (* Pretend every clause is a "simp" rule, to guide the term ordering. 
*) diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Nitpick/nitpick_util.ML --- a/src/HOL/Tools/Nitpick/nitpick_util.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Nitpick/nitpick_util.ML Tue Sep 10 20:11:01 2013 +0200 @@ -299,6 +299,11 @@ if maybe_quote s = s then pretty else Pretty.enum "" "\"" "\"" [pretty] end -val hash_term = ATP_Util.hash_term +fun hashw_term (t1 $ t2) = ATP_Util.hashw (hashw_term t1, hashw_term t2) + | hashw_term (Const (s, _)) = ATP_Util.hashw_string (s, 0w0) + | hashw_term (Free (s, _)) = ATP_Util.hashw_string (s, 0w0) + | hashw_term _ = 0w0 + +val hash_term = Word.toInt o hashw_term end; diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Sledgehammer/MaSh/src/compareStats.py --- a/src/HOL/Tools/Sledgehammer/MaSh/src/compareStats.py Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Sledgehammer/MaSh/src/compareStats.py Tue Sep 10 20:11:01 2013 +0200 @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python # Title: HOL/Tools/Sledgehammer/MaSh/src/compareStats.py # Author: Daniel Kuehlwein, ICIS, Radboud University Nijmegen # Copyright 2012 diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML Tue Sep 10 20:11:01 2013 +0200 @@ -18,7 +18,6 @@ del : (Facts.ref * Attrib.src list) list, only : bool} - val ignore_no_atp : bool Config.T val instantiate_inducts : bool Config.T val no_fact_override : fact_override val fact_of_ref : @@ -33,7 +32,6 @@ val maybe_instantiate_inducts : Proof.context -> term list -> term -> (((unit -> string) * 'a) * thm) list -> (((unit -> string) * 'a) * thm) list - val maybe_filter_no_atps : Proof.context -> ('a * thm) list -> ('a * thm) list val fact_of_raw_fact : raw_fact -> fact val all_facts : Proof.context -> bool -> bool -> unit Symtab.table -> thm list -> thm list @@ -59,9 +57,7 @@ del : (Facts.ref * Attrib.src list) list, only : bool} -(* experimental features *) -val ignore_no_atp = - Attrib.setup_config_bool @{binding sledgehammer_ignore_no_atp} (K false) +(* experimental feature *) val instantiate_inducts = Attrib.setup_config_bool @{binding sledgehammer_instantiate_inducts} (K false) @@ -106,15 +102,49 @@ body_type T = @{typ bool} | _ => false) +fun normalize_vars t = + let + fun normT (Type (s, Ts)) = fold_map normT Ts #>> curry Type s + | normT (TVar (z as (_, S))) = + (fn ((knownT, nT), accum) => + case find_index (equal z) knownT of + ~1 => (TVar ((Name.uu, nT), S), ((z :: knownT, nT + 1), accum)) + | j => (TVar ((Name.uu, nT - j - 1), S), ((knownT, nT), accum))) + | normT (T as TFree _) = pair T + fun norm (t $ u) = norm t ##>> norm u #>> op $ + | norm (Const (s, T)) = normT T #>> curry Const s + | norm (Var (z as (_, T))) = + normT T + #> (fn (T, (accumT, (known, n))) => + case find_index (equal z) known of + ~1 => (Var ((Name.uu, n), T), (accumT, (z :: known, n + 1))) + | j => (Var ((Name.uu, n - j - 1), T), (accumT, (known, n)))) + | norm (Abs (_, T, t)) = + norm t ##>> normT T #>> (fn (t, T) => Abs (Name.uu, T, t)) + | norm (Bound j) = pair (Bound j) + | norm (Free (s, T)) = normT T #>> curry Free s + in fst (norm t (([], 0), ([], 0))) end + fun status_of_thm css name th = - (* FIXME: use structured name *) - if (String.isSubstring ".induct" name orelse - String.isSubstring ".inducts" name) andalso - may_be_induction (prop_of th) then - Induction - else case Termtab.lookup css (prop_of th) of - SOME status => status - | NONE => General + let val 
t = normalize_vars (prop_of th) in + (* FIXME: use structured name *) + if String.isSubstring ".induct" name andalso may_be_induction t then + Induction + else case Termtab.lookup css t of + SOME status => status + | NONE => + let val concl = Logic.strip_imp_concl t in + case try (HOLogic.dest_eq o HOLogic.dest_Trueprop) concl of + SOME lrhss => + let + val prems = Logic.strip_imp_prems t + val t' = Logic.list_implies (prems, Logic.mk_equals lrhss) + in + Termtab.lookup css t' |> the_default General + end + | NONE => General + end + end fun stature_of_thm global assms chained css name th = (scope_of_thm global assms chained th, status_of_thm css name th) @@ -161,36 +191,6 @@ append ["induct", "inducts"] |> map (prefix Long_Name.separator) -val max_lambda_nesting = 5 (*only applies if not ho_atp*) - -fun term_has_too_many_lambdas max (t1 $ t2) = - exists (term_has_too_many_lambdas max) [t1, t2] - | term_has_too_many_lambdas max (Abs (_, _, t)) = - max = 0 orelse term_has_too_many_lambdas (max - 1) t - | term_has_too_many_lambdas _ _ = false - -(* Don't count nested lambdas at the level of formulas, since they are - quantifiers. *) -fun formula_has_too_many_lambdas Ts (Abs (_, T, t)) = - formula_has_too_many_lambdas (T :: Ts) t - | formula_has_too_many_lambdas Ts t = - if member (op =) [HOLogic.boolT, propT] (fastype_of1 (Ts, t)) then - exists (formula_has_too_many_lambdas Ts) (#2 (strip_comb t)) - else - term_has_too_many_lambdas max_lambda_nesting t - -(* The maximum apply depth of any "metis" call in "Metis_Examples" (on - 2007-10-31) was 11. *) -val max_apply_depth = 18 - -fun apply_depth (f $ t) = Int.max (apply_depth f, apply_depth t + 1) - | apply_depth (Abs (_, _, t)) = apply_depth t - | apply_depth _ = 0 - -fun is_too_complex ho_atp t = - apply_depth t > max_apply_depth orelse - (not ho_atp andalso formula_has_too_many_lambdas [] t) - (* FIXME: Ad hoc list *) val technical_prefixes = ["ATP", "Code_Evaluation", "Datatype", "Enum", "Lazy_Sequence", @@ -199,51 +199,66 @@ "Random_Sequence", "Sledgehammer", "SMT"] |> map (suffix Long_Name.separator) -fun has_technical_prefix s = +fun is_technical_const (s, _) = exists (fn pref => String.isPrefix pref s) technical_prefixes -val exists_technical_const = exists_Const (has_technical_prefix o fst) (* FIXME: make more reliable *) -val exists_low_level_class_const = - exists_Const (fn (s, _) => - s = @{const_name equal_class.equal} orelse - String.isSubstring (Long_Name.separator ^ "class" ^ Long_Name.separator) s) +val sep_class_sep = Long_Name.separator ^ "class" ^ Long_Name.separator +fun is_low_level_class_const (s, _) = + s = @{const_name equal_class.equal} orelse String.isSubstring sep_class_sep s + +val sep_that = Long_Name.separator ^ Obtain.thatN fun is_that_fact th = - String.isSuffix (Long_Name.separator ^ Obtain.thatN) (Thm.get_name_hint th) + String.isSuffix sep_that (Thm.get_name_hint th) andalso exists_subterm (fn Free (s, _) => s = Name.skolem Auto_Bind.thesisN | _ => false) (prop_of th) +datatype interest = Deal_Breaker | Interesting | Boring + +fun combine_interests Deal_Breaker _ = Deal_Breaker + | combine_interests _ Deal_Breaker = Deal_Breaker + | combine_interests Interesting _ = Interesting + | combine_interests _ Interesting = Interesting + | combine_interests Boring Boring = Boring + fun is_likely_tautology_too_meta_or_too_technical th = let fun is_interesting_subterm (Const (s, _)) = not (member (op =) atp_widely_irrelevant_consts s) | is_interesting_subterm (Free _) = true | is_interesting_subterm _ = false - fun 
is_boring_bool t = - not (exists_subterm is_interesting_subterm t) orelse - exists_type (exists_subtype (curry (op =) @{typ prop})) t - fun is_boring_prop _ (@{const Trueprop} $ t) = is_boring_bool t - | is_boring_prop Ts (@{const "==>"} $ t $ u) = - is_boring_prop Ts t andalso is_boring_prop Ts u - | is_boring_prop Ts (Const (@{const_name all}, _) $ Abs (_, T, t)) = - is_boring_prop (T :: Ts) t - | is_boring_prop Ts ((t as Const (@{const_name all}, _)) $ u) = - is_boring_prop Ts (t $ eta_expand Ts u 1) - | is_boring_prop _ (Const (@{const_name "=="}, _) $ t $ u) = - is_boring_bool t andalso is_boring_bool u - | is_boring_prop _ _ = true + fun interest_of_bool t = + if exists_Const (is_technical_const orf is_low_level_class_const) t then + Deal_Breaker + else if exists_type (exists_subtype (curry (op =) @{typ prop})) t orelse + not (exists_subterm is_interesting_subterm t) then + Boring + else + Interesting + fun interest_of_prop _ (@{const Trueprop} $ t) = interest_of_bool t + | interest_of_prop Ts (@{const "==>"} $ t $ u) = + combine_interests (interest_of_prop Ts t) (interest_of_prop Ts u) + | interest_of_prop Ts (Const (@{const_name all}, _) $ Abs (_, T, t)) = + interest_of_prop (T :: Ts) t + | interest_of_prop Ts ((t as Const (@{const_name all}, _)) $ u) = + interest_of_prop Ts (t $ eta_expand Ts u 1) + | interest_of_prop _ (Const (@{const_name "=="}, _) $ t $ u) = + combine_interests (interest_of_bool t) (interest_of_bool u) + | interest_of_prop _ _ = Deal_Breaker val t = prop_of th in - (is_boring_prop [] (prop_of th) andalso + (interest_of_prop [] t <> Interesting andalso not (Thm.eq_thm_prop (@{thm ext}, th))) orelse - exists_type type_has_top_sort t orelse exists_technical_const t orelse - exists_low_level_class_const t orelse is_that_fact th + is_that_fact th end -fun is_blacklisted_or_something ctxt ho_atp name = - (not (Config.get ctxt ignore_no_atp) andalso is_package_def name) orelse - exists (fn s => String.isSuffix s name) (multi_base_blacklist ctxt ho_atp) +fun is_blacklisted_or_something ctxt ho_atp = + let + val blist = multi_base_blacklist ctxt ho_atp + fun is_blisted name = + is_package_def name orelse exists (fn s => String.isSuffix s name) blist + in is_blisted end fun hackish_string_of_term ctxt = with_vanilla_print_mode (Syntax.string_of_term ctxt) #> simplify_spaces @@ -272,48 +287,45 @@ fun backquote_term ctxt = close_form #> hackish_string_of_term ctxt #> backquote fun backquote_thm ctxt = backquote_term ctxt o prop_of +(* gracefully handle huge background theories *) +val max_simps_for_clasimpset = 10000 + fun clasimpset_rule_table_of ctxt = - let - val thy = Proof_Context.theory_of ctxt - val atomize = HOLogic.mk_Trueprop o Object_Logic.atomize_term thy - fun add stature normalizers get_th = - fold (fn rule => - let - val th = rule |> get_th - val t = - th |> Thm.maxidx_of th > 0 ? 
zero_var_indexes |> prop_of - in - fold (fn normalize => Termtab.update (normalize t, stature)) - (I :: normalizers) - end) - val {safeIs, (* safeEs, *) hazIs, (* hazEs, *) ...} = - ctxt |> claset_of |> Classical.rep_cs - val intros = Item_Net.content safeIs @ Item_Net.content hazIs + let val simps = ctxt |> simpset_of |> dest_ss |> #simps in + if length simps >= max_simps_for_clasimpset then + Termtab.empty + else + let + fun add stature th = + Termtab.update (normalize_vars (prop_of th), stature) + val {safeIs, (* safeEs, *) hazIs, (* hazEs, *) ...} = + ctxt |> claset_of |> Classical.rep_cs + val intros = Item_Net.content safeIs @ Item_Net.content hazIs (* Add once it is used: - val elims = - Item_Net.content safeEs @ Item_Net.content hazEs - |> map Classical.classical_rule + val elims = + Item_Net.content safeEs @ Item_Net.content hazEs + |> map Classical.classical_rule *) - val simps = ctxt |> simpset_of |> dest_ss |> #simps - val specs = ctxt |> Spec_Rules.get - val (rec_defs, nonrec_defs) = - specs |> filter (curry (op =) Spec_Rules.Equational o fst) - |> maps (snd o snd) - |> filter_out (member Thm.eq_thm_prop risky_defs) - |> List.partition (is_rec_def o prop_of) - val spec_intros = - specs |> filter (member (op =) [Spec_Rules.Inductive, - Spec_Rules.Co_Inductive] o fst) - |> maps (snd o snd) - in - Termtab.empty |> add Simp [atomize] snd simps - |> add Rec_Def [] I rec_defs - |> add Non_Rec_Def [] I nonrec_defs + val specs = ctxt |> Spec_Rules.get + val (rec_defs, nonrec_defs) = + specs |> filter (curry (op =) Spec_Rules.Equational o fst) + |> maps (snd o snd) + |> filter_out (member Thm.eq_thm_prop risky_defs) + |> List.partition (is_rec_def o prop_of) + val spec_intros = + specs |> filter (member (op =) [Spec_Rules.Inductive, + Spec_Rules.Co_Inductive] o fst) + |> maps (snd o snd) + in + Termtab.empty |> fold (add Simp o snd) simps + |> fold (add Rec_Def) rec_defs + |> fold (add Non_Rec_Def) nonrec_defs (* Add once it is used: - |> add Elim [] I elims + |> fold (add Elim) elims *) - |> add Intro [] I intros - |> add Inductive [] I spec_intros + |> fold (add Intro) intros + |> fold (add Inductive) spec_intros + end end fun normalize_eq (t as @{const Trueprop} @@ -326,7 +338,7 @@ else HOLogic.mk_Trueprop (HOLogic.mk_not (t0 $ t2 $ t1)) | normalize_eq t = t -val normalize_eq_etc = normalize_eq o Term_Subst.zero_var_indexes +val normalize_eq_vars = normalize_eq #> normalize_vars fun if_thm_before th th' = if Theory.subthy (pairself Thm.theory_of_thm (th, th')) then th else th' @@ -341,7 +353,8 @@ fun build_name_tables name_of facts = let - fun cons_thm (_, th) = Termtab.cons_list (normalize_eq_etc (prop_of th), th) + fun cons_thm (_, th) = + Termtab.cons_list (normalize_eq_vars (prop_of th), th) fun add_plain canon alias = Symtab.update (Thm.get_name_hint alias, name_of (if_thm_before canon alias)) @@ -353,10 +366,17 @@ val inclass_name_tab = Symtab.fold add_inclass plain_name_tab Symtab.empty in (plain_name_tab, inclass_name_tab) end -fun uniquify facts = - Termtab.fold (cons o snd) - (fold (Termtab.default o `(normalize_eq_etc o prop_of o snd)) facts - Termtab.empty) [] +fun keyed_distinct key_of xs = + let val tab = fold (Termtab.default o `key_of) xs Termtab.empty in + Termtab.fold (cons o snd) tab [] + end + +fun hashed_keyed_distinct hash_of key_of xs = + let + val ys = map (`hash_of) xs + val sorted_ys = sort (int_ord o pairself fst) ys + val grouped_ys = AList.coalesce (op =) sorted_ys + in maps (keyed_distinct key_of o snd) grouped_ys end fun struct_induct_rule_on th = case 
Logic.strip_horn (prop_of th) of @@ -415,9 +435,6 @@ else I -fun maybe_filter_no_atps ctxt = - not (Config.get ctxt ignore_no_atp) ? filter_out (No_ATPs.member ctxt o snd) - fun fact_of_raw_fact ((name, stature), th) = ((name (), stature), th) fun all_facts ctxt generous ho_atp reserved add_ths chained css = @@ -435,14 +452,13 @@ |> filter is_good_unnamed_local |> map (pair "" o single) val full_space = Name_Space.merge (Facts.space_of global_facts, Facts.space_of local_facts) + val is_blacklisted_or_something = is_blacklisted_or_something ctxt ho_atp fun add_facts global foldx facts = foldx (fn (name0, ths) => if name0 <> "" andalso forall (not o member Thm.eq_thm_prop add_ths) ths andalso (Facts.is_concealed facts name0 orelse - not (can (Proof_Context.get_thms ctxt) name0) orelse - (not generous andalso - is_blacklisted_or_something ctxt ho_atp name0)) then + (not generous andalso is_blacklisted_or_something name0)) then I else let @@ -457,9 +473,7 @@ #> fold_rev (fn th => fn (j, accum) => (j - 1, if not (member Thm.eq_thm_prop add_ths th) andalso - (is_likely_tautology_too_meta_or_too_technical th orelse - (not generous andalso - is_too_complex ho_atp (prop_of th))) then + is_likely_tautology_too_meta_or_too_technical th then accum else let @@ -506,9 +520,10 @@ else let val (add, del) = pairself (Attrib.eval_thms ctxt) (add, del) in all_facts ctxt false ho_atp reserved add chained css - |> filter_out (member Thm.eq_thm_prop del o snd) - |> maybe_filter_no_atps ctxt - |> uniquify + |> filter_out + ((member Thm.eq_thm_prop del orf No_ATPs.member ctxt) o snd) + |> hashed_keyed_distinct (size_of_term o prop_of o snd) + (normalize_eq_vars o prop_of o snd) end) |> maybe_instantiate_inducts ctxt hyp_ts concl_t end diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML Tue Sep 10 20:11:01 2013 +0200 @@ -387,7 +387,7 @@ (* High enough so that it isn't wrongly considered as very relevant (e.g., for E weights), but low enough so that it is unlikely to be truncated away if few facts are included. 
*) -val special_fact_index = 75 +val special_fact_index = 45 fun relevance_filter ctxt thres0 decay max_facts is_built_in_const (fudge as {threshold_divisor, ridiculous_threshold, ...}) facts hyp_ts diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/Sledgehammer/sledgehammer_provers.ML --- a/src/HOL/Tools/Sledgehammer/sledgehammer_provers.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/Sledgehammer/sledgehammer_provers.ML Tue Sep 10 20:11:01 2013 +0200 @@ -650,19 +650,14 @@ | _ => (maybe_isar_name, []) in minimize_command override_params min_name end -fun repair_legacy_monomorph_context max_iters best_max_iters max_new_instances - best_max_new_instances = - Config.put Legacy_Monomorph.max_rounds - (max_iters |> the_default best_max_iters) - #> Config.put Legacy_Monomorph.max_new_instances - (max_new_instances |> the_default best_max_new_instances) - #> Config.put Legacy_Monomorph.keep_partial_instances false +val max_fact_instances = 10 (* FUDGE *) fun repair_monomorph_context max_iters best_max_iters max_new_instances best_max_new_instances = Config.put Monomorph.max_rounds (max_iters |> the_default best_max_iters) #> Config.put Monomorph.max_new_instances (max_new_instances |> the_default best_max_new_instances) + #> Config.put Monomorph.max_thm_instances max_fact_instances fun suffix_of_mode Auto_Try = "_try" | suffix_of_mode Try = "_try" @@ -757,7 +752,7 @@ let val ctxt = ctxt - |> repair_legacy_monomorph_context max_mono_iters + |> repair_monomorph_context max_mono_iters best_max_mono_iters max_new_mono_instances best_max_new_mono_instances (* pseudo-theorem involving the same constants as the subgoal *) @@ -770,9 +765,8 @@ |> op @ |> cons (0, subgoal_th) in - Legacy_Monomorph.monomorph atp_schematic_consts_of rths ctxt - |> fst |> tl - |> curry ListPair.zip (map fst facts) + Monomorph.monomorph atp_schematic_consts_of ctxt rths + |> tl |> curry ListPair.zip (map fst facts) |> maps (fn (name, rths) => map (pair name o zero_var_indexes o snd) rths) end @@ -845,8 +839,7 @@ fun sel_weights () = atp_problem_selection_weights atp_problem fun ord_info () = atp_problem_term_order_info atp_problem val ord = effective_term_order ctxt name - val full_proof = - debug orelse (isar_proofs |> the_default (mode = Minimize)) + val full_proof = isar_proofs |> the_default (mode = Minimize) val args = arguments ctxt full_proof extra (slice_timeout |> the_default one_day) @@ -1129,7 +1122,7 @@ (if show_filter then " " ^ quote fact_filter else "") ^ " fact" ^ plural_s num_facts val _ = - if verbose andalso is_some outcome then + if debug then quote name ^ " invoked with " ^ num_of_facts fact_filter num_facts ^ ": " ^ string_of_failure (failure_of_smt_failure (the outcome)) ^ diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/legacy_monomorph.ML --- a/src/HOL/Tools/legacy_monomorph.ML Tue Sep 10 20:09:53 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,331 +0,0 @@ -(* Title: HOL/Tools/legacy_monomorph.ML - Author: Sascha Boehme, TU Muenchen - -Monomorphization of theorems, i.e., computation of all (necessary) -instances. This procedure is incomplete in general, but works well for -most practical problems. - -For a list of universally closed theorems (without schematic term -variables), monomorphization computes a list of theorems with schematic -term variables: all polymorphic constants (i.e., constants occurring both -with ground types and schematic type variables) are instantiated with all -(necessary) ground types; thereby theorems containing these constants are -copied. 
To prevent nontermination, there is an upper limit for the number -of iterations involved in the fixpoint construction. - -The search for instances is performed on the constants with schematic -types, which are extracted from the initial set of theorems. The search -constructs, for each theorem with those constants, a set of substitutions, -which, in the end, is applied to all corresponding theorems. Remaining -schematic type variables are substituted with fresh types. - -Searching for necessary substitutions is an iterative fixpoint -construction: each iteration computes all required instances required by -the ground instances computed in the previous step and which haven't been -found before. Computed substitutions are always nontrivial: schematic type -variables are never mapped to schematic type variables. -*) - -signature LEGACY_MONOMORPH = -sig - (* utility function *) - val typ_has_tvars: typ -> bool - val all_schematic_consts_of: term -> typ list Symtab.table - val add_schematic_consts_of: term -> typ list Symtab.table -> - typ list Symtab.table - - (* configuration options *) - val max_rounds: int Config.T - val max_new_instances: int Config.T - val keep_partial_instances: bool Config.T - - (* monomorphization *) - val monomorph: (term -> typ list Symtab.table) -> (int * thm) list -> - Proof.context -> (int * thm) list list * Proof.context -end - -structure Legacy_Monomorph: LEGACY_MONOMORPH = -struct - -(* utility functions *) - -val typ_has_tvars = Term.exists_subtype (fn TVar _ => true | _ => false) - -fun add_schematic_const (c as (_, T)) = - if typ_has_tvars T then Symtab.insert_list (op =) c else I - -fun add_schematic_consts_of t = - Term.fold_aterms (fn Const c => add_schematic_const c | _ => I) t - -fun all_schematic_consts_of t = add_schematic_consts_of t Symtab.empty - - - -(* configuration options *) - -val max_rounds = Attrib.setup_config_int @{binding legacy_monomorph_max_rounds} (K 5) -val max_new_instances = - Attrib.setup_config_int @{binding legacy_monomorph_max_new_instances} (K 300) -val keep_partial_instances = - Attrib.setup_config_bool @{binding legacy_monomorph_keep_partial_instances} (K true) - - - -(* monomorphization *) - -(** preparing the problem **) - -datatype thm_info = - Ground of thm | - Schematic of { - index: int, - theorem: thm, - tvars: (indexname * sort) list, - schematics: typ list Symtab.table, - initial_round: int } - -fun prepare schematic_consts_of rthms = - let - val empty_sub = ((0, false, false), Vartab.empty) - - fun prep (r, thm) ((i, idx), (consts, subs)) = - if not (Term.exists_type typ_has_tvars (Thm.prop_of thm)) then - (Ground thm, ((i+1, idx + Thm.maxidx_of thm + 1), (consts, subs))) - else - let - (* increase indices to avoid clashes of type variables *) - val thm' = Thm.incr_indexes idx thm - val idx' = Thm.maxidx_of thm' + 1 - val schematics = schematic_consts_of (Thm.prop_of thm') - val consts' = - Symtab.fold (fn (n, _) => Symtab.update (n, [])) schematics consts - val subs' = Inttab.update (i, [empty_sub]) subs - val thm_info = Schematic { - index = i, - theorem = thm', - tvars = Term.add_tvars (Thm.prop_of thm') [], - schematics = schematics, - initial_round = r } - in (thm_info, ((i+1, idx'), (consts', subs'))) end - in fold_map prep rthms ((0, 0), (Symtab.empty, Inttab.empty)) ||> snd end - - - -(** collecting substitutions **) - -fun exceeded limit = (limit <= 0) -fun exceeded_limit (limit, _, _) = exceeded limit - - -fun derived_subst subst' subst = subst' |> Vartab.forall (fn (n, (_, T)) => - Vartab.lookup subst n 
|> Option.map (equal T o snd) |> the_default false) - -fun eq_subst (subst1, subst2) = - derived_subst subst1 subst2 andalso derived_subst subst2 subst1 - - -fun with_all_grounds cx grounds f = - if exceeded_limit cx then I else Symtab.fold f grounds - -fun with_all_type_combinations cx schematics f (n, Ts) = - if exceeded_limit cx then I - else fold_product f (Symtab.lookup_list schematics n) Ts - -fun derive_new_substs thy cx new_grounds schematics subst = - with_all_grounds cx new_grounds - (with_all_type_combinations cx schematics (fn T => fn U => - (case try (Sign.typ_match thy (T, U)) subst of - NONE => I - | SOME subst' => insert eq_subst subst'))) [] - - -fun known_subst sub subs1 subs2 subst' = - let fun derived (_, subst) = derived_subst subst' subst - in derived sub orelse exists derived subs1 orelse exists derived subs2 end - -fun within_limit f cx = if exceeded_limit cx then cx else f cx - -fun fold_partial_substs derive add = within_limit ( - let - fun fold_partial [] cx = cx - | fold_partial (sub :: subs) (limit, subs', next) = - if exceeded limit then (limit, sub :: subs @ subs', next) - else sub |> (fn ((generation, full, _), subst) => - if full then fold_partial subs (limit, sub :: subs', next) - else - (case filter_out (known_subst sub subs subs') (derive subst) of - [] => fold_partial subs (limit, sub :: subs', next) - | substs => - (limit, ((generation, full, true), subst) :: subs', next) - |> fold (within_limit o add) substs - |> fold_partial subs)) - in (fn (limit, subs, next) => fold_partial subs (limit, [], next)) end) - - -fun refine ctxt round known_grounds new_grounds (tvars, schematics) cx = - let - val thy = Proof_Context.theory_of ctxt - val count_partial = Config.get ctxt keep_partial_instances - - fun add_new_ground subst n T = - let val T' = Envir.subst_type subst T - in - (* FIXME: maybe keep types in a table or net for known_grounds, - that might improve efficiency here - *) - if typ_has_tvars T' then I - else if member (op =) (Symtab.lookup_list known_grounds n) T' then I - else Symtab.cons_list (n, T') - end - - fun add_new_subst subst (limit, subs, next_grounds) = - let - val full = forall (Vartab.defined subst o fst) tvars - val limit' = - if full orelse count_partial then limit - 1 else limit - val sub = ((round, full, false), subst) - val next_grounds' = - (schematics, next_grounds) - |-> Symtab.fold (uncurry (fold o add_new_ground subst)) - in (limit', sub :: subs, next_grounds') end - in - fold_partial_substs (derive_new_substs thy cx new_grounds schematics) - add_new_subst cx - end - - -(* - 'known_grounds' are all constant names known to occur schematically - associated with all ground instances considered so far -*) -fun add_relevant_instances known_grounds (Const (c as (n, T))) = - if typ_has_tvars T orelse not (Symtab.defined known_grounds n) then I - else if member (op =) (Symtab.lookup_list known_grounds n) T then I - else Symtab.insert_list (op =) c - | add_relevant_instances _ _ = I - -fun collect_instances known_grounds thm = - Term.fold_aterms (add_relevant_instances known_grounds) (Thm.prop_of thm) - - -fun make_subst_ctxt ctxt thm_infos known_grounds substitutions = - let - (* The total limit of returned (ground) facts is the number of facts - given to the monomorphizer increased by max_new_instances. Since - initially ground facts are returned anyway, the limit here is not - counting them. 
*) - val limit = Config.get ctxt max_new_instances + - fold (fn Schematic _ => Integer.add 1 | _ => I) thm_infos 0 - - fun add_ground_consts (Ground thm) = collect_instances known_grounds thm - | add_ground_consts (Schematic _) = I - val initial_grounds = fold add_ground_consts thm_infos Symtab.empty - in (known_grounds, (limit, substitutions, initial_grounds)) end - -fun is_new round initial_round = (round = initial_round) -fun is_active round initial_round = (round > initial_round) - -fun fold_schematic pred f = fold (fn - Schematic {index, theorem, tvars, schematics, initial_round} => - if pred initial_round then f theorem (index, tvars, schematics) else I - | Ground _ => I) - -fun focus f _ (index, tvars, schematics) (limit, subs, next_grounds) = - let - val (limit', isubs', next_grounds') = - (limit, Inttab.lookup_list subs index, next_grounds) - |> f (tvars, schematics) - in (limit', Inttab.update (index, isubs') subs, next_grounds') end - -fun collect_substitutions thm_infos ctxt round subst_ctxt = - let val (known_grounds, (limit, subs, next_grounds)) = subst_ctxt - in - if exceeded limit then subst_ctxt - else - let - fun collect thm _ = collect_instances known_grounds thm - val new = fold_schematic (is_new round) collect thm_infos next_grounds - - val known' = Symtab.merge_list (op =) (known_grounds, new) - val step = focus o refine ctxt round known' - in - (limit, subs, Symtab.empty) - |> not (Symtab.is_empty new) ? - fold_schematic (is_active round) (step new) thm_infos - |> fold_schematic (is_new round) (step known') thm_infos - |> pair known' - end - end - - - -(** instantiating schematic theorems **) - -fun super_sort (Ground _) S = S - | super_sort (Schematic {tvars, ...}) S = merge (op =) (S, maps snd tvars) - -fun new_super_type ctxt thm_infos = - let val S = fold super_sort thm_infos @{sort type} - in yield_singleton Variable.invent_types S ctxt |>> SOME o TFree end - -fun add_missing_tvar T (ix, S) subst = - if Vartab.defined subst ix then subst - else Vartab.update (ix, (S, T)) subst - -fun complete tvars subst T = - subst - |> Vartab.map (K (apsnd (Term.map_atyps (fn TVar _ => T | U => U)))) - |> fold (add_missing_tvar T) tvars - -fun instantiate_all' (mT, ctxt) subs thm_infos = - let - val thy = Proof_Context.theory_of ctxt - - fun cert (ix, (S, T)) = pairself (Thm.ctyp_of thy) (TVar (ix, S), T) - fun cert' subst = Vartab.fold (cons o cert) subst [] - fun instantiate thm subst = Thm.instantiate (cert' subst, []) thm - - fun with_subst tvars f ((generation, full, _), subst) = - if full then SOME (generation, f subst) - else Option.map (pair generation o f o complete tvars subst) mT - - fun inst (Ground thm) = [(0, thm)] - | inst (Schematic {theorem, tvars, index, ...}) = - Inttab.lookup_list subs index - |> map_filter (with_subst tvars (instantiate theorem)) - in (map inst thm_infos, ctxt) end - -fun instantiate_all ctxt thm_infos (_, (_, subs, _)) = - if Config.get ctxt keep_partial_instances then - let fun is_refined ((_, _, refined), _) = refined - in - (Inttab.map (K (filter_out is_refined)) subs, thm_infos) - |-> instantiate_all' (new_super_type ctxt thm_infos) - end - else instantiate_all' (NONE, ctxt) subs thm_infos - - - -(** overall procedure **) - -fun limit_rounds ctxt f = - let - val max = Config.get ctxt max_rounds - fun round i x = if i > max then x else round (i + 1) (f ctxt i x) - in round 1 end - -fun monomorph schematic_consts_of rthms ctxt = - let - val (thm_infos, (known_grounds, subs)) = prepare schematic_consts_of rthms - in - if Symtab.is_empty 
known_grounds then - (map (fn Ground thm => [(0, thm)] | _ => []) thm_infos, ctxt) - else - make_subst_ctxt ctxt thm_infos known_grounds subs - |> limit_rounds ctxt (collect_substitutions thm_infos) - |> instantiate_all ctxt thm_infos - end - - -end - diff -r 437c0a63bb16 -r c9d6f6285e1d src/HOL/Tools/monomorph.ML --- a/src/HOL/Tools/monomorph.ML Tue Sep 10 20:09:53 2013 +0200 +++ b/src/HOL/Tools/monomorph.ML Tue Sep 10 20:11:01 2013 +0200 @@ -35,6 +35,7 @@ (* configuration options *) val max_rounds: int Config.T val max_new_instances: int Config.T + val max_thm_instances: int Config.T (* monomorphization *) val monomorph: (term -> typ list Symtab.table) -> Proof.context -> @@ -67,14 +68,15 @@ val max_new_instances = Attrib.setup_config_int @{binding monomorph_max_new_instances} (K 300) +val max_thm_instances = + Attrib.setup_config_int @{binding max_thm_instances} (K 20) + fun limit_rounds ctxt f = let val max = Config.get ctxt max_rounds fun round i x = if i > max then x else round (i + 1) (f ctxt i x) in round 1 end -fun reached_limit ctxt n = (n >= Config.get ctxt max_new_instances) - (* theorem information and related functions *) @@ -175,25 +177,31 @@ in Term.fold_aterms add (Thm.prop_of thm) end -fun add_insts ctxt round used_grounds new_grounds id thm tvars schematics cx = +fun add_insts max_instances max_thm_instances ctxt round used_grounds + new_grounds id thm tvars schematics cx = let exception ENOUGH of typ list Symtab.table * (int * (int * thm) list Inttab.table) val thy = Proof_Context.theory_of ctxt - fun add subst (next_grounds, (n, insts)) = - let - val thm' = instantiate thy subst thm - val rthm = (round, thm') - val n_insts' = - if member (eq_snd Thm.eq_thm) (Inttab.lookup_list insts id) rthm then - (n, insts) - else (n + 1, Inttab.cons_list (id, rthm) insts) - val next_grounds' = - add_new_grounds used_grounds new_grounds thm' next_grounds - val cx' = (next_grounds', n_insts') - in if reached_limit ctxt n then raise ENOUGH cx' else cx' end + fun add subst (cx as (next_grounds, (n, insts))) = + if n >= max_instances then + raise ENOUGH cx + else + let + val thm' = instantiate thy subst thm + val rthm = (round, thm') + val rthms = Inttab.lookup_list insts id; + val n_insts' = + if member (eq_snd Thm.eq_thm) rthms rthm orelse + length rthms >= max_thm_instances then + (n, insts) + else + (n + 1, Inttab.cons_list (id, rthm) insts) + val next_grounds' = + add_new_grounds used_grounds new_grounds thm' next_grounds + in (next_grounds', n_insts') end fun with_grounds (n, T) f subst (n', Us) = let @@ -239,12 +247,12 @@ fun is_active round initial_round = (round > initial_round) -fun find_instances thm_infos ctxt round (known_grounds, new_grounds, insts) = +fun find_instances max_instances max_thm_instances thm_infos ctxt round + (known_grounds, new_grounds, insts) = let - val add_new = add_insts ctxt round + val add_new = add_insts max_instances max_thm_instances ctxt round fun consider_all pred f (cx as (_, (n, _))) = - if reached_limit ctxt n then cx - else fold_schematics pred f thm_infos cx + if n >= max_instances then cx else fold_schematics pred f thm_infos cx val known_grounds' = Symtab.merge_list (op =) (known_grounds, new_grounds) val empty_grounds = clear_grounds known_grounds' @@ -266,9 +274,13 @@ let val known_grounds = fold_grounds add_ground_types thm_infos consts val empty_grounds = clear_grounds known_grounds + val max_instances = Config.get ctxt max_new_instances + |> fold (fn Schematic _ => Integer.add 1 | _ => I) thm_infos + val max_thm_instances = Config.get 
ctxt max_thm_instances in (empty_grounds, known_grounds, (0, Inttab.empty)) - |> limit_rounds ctxt (find_instances thm_infos) + |> limit_rounds ctxt + (find_instances max_instances max_thm_instances thm_infos) |> (fn (_, _, (_, insts)) => insts) end diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/etc/options --- a/src/Tools/jEdit/etc/options Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/etc/options Tue Sep 10 20:11:01 2013 +0200 @@ -27,8 +27,11 @@ public option jedit_symbols_search_limit : int = 50 -- "maximum number of symbols in search result" -public option jedit_mac_adapter : bool = true - -- "some native Mac OS X support (potential conflict with MacOSX plugin)" +public option jedit_macos_application : bool = true + -- "some native Mac OS X application support (potential conflict with MacOSX plugin)" + +public option jedit_macos_preferences : bool = false + -- "native Mac OS X preferences menu" public option jedit_timing_threshold : real = 0.1 -- "default threshold for timing display" diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/etc/settings --- a/src/Tools/jEdit/etc/settings Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/etc/settings Tue Sep 10 20:11:01 2013 +0200 @@ -7,9 +7,7 @@ #JEDIT_JAVA_OPTIONS="-Xms128m -Xmx512m -Xss1m -Dactors.corePoolSize=4 -Dactors.enableForkJoin=false" JEDIT_JAVA_OPTIONS="-Xms128m -Xmx1024m -Xss2m -Dactors.corePoolSize=4 -Dactors.enableForkJoin=false" #JEDIT_JAVA_OPTIONS="-Xms512m -Xmx4096m -Xss8m -Dactors.corePoolSize=4 -Dactors.enableForkJoin=false" -JEDIT_SYSTEM_OPTIONS="-Dapple.laf.useScreenMenuBar=true --Dcom.apple.mrj.application.apple.menu.about.name=Isabelle/jEdit --Dscala.repl.no-threads=true" +JEDIT_SYSTEM_OPTIONS="-Dapple.laf.useScreenMenuBar=true -Dapple.awt.application.name=Isabelle -Dscala.repl.no-threads=true" ISABELLE_JEDIT_OPTIONS="" diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/src/active.scala --- a/src/Tools/jEdit/src/active.scala Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/src/active.scala Tue Sep 10 20:11:01 2013 +0200 @@ -26,37 +26,8 @@ val buffer = model.buffer val snapshot = model.snapshot() - def try_replace_command(padding: Boolean, exec_id: Document_ID.Exec, s: String) - { - snapshot.state.execs.get(exec_id).map(_.command) match { - case Some(command) => - snapshot.node.command_start(command) match { - case Some(start) => - JEdit_Lib.buffer_edit(buffer) { - val range = command.proper_range + start - if (padding) { - val pad = - JEdit_Lib.try_get_text(buffer, Text.Range(range.length - 1, range.length)) - match { - case None => "" - case Some(s) => if (Symbol.is_blank(s)) "" else " " - } - buffer.insert(start + range.length, pad + s) - } - else { - buffer.remove(start, range.length) - buffer.insert(start, s) - } - } - case None => - } - case None => - } - } - if (!snapshot.is_outdated) { // FIXME avoid hard-wired stuff - elem match { case XML.Elem(Markup(Markup.BROWSER, _), body) => default_thread_pool.submit(() => @@ -82,7 +53,8 @@ case XML.Elem(Markup(Markup.SENDBACK, props), _) => props match { case Position.Id(exec_id) => - try_replace_command(props.exists(_ == Markup.PADDING_COMMAND), exec_id, text) + Isabelle.edit_command(snapshot, buffer, + props.exists(_ == Markup.PADDING_COMMAND), exec_id, text) case _ => if (props.exists(_ == Markup.PADDING_LINE)) Isabelle.insert_line_padding(text_area, text) diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/src/isabelle.scala --- a/src/Tools/jEdit/src/isabelle.scala Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/src/isabelle.scala Tue Sep 
10 20:11:01 2013 +0200 @@ -142,7 +142,7 @@ Rendering.font_size_change(view, i => i - ((i / 10) max 1)) - /* structured insert */ + /* structured edits */ def insert_line_padding(text_area: JEditTextArea, text: String) { @@ -162,6 +162,39 @@ } } + def edit_command( + snapshot: Document.Snapshot, + buffer: Buffer, + padding: Boolean, + exec_id: Document_ID.Exec, + s: String) + { + snapshot.state.execs.get(exec_id).map(_.command) match { + case Some(command) => + snapshot.node.command_start(command) match { + case Some(start) => + JEdit_Lib.buffer_edit(buffer) { + val range = command.proper_range + start + if (padding) { + val pad = + JEdit_Lib.try_get_text(buffer, Text.Range(range.length - 1, range.length)) + match { + case None => "" + case Some(s) => if (Symbol.is_blank(s)) "" else " " + } + buffer.insert(start + range.length, pad + s) + } + else { + buffer.remove(start, range.length) + buffer.insert(start, s) + } + } + case None => + } + case None => + } + } + /* completion */ diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/src/osx_adapter.scala --- a/src/Tools/jEdit/src/osx_adapter.scala Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/src/osx_adapter.scala Tue Sep 10 20:11:01 2013 +0200 @@ -9,26 +9,41 @@ import isabelle._ -import java.lang.{Class, ClassNotFoundException} +import java.lang.{Class, ClassNotFoundException, NoSuchMethodException} import java.lang.reflect.{InvocationHandler, Method, Proxy} object OSX_Adapter { - def set_quit_handler(target: AnyRef, quit_handler: Method) + private lazy val application_class: Class[_] = Class.forName("com.apple.eawt.Application") + private lazy val application = application_class.getConstructor().newInstance() + + def init { - set_handler(new OSX_Adapter("handle_quit", target, quit_handler)) + if (PIDE.options.bool("jedit_macos_application")) { + try { + set_handler("handleQuit") + set_handler("handleAbout") + + if (PIDE.options.bool("jedit_macos_preferences")) { + application_class.getDeclaredMethod("setEnabledPreferencesMenu", classOf[Boolean]). 
+ invoke(application, java.lang.Boolean.valueOf(true)) + set_handler("handlePreferences") + } + } + catch { + case exn: ClassNotFoundException => + java.lang.System.err.println( + "com.apple.eawt.Application unavailable -- cannot install native OS X handler") + } + } } - var application: Any = null - - def set_handler(adapter: OSX_Adapter) + private def set_handler(name: String) { + val handler = PIDE.plugin.getClass.getDeclaredMethod(name) + val adapter = new OSX_Adapter(name, PIDE.plugin, handler) try { - val application_class: Class[_] = Class.forName("com.apple.eawt.Application") - if (application == null) - application = application_class.getConstructor().newInstance() - val application_listener_class: Class[_] = Class.forName("com.apple.eawt.ApplicationListener") val add_listener_method = @@ -58,9 +73,12 @@ val event = args(0) if (event != null) { - val set_handled_method = - event.getClass.getDeclaredMethod("setHandled", classOf[java.lang.Boolean]) - set_handled_method.invoke(event, java.lang.Boolean.valueOf(handled)) + try { + val set_handled_method = + event.getClass.getDeclaredMethod("setHandled", classOf[java.lang.Boolean]) + set_handled_method.invoke(event, java.lang.Boolean.valueOf(handled)) + } + catch { case _: NoSuchMethodException => } } } null diff -r 437c0a63bb16 -r c9d6f6285e1d src/Tools/jEdit/src/plugin.scala --- a/src/Tools/jEdit/src/plugin.scala Tue Sep 10 20:09:53 2013 +0200 +++ b/src/Tools/jEdit/src/plugin.scala Tue Sep 10 20:11:01 2013 +0200 @@ -14,6 +14,8 @@ import scala.swing.{ListView, ScrollPane} import org.gjt.sp.jedit.{jEdit, EBMessage, EBPlugin, Buffer, View, Debug} +import org.jedit.options.CombinedOptions +import org.gjt.sp.jedit.gui.AboutDialog import org.gjt.sp.jedit.textarea.{JEditTextArea, TextArea} import org.gjt.sp.jedit.syntax.ModeProvider import org.gjt.sp.jedit.msg.{EditorStarted, BufferUpdate, EditPaneUpdate, PropertiesChanged} @@ -222,12 +224,23 @@ /* Mac OS X application hooks */ - def handle_quit(): Boolean = + def handleQuit(): Boolean = { jEdit.exit(jEdit.getActiveView(), true) false } + def handlePreferences() + { + CombinedOptions.combinedOptions(jEdit.getActiveView()) + } + + def handleAbout(): Boolean = + { + new AboutDialog(jEdit.getActiveView()) + true + } + /* main plugin plumbing */ @@ -306,8 +319,7 @@ PIDE.options.update(Options.init()) PIDE.completion_history.load() - if (Platform.is_macos && PIDE.options.bool("jedit_mac_adapter")) - OSX_Adapter.set_quit_handler(this, this.getClass.getDeclaredMethod("handle_quit")) + if (Platform.is_macos) OSX_Adapter.init SyntaxUtilities.setStyleExtender(new Token_Markup.Style_Extender) if (ModeProvider.instance.isInstanceOf[ModeProvider])