merged
authorkrauss
Fri, 13 Sep 2013 09:31:45 +0200
changeset 53615 f557a4645f61
parent 53614 8c51fc24d83c (current diff)
parent 53602 0ae3db699a3e (diff)
child 53616 ff37dc246b10
merged
NEWS
lib/Tools/build_dialog
src/HOL/Tools/Sledgehammer/MaSh/src/mashTest.py
src/Pure/Tools/build_dialog.scala
--- a/Admin/Linux/Isabelle	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/Linux/Isabelle	Fri Sep 13 09:31:45 2013 +0200
@@ -4,25 +4,24 @@
 #
 # Main Isabelle application wrapper.
 
+# dereference executable
 if [ -L "$0" ]; then
   TARGET="$(LC_ALL=C ls -l "$0" | sed 's/.* -> //')"
   exec "$(cd "$(dirname "$0")"; cd "$(pwd -P)"; cd "$(dirname "$TARGET")"; pwd)/$(basename "$TARGET")" "$@"
 fi
 
 
-## settings
-
-PRG="$(basename "$0")"
+# minimal Isabelle environment
 
 ISABELLE_HOME="$(cd "$(dirname "$0")"; cd "$(pwd -P)"; pwd)"
-source "$ISABELLE_HOME/lib/scripts/getsettings" || exit 2
+source "$ISABELLE_HOME/lib/scripts/isabelle-platform"
 
 
-## main
+# main
 
-declare -a JAVA_ARGS
-JAVA_ARGS=({JAVA_ARGS})
+exec "$ISABELLE_HOME/contrib/jdk/${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM32}/bin/java" \
+  "-Disabelle.home=$ISABELLE_HOME" \
+  {JAVA_ARGS} \
+  -classpath "{CLASSPATH}" \
+  isabelle.Main "$@"
 
-exec "$ISABELLE_HOME/bin/isabelle" java "${JAVA_ARGS[@]}" \
-  -classpath "$ISABELLE_HOME/src/Tools/jEdit/dist/jedit.jar" isabelle.Main "$@"
-
--- a/Admin/Release/CHECKLIST	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/Release/CHECKLIST	Fri Sep 13 09:31:45 2013 +0200
@@ -11,6 +11,8 @@
 
 - test 'display_drafts' command;
 
+- test "#!/usr/bin/env isabelle_scala_script";
+
 - check HTML header of library;
 
 - check file positions within logic images (hyperlinks etc.);
--- a/Admin/Windows/WinRun4J/Isabelle.ini	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/Windows/WinRun4J/Isabelle.ini	Fri Sep 13 09:31:45 2013 +0200
@@ -1,11 +1,4 @@
 main.class=isabelle.Main
-classpath.1=lib\classes\ext\Pure.jar
-classpath.2=lib\classes\ext\scala-compiler.jar
-classpath.3=lib\classes\ext\scala-library.jar
-classpath.4=lib\classes\ext\scala-swing.jar
-classpath.5=lib\classes\ext\scala-actors.jar
-classpath.6=lib\classes\ext\scala-reflect.jar
-classpath.7=src\Tools\jEdit\dist\jedit.jar
 vm.location=contrib\jdk\x86-cygwin\jre\bin\server\jvm.dll
 splash.image=lib\logo\isabelle.bmp
 vmarg.1=-Disabelle.home=%INI_DIR%
--- a/Admin/build	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/build	Fri Sep 13 09:31:45 2013 +0200
@@ -74,7 +74,7 @@
 
 ## main
 
-#workaround for scalac
+#workaround for scalac 2.10.2
 function stty() { :; }
 export -f stty
 
--- a/Admin/components/components.sha1	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/components/components.sha1	Fri Sep 13 09:31:45 2013 +0200
@@ -33,6 +33,8 @@
 06e9be2627ebb95c45a9bcfa025d2eeef086b408  jedit_build-20130104.tar.gz
 c85c0829b8170f25aa65ec6852f505ce2a50639b  jedit_build-20130628.tar.gz
 5de3e399be2507f684b49dfd13da45228214bbe4  jedit_build-20130905.tar.gz
+87136818fd5528d97288f5b06bd30c787229eb0d  jedit_build-20130910.tar.gz
+0bd2bc2d9a491ba5fc8dd99df27c04f11a72e8fa  jfreechart-1.0.14-1.tar.gz
 8122526f1fc362ddae1a328bdbc2152853186fee  jfreechart-1.0.14.tar.gz
 6c737137cc597fc920943783382e928ea79e3feb  kodkodi-1.2.16.tar.gz
 5f95c96bb99927f3a026050f85bd056f37a9189e  kodkodi-1.5.2.tar.gz
@@ -58,6 +60,7 @@
 e6a43b7b3b21295853bd2a63b27ea20bd6102f5f  windows_app-20130906.tar.gz
 8fe004aead867d4c82425afac481142bd3f01fb0  windows_app-20130908.tar.gz
 d273abdc7387462f77a127fa43095eed78332b5c  windows_app-20130909.tar.gz
+1c36a840320dfa9bac8af25fc289a4df5ea3eccb  xz-java-1.2-1.tar.gz
 2ae13aa17d0dc95ce254a52f1dba10929763a10d  xz-java-1.2.tar.gz
 4530a1aa6f4498ee3d78d6000fa71a3f63bd077f  yices-1.0.28.tar.gz
 12ae71acde43bd7bed1e005c43034b208c0cba4c  z3-3.2.tar.gz
--- a/Admin/components/main	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/components/main	Fri Sep 13 09:31:45 2013 +0200
@@ -4,11 +4,11 @@
 exec_process-1.0.3
 Haskabelle-2013
 jdk-7u25
-jedit_build-20130905
-jfreechart-1.0.14
+jedit_build-20130910
+jfreechart-1.0.14-1
 kodkodi-1.5.2
 polyml-5.5.0-3
 scala-2.10.2
 spass-3.8ds
 z3-3.2
-xz-java-1.2
+xz-java-1.2-1
--- a/Admin/lib/Tools/makedist	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/lib/Tools/makedist	Fri Sep 13 09:31:45 2013 +0200
@@ -168,6 +168,8 @@
 find . "(" -name \*.thy -o -name \*.ML -o -name \*.scala ")" -perm +111 -print | xargs chmod -f -x
 find . -print | xargs chmod -f u+rw
 
+export CLASSPATH="$ISABELLE_CLASSPATH"
+
 ./bin/isabelle env ISABELLE_SCALA_BUILD_OPTIONS="$ISABELLE_SCALA_BUILD_OPTIONS -optimise" \
   ./Admin/build all || fail "Failed to build distribution"
 
--- a/Admin/lib/Tools/makedist_bundle	Thu Sep 12 22:10:17 2013 +0200
+++ b/Admin/lib/Tools/makedist_bundle	Fri Sep 13 09:31:45 2013 +0200
@@ -51,6 +51,30 @@
 tar -C "$TMP" -x -z -f "$ARCHIVE" || exit 2
 
 
+# distribution classpath (based on educated guesses)
+
+splitarray ":" "$ISABELLE_CLASSPATH"; CLASSPATH_ENTRIES=("${SPLITARRAY[@]}")
+declare -a DISTRIBITION_CLASSPATH=()
+
+for ENTRY in "${CLASSPATH_ENTRIES[@]}"
+do
+  ENTRY=$(echo "$ENTRY" | perl -n -e "
+    if (m,$ISABELLE_HOME/(.*)\$,) { print qq{\$1}; }
+    elsif (m,$USER_HOME/.isabelle/contrib/(.*)\$,) { print qq{contrib/\$1}; }
+    else { print; };
+    print qq{\n};")
+  DISTRIBITION_CLASSPATH["${#DISTRIBITION_CLASSPATH[@]}"]="$ENTRY"
+done
+
+DISTRIBITION_CLASSPATH["${#DISTRIBITION_CLASSPATH[@]}"]="src/Tools/jEdit/dist/jedit.jar"
+
+echo "classpath"
+for ENTRY in "${DISTRIBITION_CLASSPATH[@]}"
+do
+  echo "  $ENTRY"
+done
+
+
 # bundled components
 
 init_component "$JEDIT_HOME"
@@ -128,9 +152,19 @@
 case "$PLATFORM_FAMILY" in
   linux)
     purge_contrib '-name "x86*-darwin" -o -name "x86*-cygwin" -o -name "x86*-windows"'
+
+    LINUX_CLASSPATH=""
+    for ENTRY in "${DISTRIBITION_CLASSPATH[@]}"
+    do
+      if [ -z "$LINUX_CLASSPATH" ]; then
+        LINUX_CLASSPATH="\\\$ISABELLE_HOME/$ENTRY"
+      else
+        LINUX_CLASSPATH="$LINUX_CLASSPATH:\\\$ISABELLE_HOME/$ENTRY"
+      fi
+    done
     cat "$ISABELLE_HOME/Admin/Linux/Isabelle" | \
-      perl -p -e "s,{JAVA_ARGS},$JEDIT_JAVA_OPTIONS $JEDIT_SYSTEM_OPTIONS,g;" \
-        > "$ISABELLE_TARGET/$ISABELLE_NAME"
+      perl -p > "$ISABELLE_TARGET/$ISABELLE_NAME" \
+        -e "s,{JAVA_ARGS},$JEDIT_JAVA_OPTIONS $JEDIT_SYSTEM_OPTIONS,g; s,{CLASSPATH},$LINUX_CLASSPATH,;"
     chmod +x "$ISABELLE_TARGET/$ISABELLE_NAME"
     ;;
   macos)
@@ -151,6 +185,7 @@
 
     (
       cat "$ISABELLE_HOME/Admin/Windows/WinRun4J/Isabelle.ini"
+
       declare -a JAVA_ARGS=()
       eval "JAVA_ARGS=($ISABELLE_JAVA_SYSTEM_OPTIONS $JEDIT_JAVA_OPTIONS $JEDIT_SYSTEM_OPTIONS)"
       A=2
@@ -159,6 +194,14 @@
         echo -e "vmarg.$A=$ARG\r"
         A=$[ $A + 1 ]
       done
+
+      A=1
+      for ENTRY in "${DISTRIBITION_CLASSPATH[@]}"
+      do
+        ENTRY=$(echo "$ENTRY" | perl -p -e 's,/,\\\\,g;')
+        echo -e "classpath.$A=$ENTRY\r"
+        A=$[ $A + 1 ]
+      done
     ) > "$ISABELLE_TARGET/${ISABELLE_NAME}.ini"
 
     cp "$TMP/windows_app/Isabelle.exe" "$ISABELLE_TARGET/${ISABELLE_NAME}.exe"
@@ -233,11 +276,10 @@
           cat "$APP_TEMPLATE/Info.plist-part2"
         ) | perl -p -e "s,{ISABELLE_NAME},${ISABELLE_NAME},g;" > "$APP/Contents/Info.plist"
 
-        for NAME in Pure.jar scala-compiler.jar scala-library.jar scala-swing.jar scala-actors.jar scala-reflect.jar
+        for ENTRY in "${DISTRIBITION_CLASSPATH[@]}"
         do
-          ln -sf "../Resources/${ISABELLE_NAME}/lib/classes/ext/$NAME" "$APP/Contents/Java"
+          ln -sf "../Resources/${ISABELLE_NAME}/$ENTRY" "$APP/Contents/Java"
         done
-        ln -sf "../Resources/${ISABELLE_NAME}/src/Tools/jEdit/dist/jedit.jar" "$APP/Contents/Java"
 
         cp -R "$APP_TEMPLATE/Resources/." "$APP/Contents/Resources/."
         cp "$APP_TEMPLATE/../isabelle.icns" "$APP/Contents/Resources/."
--- a/NEWS	Thu Sep 12 22:10:17 2013 +0200
+++ b/NEWS	Fri Sep 13 09:31:45 2013 +0200
@@ -225,7 +225,7 @@
   - The whole reflection stack has been decomposed into conversions.
 INCOMPATIBILITY.
 
-* Weaker precendence of syntax for big intersection and union on sets,
+* Stronger precedence of syntax for big intersection and union on sets,
 in accordance with corresponding lattice operations.  INCOMPATIBILITY.
 
 * Nested case expressions are now translated in a separate check phase
@@ -260,8 +260,7 @@
 
 * Locale hierarchy for abstract orderings and (semi)lattices.
 
-* Discontinued theory src/HOL/Library/Eval_Witness.
-INCOMPATIBILITY.
+* Discontinued theory src/HOL/Library/Eval_Witness.  INCOMPATIBILITY.
 
 * Discontinued obsolete src/HOL/IsaMakefile (considered legacy since
 Isabelle2013).  Use "isabelle build" to operate on Isabelle sessions.
@@ -278,9 +277,9 @@
 Code_Target_Nat and Code_Target_Numeral.  See the tutorial on code
 generation for details.  INCOMPATIBILITY.
 
-* Complete_Partial_Order.admissible is defined outside the type 
-class ccpo, but with mandatory prefix ccpo. Admissibility theorems
-lose the class predicate assumption or sort constraint when possible.
+* Complete_Partial_Order.admissible is defined outside the type class
+ccpo, but with mandatory prefix ccpo. Admissibility theorems lose the
+class predicate assumption or sort constraint when possible.
 INCOMPATIBILITY.
 
 * Introduce type class "conditionally_complete_lattice": Like a
--- a/README	Thu Sep 12 22:10:17 2013 +0200
+++ b/README	Fri Sep 13 09:31:45 2013 +0200
@@ -10,14 +10,12 @@
 Installation
 
    Isabelle works on the three main platform families: Linux, Windows,
-   and Mac OS X.
-
-   Completely integrated bundles including the full Isabelle sources,
-   documentation, add-on tools and precompiled logic images for
-   several platforms are available from the Isabelle web page.
+   and Mac OS X.  The fully integrated application bundles from the
+   Isabelle web page include sources, documentation, and add-on tools
+   for all supported platforms.
 
    Some background information may be found in the Isabelle System
-   Manual, distributed with the sources (directory doc).
+   Manual (directory doc).
 
 User interfaces
 
--- a/etc/isar-keywords.el	Thu Sep 12 22:10:17 2013 +0200
+++ b/etc/isar-keywords.el	Fri Sep 13 09:31:45 2013 +0200
@@ -319,7 +319,6 @@
     "constant"
     "constrains"
     "datatypes"
-    "defaults"
     "defines"
     "file"
     "fixes"
--- a/etc/settings	Thu Sep 12 22:10:17 2013 +0200
+++ b/etc/settings	Fri Sep 13 09:31:45 2013 +0200
@@ -15,6 +15,13 @@
 
 ISABELLE_JAVA_SYSTEM_OPTIONS="-Dfile.encoding=UTF-8 -server"
 
+classpath "$ISABELLE_HOME/lib/classes/Pure.jar"
+classpath "$ISABELLE_HOME/lib/classes/scala-library.jar"
+classpath "$ISABELLE_HOME/lib/classes/scala-swing.jar"
+classpath "$ISABELLE_HOME/lib/classes/scala-actors.jar"
+classpath "$ISABELLE_HOME/lib/classes/scala-compiler.jar"
+classpath "$ISABELLE_HOME/lib/classes/scala-reflect.jar"
+
 
 ###
 ### Interactive sessions (cf. isabelle tty)
--- a/lib/Tools/build_dialog	Thu Sep 12 22:10:17 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-#!/usr/bin/env bash
-#
-# Author: Makarius
-#
-# DESCRIPTION: build Isabelle session images via GUI dialog
-
-
-## diagnostics
-
-PRG="$(basename "$0")"
-
-function usage()
-{
-  echo
-  echo "Usage: isabelle $PRG [OPTIONS]"
-  echo
-  echo "  Options are:"
-  echo "    -L OPTION    default logic via system option"
-  echo "    -d DIR       include session directory"
-  echo "    -l NAME      logic session name"
-  echo "    -s           system build mode: produce output in ISABELLE_HOME"
-  echo
-  echo "  Build Isabelle logic session image via GUI dialog (default: $ISABELLE_LOGIC)."
-  echo
-  exit 1
-}
-
-function fail()
-{
-  echo "$1" >&2
-  exit 2
-}
-
-
-## process command line
-
-LOGIC_OPTION=""
-declare -a INCLUDE_DIRS=()
-LOGIC=""
-SYSTEM_MODE=false
-
-while getopts "L:d:l:s" OPT
-do
-  case "$OPT" in
-    L)
-      LOGIC_OPTION="$OPTARG"
-      ;;
-    d)
-      INCLUDE_DIRS["${#INCLUDE_DIRS[@]}"]="$OPTARG"
-      ;;
-    l)
-      LOGIC="$OPTARG"
-      ;;
-    s)
-      SYSTEM_MODE="true"
-      ;;
-    \?)
-      usage
-      ;;
-  esac
-done
-
-shift $(($OPTIND - 1))
-
-
-# args
-
-[ "$#" -ne 0 ] && usage
-
-
-## main
-
-isabelle_admin_build jars || exit $?
-
-"$ISABELLE_TOOL" java isabelle.Build_Dialog \
-  "$LOGIC_OPTION" "$LOGIC" "$SYSTEM_MODE" "${INCLUDE_DIRS[@]}"
-
--- a/lib/Tools/java	Thu Sep 12 22:10:17 2013 +0200
+++ b/lib/Tools/java	Fri Sep 13 09:31:45 2013 +0200
@@ -4,9 +4,11 @@
 #
 # DESCRIPTION: invoke Java within the Isabelle environment
 
-CLASSPATH="$(jvmpath "$CLASSPATH")"
+declare -a JAVA_ARGS; eval "JAVA_ARGS=($ISABELLE_JAVA_SYSTEM_OPTIONS)"
 
-declare -a JAVA_ARGS; eval "JAVA_ARGS=($ISABELLE_JAVA_SYSTEM_OPTIONS)"
+[ -n "$CLASSPATH" ] && classpath "$CLASSPATH"
+unset CLASSPATH
+
 isabelle_jdk java "${JAVA_ARGS[@]}" \
-  "-Djava.ext.dirs=$(jvmpath "$ISABELLE_JAVA_EXT:$ISABELLE_HOME/lib/classes/ext")" "$@"
+  -classpath "$(jvmpath "$ISABELLE_CLASSPATH")" "$@"
 
--- a/lib/Tools/scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/lib/Tools/scala	Fri Sep 13 09:31:45 2013 +0200
@@ -6,6 +6,6 @@
 
 isabelle_admin_build jars || exit $?
 
-CLASSPATH="$(jvmpath "$CLASSPATH")"
 isabelle_scala scala -Dfile.encoding=UTF-8 \
-  "-Djava.ext.dirs=$(jvmpath "$ISABELLE_JAVA_EXT:$ISABELLE_HOME/lib/classes/ext")" "$@"
+  -classpath "$(jvmpath "$ISABELLE_CLASSPATH")" "$@"
+
--- a/lib/Tools/scalac	Thu Sep 12 22:10:17 2013 +0200
+++ b/lib/Tools/scalac	Fri Sep 13 09:31:45 2013 +0200
@@ -6,7 +6,6 @@
 
 isabelle_admin_build jars || exit $?
 
-CLASSPATH="$(jvmpath "$CLASSPATH")"
 isabelle_scala scalac -Dfile.encoding=UTF-8 \
-  "-Djava.ext.dirs=$(jvmpath "$ISABELLE_JAVA_EXT:$ISABELLE_HOME/lib/classes/ext")" "$@"
+  -classpath "$(jvmpath "$ISABELLE_CLASSPATH")" "$@"
 
--- a/lib/scripts/getsettings	Thu Sep 12 22:10:17 2013 +0200
+++ b/lib/scripts/getsettings	Fri Sep 13 09:31:45 2013 +0200
@@ -21,16 +21,20 @@
   ISABELLE_HOME_WINDOWS="$(cygpath -w "$(dirname "$ISABELLE_HOME")")\\$(basename "$ISABELLE_HOME")"
   ISABELLE_HOME="$(cygpath -u "$ISABELLE_HOME_WINDOWS")"
 
-  CLASSPATH="$(cygpath -i -u -p "$CLASSPATH")"
   function jvmpath() { cygpath -i -C UTF8 -w -p "$@"; }
   CYGWIN_ROOT="$(jvmpath "/")"
+
+  ISABELLE_CLASSPATH="$(cygpath -i -u -p "$CLASSPATH")"
+  unset CLASSPATH
 else
   if [ -z "$USER_HOME" ]; then
     USER_HOME="$HOME"
   fi
 
   function jvmpath() { echo "$@"; }
-  CLASSPATH="$CLASSPATH"
+
+  ISABELLE_CLASSPATH="$CLASSPATH"
+  unset CLASSPATH
 fi
 
 export ISABELLE_HOME
@@ -122,18 +126,18 @@
   function isabelle_admin_build () { return 0; }
 fi
 
-#CLASSPATH convenience
+#classpath
 function classpath ()
 {
   for X in "$@"
   do
-    if [ -z "$CLASSPATH" ]; then
-      CLASSPATH="$X"
+    if [ -z "$ISABELLE_CLASSPATH" ]; then
+      ISABELLE_CLASSPATH="$X"
     else
-      CLASSPATH="$X:$CLASSPATH"
+      ISABELLE_CLASSPATH="$ISABELLE_CLASSPATH:$X"
     fi
   done
-  export CLASSPATH
+  export ISABELLE_CLASSPATH
 }
 
 #arrays
--- a/src/Doc/Datatypes/Datatypes.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/Datatypes/Datatypes.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -17,7 +17,7 @@
 generated datatypes and codatatypes. The datatype support is similar to that
 provided by the earlier package due to Berghofer and Wenzel
 \cite{Berghofer-Wenzel:1999:TPHOL}, documented in the Isar reference manual
-\cite{isabelle-isar-ref}; indeed, replacing the keyword @{command datatype} by
+\cite{isabelle-isar-ref}; indeed, replacing the keyword \keyw{datatype} by
 @{command datatype_new} is usually all that is needed to port existing theories
 to use the new package.
 
@@ -40,7 +40,7 @@
 text {*
 \noindent
 The package also provides some convenience, notably automatically generated
-destructors (discriminators and selectors).
+discriminators and selectors.
 
 In addition to plain inductive datatypes, the new package supports coinductive
 datatypes, or \emph{codatatypes}, which may have infinite values. For example,
@@ -75,7 +75,7 @@
 infinitely many direct subtrees.
 
 To use the package, it is necessary to import the @{theory BNF} theory, which
-can be precompiled into the \textit{HOL-BNF} image. The following commands show
+can be precompiled into the \texttt{HOL-BNF} image. The following commands show
 how to launch jEdit/PIDE with the image loaded and how to build the image
 without launching jEdit:
 *}
@@ -91,10 +91,12 @@
 The package, like its predecessor, fully adheres to the LCF philosophy
 \cite{mgordon79}: The characteristic theorems associated with the specified
 (co)datatypes are derived rather than introduced axiomatically.%
-\footnote{If the \textit{quick\_and\_dirty} option is enabled, some of the
+\footnote{If the @{text quick_and_dirty} option is enabled, some of the
 internal constructions and most of the internal proof obligations are skipped.}
 The package's metatheory is described in a pair of papers
-\cite{traytel-et-al-2012,blanchette-et-al-wit}.
+\cite{traytel-et-al-2012,blanchette-et-al-wit}. The central notion is that of a
+\emph{bounded natural functor} (BNF)---a well-behaved type constructor for which
+nested (co)recursion is supported.
 
 This tutorial is organized as follows:
 
@@ -106,23 +108,25 @@
 
 \item Section \ref{sec:defining-recursive-functions}, ``Defining Recursive
 Functions,'' describes how to specify recursive functions using
-\keyw{primrec\_new}, @{command fun}, and @{command function}.
+@{command primrec_new}, \keyw{fun}, and \keyw{function}.
 
 \item Section \ref{sec:defining-codatatypes}, ``Defining Codatatypes,''
 describes how to specify codatatypes using the @{command codatatype} command.
 
 \item Section \ref{sec:defining-corecursive-functions}, ``Defining Corecursive
 Functions,'' describes how to specify corecursive functions using the
-\keyw{primcorec} command.
+@{command primcorec} command.
 
 \item Section \ref{sec:registering-bounded-natural-functors}, ``Registering
-Bounded Natural Functors,'' explains how to set up the package to allow nested
-recursion through custom well-behaved type constructors.
+Bounded Natural Functors,'' explains how to use the @{command bnf} command
+to register arbitrary type constructors as BNFs.
 
-\item Section \ref{sec:generating-free-constructor-theorems}, ``Generating Free
-Constructor Theorems,'' explains how to derive convenience theorems for free
-constructors, as performed internally by @{command datatype_new} and
-@{command codatatype}.
+\item Section
+\ref{sec:generating-destructors-and-theorems-for-free-constructors},
+``Generating Destructors and Theorems for Free Constructors,'' explains how to
+use the command @{command wrap_free_constructors} to derive destructor constants
+and theorems for freely generated types, as performed internally by @{command
+datatype_new} and @{command codatatype}.
 
 \item Section \ref{sec:standard-ml-interface}, ``Standard ML Interface,''
 describes the package's programmatic interface.
@@ -149,8 +153,8 @@
 in.\allowbreak tum.\allowbreak de}}
 
 The commands @{command datatype_new} and @{command primrec_new} are expected to
-displace @{command datatype} and @{command primrec} in a future release. Authors
-of new theories are encouraged to use the new commands, and maintainers of older
+displace \keyw{datatype} and \keyw{primrec} in a future release. Authors of new
+theories are encouraged to use the new commands, and maintainers of older
 theories may want to consider upgrading.
 
 Comments and bug reports concerning either the tool or this tutorial should be
@@ -163,7 +167,6 @@
 for its appearance. If you have ideas regarding material that should be
 included, please let the authors know.
 \end{framed}
-
 *}
 
 
@@ -171,10 +174,10 @@
   \label{sec:defining-datatypes} *}
 
 text {*
-This section describes how to specify datatypes using the @{command datatype_new}
-command. The command is first illustrated through concrete examples featuring
-different flavors of recursion. More examples can be found in the directory
-\verb|~~/src/HOL/BNF/Examples|.
+This section describes how to specify datatypes using the @{command
+datatype_new} command. The command is first illustrated through concrete
+examples featuring different flavors of recursion. More examples can be found in
+the directory \verb|~~/src/HOL/BNF/Examples|.
 *}
 
 
@@ -253,17 +256,17 @@
 
 text {*
 \noindent
-Nonatomic types must be enclosed in double quotes on the right-hand side of the
-equal sign, as is customary in Isabelle.
+Occurrences of nonatomic types on the right-hand side of the equal sign must be
+enclosed in double quotes, as is customary in Isabelle.
 *}
 
 
 subsubsection {* Mutual Recursion *}
 
 text {*
-\emph{Mutually recursive} types are introduced simultaneously and may refer to each
-other. The example below introduces a pair of types for even and odd natural
-numbers:
+\emph{Mutually recursive} types are introduced simultaneously and may refer to
+each other. The example below introduces a pair of types for even and odd
+natural numbers:
 *}
 
     datatype_new enat = EZero | ESuc onat
@@ -301,7 +304,7 @@
 *}
 
     datatype_new 'a wrong = Wrong (*<*)'a
-    typ (*>*)"'a wrong \<Rightarrow> 'a wrong"
+    typ (*>*)"'a wrong \<Rightarrow> 'a"
 
 text {*
 \noindent
@@ -312,7 +315,7 @@
 
     datatype_new ('a, 'b) fn = Fn "'a \<Rightarrow> 'b"
     datatype_new 'a also_wrong = Also_Wrong (*<*)'a
-    typ (*>*)"('a also_wrong, 'a also_wrong) fn"
+    typ (*>*)"('a also_wrong, 'a) fn"
 
 text {*
 \noindent
@@ -321,6 +324,12 @@
 @{text 'a\<^sub>m}. These type arguments are called \emph{live}; the remaining
 type arguments are called \emph{dead}. In @{typ "'a \<Rightarrow> 'b"} and
 @{typ "('a, 'b) fn"}, the type variable @{typ 'a} is dead and @{typ 'b} is live.
+
+Type constructors must be registered as bounded natural functors (BNFs) to have
+live arguments. This is done automatically for datatypes and codatatypes
+introduced by the @{command datatype_new} and @{command codatatype} commands.
+Section~\ref{sec:registering-bounded-natural-functors} explains how to register
+arbitrary type constructors as BNFs.
 *}
 
 
@@ -336,12 +345,8 @@
 \begin{itemize}
 \setlength{\itemsep}{0pt}
 
-\item \relax{Set functions} (or \relax{natural transformations}):
-@{text t_set1}, \ldots, @{text t_setm}
-
-\item \relax{Map function} (or \relax{functorial action}): @{text t_map}
-
-\item \relax{Relator}: @{text t_rel}
+\item \relax{Case combinator}: @{text t_case} (rendered using the familiar
+@{text case}--@{text of} syntax)
 
 \item \relax{Iterator}: @{text t_fold}
 
@@ -351,16 +356,25 @@
 @{text "t.is_C\<^sub>n"}
 
 \item \relax{Selectors}:
-@{text t.un_C11}$, \ldots, @{text t.un_C1k\<^sub>1}, \\
+@{text t.un_C\<^sub>11}$, \ldots, @{text t.un_C\<^sub>1k\<^sub>1}, \\
 \phantom{\relax{Selectors:}} \quad\vdots \\
-\phantom{\relax{Selectors:}} @{text t.un_Cn1}$, \ldots, @{text t.un_Cnk\<^sub>n}.
+\phantom{\relax{Selectors:}} @{text t.un_C\<^sub>n1}$, \ldots, @{text t.un_C\<^sub>nk\<^sub>n}.
+
+\item \relax{Set functions} (or \relax{natural transformations}):
+@{text t_set1}, \ldots, @{text t_setm}
+
+\item \relax{Map function} (or \relax{functorial action}): @{text t_map}
+
+\item \relax{Relator}: @{text t_rel}
+
 \end{itemize}
 
 \noindent
-The discriminators and selectors are collectively called \emph{destructors}. The
-prefix ``@{text "t."}'' is an optional component of the name and is normally
-hidden. The set functions, map function, relator, discriminators, and selectors
-can be given custom names, as in the example below:
+The case combinator, discriminators, and selectors are collectively called
+\emph{destructors}. The prefix ``@{text "t."}'' is an optional component of the
+name and is normally hidden. The set functions, map function, relator,
+discriminators, and selectors can be given custom names, as in the example
+below:
 *}
 
 (*<*)
@@ -372,7 +386,8 @@
       Nil ("[]") and
       Cons (infixr "#" 65)
 
-    hide_const Nil Cons hd tl map
+    hide_type list
+    hide_const Nil Cons hd tl set map list_all2 list_case list_rec
 
     locale dummy_list
     begin
@@ -393,21 +408,18 @@
 discriminator associated with @{const Cons} is simply
 @{term "\<lambda>xs. \<not> null xs"}.
 
-The @{text "defaults"} keyword following the @{const Nil} constructor specifies
-a default value for selectors associated with other constructors. Here, it is
-used to ensure that the tail of the empty list is the empty list (instead of
-being left unspecified).
+The @{text defaults} clause following the @{const Nil} constructor specifies a
+default value for selectors associated with other constructors. Here, it is used
+to ensure that the tail of the empty list is itself (instead of being left
+unspecified).
 
 Because @{const Nil} is a nullary constructor, it is also possible to use
 @{term "\<lambda>xs. xs = Nil"} as a discriminator. This is specified by
-entering ``@{text "="}'' instead of the identifier @{const null} in the
-declaration above. Although this may look appealing, the mixture of constructors
-and selectors in the resulting characteristic theorems can lead Isabelle's
-automation to switch between the constructor and the destructor view in
-surprising ways.
-*}
+entering ``@{text "="}'' instead of the identifier @{const null}. Although this
+may look appealing, the mixture of constructors and selectors in the
+characteristic theorems can lead Isabelle's automation to switch between the
+constructor and the destructor view in surprising ways.
 
-text {*
 The usual mixfix syntaxes are available for both types and constructors. For
 example:
 *}
@@ -415,19 +427,23 @@
 (*<*)
     end
 (*>*)
-    datatype_new ('a, 'b) prod (infixr "*" 20) =
-      Pair 'a 'b
+    datatype_new ('a, 'b) prod (infixr "*" 20) = Pair 'a 'b
+
+text {* \blankline *}
 
     datatype_new (set: 'a) list (map: map rel: list_all2) =
       null: Nil ("[]")
     | Cons (hd: 'a) (tl: "'a list") (infixr "#" 65)
 
 text {*
+\noindent
 Incidentally, this is how the traditional syntaxes can be set up:
 *}
 
     syntax "_list" :: "args \<Rightarrow> 'a list" ("[(_)]")
 
+text {* \blankline *}
+
     translations
       "[x, xs]" == "x # [xs]"
       "[x]" == "x # []"
@@ -440,49 +456,48 @@
 Datatype definitions have the following general syntax:
 
 @{rail "
-  @@{command datatype_new} @{syntax target}? @{syntax dt_options}? \\
+  @@{command_def datatype_new} target? @{syntax dt_options}? \\
     (@{syntax dt_name} '=' (@{syntax ctor} + '|') + @'and')
   ;
   @{syntax_def dt_options}: '(' ((@'no_discs_sels' | @'rep_compat') + ',') ')'
 "}
 
-The syntactic quantity @{syntax target} can be used to specify a local context
-(e.g., @{text "(in linorder)"}). It is documented in the Isar reference manual
-\cite{isabelle-isar-ref}.
-
-The optional target is followed by optional options:
+The syntactic quantity \synt{target} can be used to specify a local
+context---e.g., @{text "(in linorder)"}. It is documented in the Isar reference
+manual \cite{isabelle-isar-ref}.
+%
+The optional target is optionally followed by datatype-specific options:
 
 \begin{itemize}
 \setlength{\itemsep}{0pt}
 
 \item
-The \keyw{no\_discs\_sels} option indicates that no destructors (i.e.,
-discriminators and selectors) should be generated.
+The \keyw{no\_discs\_sels} option indicates that no discriminators or selectors
+should be generated.
 
 \item
 The \keyw{rep\_compat} option indicates that the names generated by the
-package should contain optional (and normally not displayed) @{text "new."}
-components to prevent clashes with a later call to @{command rep_datatype}. See
+package should contain optional (and normally not displayed) ``@{text "new."}''
+components to prevent clashes with a later call to \keyw{rep\_datatype}. See
 Section~\ref{ssec:datatype-compatibility-issues} for details.
 \end{itemize}
 
 The left-hand sides of the datatype equations specify the name of the type to
-define, its type parameters, and optional additional information:
+define, its type parameters, and additional information:
 
 @{rail "
-  @{syntax_def dt_name}: @{syntax tyargs}? @{syntax name}
-    @{syntax map_rel}? @{syntax mixfix}?
+  @{syntax_def dt_name}: @{syntax tyargs}? name @{syntax map_rel}? mixfix?
   ;
-  @{syntax_def tyargs}: @{syntax typefree} | '(' ((@{syntax name} ':')? @{syntax typefree} + ',') ')'
+  @{syntax_def tyargs}: typefree | '(' ((name ':')? typefree + ',') ')'
   ;
-  @{syntax_def map_rel}: '(' ((('map' | 'rel') ':' @{syntax name}) +) ')'
+  @{syntax_def map_rel}: '(' ((('map' | 'rel') ':' name) +) ')'
 "}
 
 \noindent
-The syntactic quantity @{syntax name} denotes an identifier, @{syntax typefree}
-denotes fixed type variable (@{typ 'a}, @{typ 'b}, \ldots), and @{syntax
-mixfix} denotes the usual parenthesized mixfix notation. They are documented in
-the Isar reference manual \cite{isabelle-isar-ref}.
+The syntactic quantity \synt{name} denotes an identifier, \synt{typefree}
+denotes fixed type variable (@{typ 'a}, @{typ 'b}, \ldots), and \synt{mixfix}
+denotes the usual parenthesized mixfix notation. They are documented in the Isar
+reference manual \cite{isabelle-isar-ref}.
 
 The optional names preceding the type variables allow to override the default
 names of the set functions (@{text t_set1}, \ldots, @{text t_setM}).
@@ -490,28 +505,32 @@
 specify exactly the same type variables in the same order.
 
 @{rail "
-  @{syntax_def ctor}: (@{syntax name} ':')? @{syntax name} (@{syntax ctor_arg} * ) \\
-    @{syntax dt_sel_defaults}? @{syntax mixfix}?
+  @{syntax_def ctor}: (name ':')? name (@{syntax ctor_arg} * ) \\
+    @{syntax dt_sel_defaults}? mixfix?
 "}
 
+\medskip
+
 \noindent
 The main constituents of a constructor specification is the name of the
 constructor and the list of its argument types. An optional discriminator name
 can be supplied at the front to override the default name
-(@{text t.un_C}$_{ij}$).
+(@{text t.is_C\<^sub>j}).
 
 @{rail "
-  @{syntax_def ctor_arg}: @{syntax type} | '(' @{syntax name} ':' @{syntax type} ')'
+  @{syntax_def ctor_arg}: type | '(' name ':' type ')'
 "}
 
+\medskip
+
 \noindent
 In addition to the type of a constructor argument, it is possible to specify a
 name for the corresponding selector to override the default name
-(@{text t.un_C}$_{ij}$). The same selector names can be reused for several
-constructors as long as they have the same type.
+(@{text un_C\<^sub>ji}). The same selector names can be reused for several
+constructors as long as they share the same type.
 
 @{rail "
-  @{syntax_def dt_sel_defaults}: '(' @'defaults' (@{syntax name} ':' @{syntax term} +) ')'
+  @{syntax_def dt_sel_defaults}: '(' @'defaults' (name ':' term +) ')'
 "}
 
 \noindent
@@ -519,28 +538,209 @@
 @{text "C \<Colon> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<sigma>\<^sub>p \<Rightarrow> \<sigma>"},
 default values can be specified for any selector
 @{text "un_D \<Colon> \<sigma> \<Rightarrow> \<tau>"}
-associated with other constructors. The specified default value must have type
+associated with other constructors. The specified default value must be of type
 @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> \<sigma>\<^sub>p \<Rightarrow> \<tau>"}
-(i.e., it may dependend on @{text C}'s arguments).
+(i.e., it may depend on @{text C}'s arguments).
 *}
 
 subsection {* Generated Theorems
   \label{ssec:datatype-generated-theorems} *}
 
 text {*
-  * free ctor theorems
-    * case syntax
+The characteristic theorems generated by @{command datatype_new} are grouped in
+two broad categories:
+
+\begin{itemize}
+\item The \emph{free constructor theorems} are properties about the constructors
+and destructors that can be derived for any freely generated type. Internally,
+the derivation is performed by @{command wrap_free_constructors}.
+
+\item The \emph{functorial theorems} are properties of datatypes related to
+their BNF nature.
+
+\item The \emph{inductive theorems} are properties of datatypes related to
+their inductive nature.
+
+\end{itemize}
+
+\noindent
+The full list of named theorems can be obtained as usual by entering the
+command \keyw{print\_theorems} immediately after the datatype definition.
+This list normally excludes low-level theorems that reveal internal
+constructions. To make these accessible, add the line
+*}
+
+    declare [[bnf_note_all]]
+(*<*)
+    declare [[bnf_note_all = false]]
+(*>*)
+
+text {*
+\noindent
+to the top of the theory file.
+*}
+
+subsubsection {* Free Constructor Theorems *}
+
+(*<*)
+    consts is_Cons :: 'a
+(*>*)
+
+text {*
+The first subgroup of properties are concerned with the constructors.
+They are listed below for @{typ "'a list"}:
+
+\begin{indentblock}
+\begin{description}
+
+\item[@{text "t."}\hthm{inject} @{text "[iff, induct_simp]"}\upshape:] ~ \\
+@{thm list.inject[no_vars]}
+
+\item[@{text "t."}\hthm{distinct} @{text "[simp, induct_simp]"}\upshape:] ~ \\
+@{thm list.distinct(1)[no_vars]} \\
+@{thm list.distinct(2)[no_vars]}
+
+\item[@{text "t."}\hthm{exhaust} @{text "[cases t, case_names C\<^sub>1 \<dots> C\<^sub>n]"}\upshape:] ~ \\
+@{thm list.exhaust[no_vars]}
+
+\item[@{text "t."}\hthm{nchotomy}\upshape:] ~ \\
+@{thm list.nchotomy[no_vars]}
+
+\end{description}
+\end{indentblock}
+
+\noindent
+The next subgroup is concerned with the case combinator:
+
+\begin{indentblock}
+\begin{description}
+
+\item[@{text "t."}\hthm{case} @{text "[simp]"}\upshape:] ~ \\
+@{thm list.case(1)[no_vars]} \\
+@{thm list.case(2)[no_vars]}
+
+\item[@{text "t."}\hthm{case\_cong}\upshape:] ~ \\
+@{thm list.case_cong[no_vars]}
+
+\item[@{text "t."}\hthm{weak\_case\_cong} @{text "[cong]"}\upshape:] ~ \\
+@{thm list.weak_case_cong[no_vars]}
+
+\item[@{text "t."}\hthm{split}\upshape:] ~ \\
+@{thm list.split[no_vars]}
+
+\item[@{text "t."}\hthm{split\_asm}\upshape:] ~ \\
+@{thm list.split_asm[no_vars]}
+
+\item[@{text "t."}\hthm{splits} = @{text "split split_asm"}]
+
+\end{description}
+\end{indentblock}
+
+\noindent
+The third and last subgroup revolves around discriminators and selectors:
+
+\begin{indentblock}
+\begin{description}
 
-  * per-type theorems
-    * sets, map, rel
-    * induct, fold, rec
-    * simps
+\item[@{text "t."}\hthm{discs} @{text "[simp]"}\upshape:] ~ \\
+@{thm list.discs(1)[no_vars]} \\
+@{thm list.discs(2)[no_vars]}
+
+\item[@{text "t."}\hthm{sels} @{text "[simp]"}\upshape:] ~ \\
+@{thm list.sels(1)[no_vars]} \\
+@{thm list.sels(2)[no_vars]}
+
+\item[@{text "t."}\hthm{collapse} @{text "[simp]"}\upshape:] ~ \\
+@{thm list.collapse(1)[no_vars]} \\
+@{thm list.collapse(2)[no_vars]}
+
+\item[@{text "t."}\hthm{disc\_exclude}\upshape:] ~ \\
+These properties are missing for @{typ "'a list"} because there is only one
+proper discriminator. Had the datatype been introduced with a second
+discriminator called @{const is_Cons}, they would have read thusly: \\[\jot]
+@{prop "null list \<Longrightarrow> \<not> is_Cons list"} \\
+@{prop "is_Cons list \<Longrightarrow> \<not> null list"}
+
+\item[@{text "t."}\hthm{disc\_exhaust} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\upshape:] ~ \\
+@{thm list.disc_exhaust[no_vars]}
+
+\item[@{text "t."}\hthm{expand}\upshape:] ~ \\
+@{thm list.expand[no_vars]}
+
+\item[@{text "t."}\hthm{case\_conv}\upshape:] ~ \\
+@{thm list.case_conv[no_vars]}
+
+\end{description}
+\end{indentblock}
+*}
+
+
+subsubsection {* Functorial Theorems *}
+
+text {*
+The BNF-related theorems are listed below:
+
+\begin{indentblock}
+\begin{description}
+
+\item[@{text "t."}\hthm{sets} @{text "[code]"}\upshape:] ~ \\
+@{thm list.sets(1)[no_vars]} \\
+@{thm list.sets(2)[no_vars]}
+
+\item[@{text "t."}\hthm{map} @{text "[code]"}\upshape:] ~ \\
+@{thm list.map(1)[no_vars]} \\
+@{thm list.map(2)[no_vars]}
 
-  * multi-type (``common'') theorems
-    * induct
+\item[@{text "t."}\hthm{rel\_inject} @{text "[code]"}\upshape:] ~ \\
+@{thm list.rel_inject(1)[no_vars]} \\
+@{thm list.rel_inject(2)[no_vars]}
+
+\item[@{text "t."}\hthm{rel\_distinct} @{text "[code]"}\upshape:] ~ \\
+@{thm list.rel_distinct(1)[no_vars]} \\
+@{thm list.rel_distinct(2)[no_vars]}
+
+\end{description}
+\end{indentblock}
+*}
+
+
+subsubsection {* Inductive Theorems *}
+
+text {*
+The inductive theorems are listed below:
+
+\begin{indentblock}
+\begin{description}
+
+\item[@{text "t."}\hthm{induct} @{text "[induct t, case_names C\<^sub>1 \<dots> C\<^sub>n]"}\upshape:] ~ \\
+@{thm list.induct[no_vars]}
 
-  * mention what is registered with which attribute
-    * and also nameless safes
+\item[@{text "t\<^sub>1_\<dots>_t\<^sub>m."}\hthm{induct} @{text "[case_names C\<^sub>1 \<dots> C\<^sub>n]"}\upshape:] ~ \\
+Given $m > 1$ mutually recursive datatypes, this induction rule can be used to
+prove $m$ properties simultaneously.
+
+\item[@{text "t."}\hthm{fold} @{text "[code]"}\upshape:] ~ \\
+@{thm list.fold(1)[no_vars]} \\
+@{thm list.fold(2)[no_vars]}
+
+\item[@{text "t."}\hthm{rec} @{text "[code]"}\upshape:] ~ \\
+@{thm list.rec(1)[no_vars]} \\
+@{thm list.rec(2)[no_vars]}
+
+\end{description}
+\end{indentblock}
+
+\noindent
+For convenience, @{command datatype_new} also provides the following collection:
+
+\begin{indentblock}
+\begin{description}
+
+\item[@{text "t."}\hthm{simps} = @{text t.inject} @{text t.distinct} @{text t.case} @{text t.rec} @{text t.fold} @{text t.map} @{text t.rel_inject}] ~ \\
+@{text t.rel_distinct} @{text t.sets}
+
+\end{description}
+\end{indentblock}
 *}
 
 
@@ -568,12 +768,16 @@
       * \keyw{rep\_compat}
       * \keyw{rep\_datatype}
       * has some limitations
-        * mutually recursive datatypes? (fails with rep\_datatype?)
-        * nested datatypes? (fails with datatype\_new?)
+        * mutually recursive datatypes? (fails with rep_datatype?)
+        * nested datatypes? (fails with datatype_new?)
     * option 2
-      * \keyw{datatype\_compat}
+      * @{command datatype_new_compat}
       * not fully implemented yet?
 
+@{rail "
+  @@{command_def datatype_new_compat} types
+"}
+
   * register old datatype as new datatype
     * no clean way yet
     * if the goal is to do recursion through old datatypes, can register it as
@@ -587,11 +791,11 @@
   \label{sec:defining-recursive-functions} *}
 
 text {*
-This describes how to specify recursive functions over datatypes
-specified using @{command datatype_new}. The focus in on the \keyw{primrec\_new}
-command, which supports primitive recursion. A few examples feature the
-@{command fun} and @{command function} commands, described in a separate
-tutorial \cite{isabelle-function}.
+This describes how to specify recursive functions over datatypes specified using
+@{command datatype_new}. The focus is on the @{command primrec_new} command,
+which supports primitive recursion. A few examples feature the \keyw{fun} and
+\keyw{function} commands, described in a separate tutorial
+\cite{isabelle-function}.
 
 %%% TODO: partial_function?
 *}
@@ -811,10 +1015,10 @@
 Primitive recursive functions have the following general syntax:
 
 @{rail "
-  @@{command primrec_new} @{syntax target}? @{syntax \"fixes\"} \\ @'where'
+  @@{command_def primrec_new} target? fixes \\ @'where'
     (@{syntax primrec_equation} + '|')
   ;
-  @{syntax_def primrec_equation}: @{syntax thmdecl}? @{syntax prop}
+  @{syntax_def primrec_equation}: thmdecl? prop
 "}
 *}
 
@@ -856,11 +1060,12 @@
 @{keyword consts}.
 
 \item
-Define the datatype, specifying @{text "un_D\<^sub>0"} as the selector's default value.
+Define the datatype, specifying @{text "un_D\<^sub>0"} as the selector's default
+value.
 
 \item
-Define the behavior of @{text "un_D\<^sub>0"} on values of the newly introduced datatype
-using the @{command overloading} command.
+Define the behavior of @{text "un_D\<^sub>0"} on values of the newly introduced
+datatype using the \keyw{overloading} command.
 
 \item
 Derive the desired equation on @{text un_D} from the characteristic equations
@@ -928,8 +1133,8 @@
 text {*
 Definitions of codatatypes have almost exactly the same syntax as for datatypes
 (Section~\ref{ssec:datatype-syntax}), with two exceptions: The command is called
-@{command codatatype}; the \keyw{no\_discs\_sels} option is not available, because
-destructors are a central notion for codatatypes.
+@{command codatatype}; the \keyw{no\_discs\_sels} option is not available,
+because destructors are a central notion for codatatypes.
 *}
 
 subsection {* Generated Theorems
@@ -941,7 +1146,7 @@
 
 text {*
 This section describes how to specify corecursive functions using the
-\keyw{primcorec} command.
+@{command primcorec} command.
 
 %%% TODO: partial_function? E.g. for defining tail recursive function on lazy
 %%% lists (cf. terminal0 in TLList.thy)
@@ -966,11 +1171,10 @@
 Primitive corecursive definitions have the following general syntax:
 
 @{rail "
-  @@{command primcorec} @{syntax target}? @{syntax \"fixes\"} \\ @'where'
+  @@{command_def primcorec} target? fixes \\ @'where'
     (@{syntax primcorec_formula} + '|')
   ;
-  @{syntax_def primcorec_formula}: @{syntax thmdecl}? @{syntax prop}
-    (@'of' (@{syntax term} * ))?
+  @{syntax_def primcorec_formula}: thmdecl? prop (@'of' (term * ))?
 "}
 *}
 
@@ -1009,19 +1213,18 @@
   \label{ssec:bnf-syntax} *}
 
 text {*
-
 @{rail "
-  @@{command bnf} @{syntax target}? (@{syntax name} ':')? @{syntax term} \\
-    @{syntax term_list} @{syntax term} @{syntax term_list} @{syntax term}?
+  @@{command_def bnf} target? (name ':')? term \\
+    term_list term term_list term?
   ;
-  @{syntax_def X_list}: '[' (@{syntax X} + ',') ']'
+  X_list: '[' (X + ',') ']'
 "}
 
 options: no_discs_sels rep_compat
 *}
 
-section {* Generating Free Constructor Theorems
-  \label{sec:generating-free-constructor-theorems} *}
+section {* Generating Destructors and Theorems for Free Constructors
+  \label{sec:generating-destructors-and-theorems-for-free-constructors} *}
 
 text {*
 This section explains how to derive convenience theorems for free constructors,
@@ -1031,7 +1234,7 @@
     a type not introduced by ...
 
   * also useful for compatibility with old package, e.g. add destructors to
-    old @{command datatype}
+    old \keyw{datatype}
 
   * @{command wrap_free_constructors}
     * \keyw{no\_discs\_sels}, \keyw{rep\_compat}
@@ -1050,23 +1253,21 @@
 Free constructor wrapping has the following general syntax:
 
 @{rail "
-  @@{command wrap_free_constructors} @{syntax target}? @{syntax dt_options} \\
-    @{syntax term_list} @{syntax name} @{syntax fc_discs_sels}?
+  @@{command_def wrap_free_constructors} target? @{syntax dt_options} \\
+    term_list name @{syntax fc_discs_sels}?
   ;
-  @{syntax_def fc_discs_sels}: @{syntax name_list} (@{syntax name_list_list} @{syntax name_term_list_list}? )?
+  @{syntax_def fc_discs_sels}: name_list (name_list_list name_term_list_list? )?
   ;
-  @{syntax_def name_term}: (@{syntax name} ':' @{syntax term})
+  @{syntax_def name_term}: (name ':' term)
 "}
 
 options: no_discs_sels rep_compat
 
 X_list is as for BNF
 
+Section~\ref{ssec:datatype-generated-theorems} lists the generated theorems.
 *}
 
-subsection {* Generated Theorems
-  \label{ssec:ctors-generated-theorems} *}
-
 
 section {* Standard ML Interface
   \label{sec:standard-ml-interface} *}
@@ -1114,7 +1315,7 @@
 *}
 
 text {*
-* primrec\_new and primcorec are vaporware
+* primcorec is unfinished
 
 * slow n-ary mutual (co)datatype, avoid as much as possible (e.g. using nesting)
 
@@ -1128,12 +1329,13 @@
   based on overloading
 
 * no way to register "sum" and "prod" as (co)datatypes to enable N2M reduction for them
-  (for datatype\_compat and prim(co)rec)
+  (for @{command datatype_new_compat} and prim(co)rec)
 
 * no way to register same type as both data- and codatatype?
 
 * no recursion through unused arguments (unlike with the old package)
 
+* in a locale, cannot use locally fixed types (because of limitation in typedef)?
 *}
 
 
--- a/src/Doc/Datatypes/document/root.tex	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/Datatypes/document/root.tex	Fri Sep 13 09:31:45 2013 +0200
@@ -13,11 +13,20 @@
 \usepackage{railsetup}
 \usepackage{framed}
 
+\setcounter{secnumdepth}{3}
+\setcounter{tocdepth}{3}
+
 \newbox\boxA
 \setbox\boxA=\hbox{\ }
 \parindent=4\wd\boxA
 
+\newcommand\blankline{\vskip-.5\baselineskip}
+
+\newenvironment{indentblock}{\list{}{}\item[]}{\endlist}
+
 \newcommand{\keyw}[1]{\isacommand{#1}}
+\newcommand{\synt}[1]{\textit{#1}}
+\newcommand{\hthm}[1]{\textbf{\textit{#1}}}
 
 %\renewcommand{\isactrlsub}[1]{\/$\sb{\mathrm{#1}}$}
 \renewcommand{\isactrlsub}[1]{\/$\sb{#1}$}
--- a/src/Doc/IsarRef/Spec.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/IsarRef/Spec.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -251,7 +251,7 @@
   Here is an artificial example of bundling various configuration
   options: *}
 
-bundle trace = [[simp_trace, blast_trace, linarith_trace, metis_trace, smt_trace]]
+bundle trace = [[simp_trace, linarith_trace, metis_trace, smt_trace]]
 
 lemma "x = x"
   including trace by metis
--- a/src/Doc/Sledgehammer/document/root.tex	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/Sledgehammer/document/root.tex	Fri Sep 13 09:31:45 2013 +0200
@@ -1098,7 +1098,7 @@
 are potentially generated. Whether monomorphization takes place depends on the
 type encoding used. If the option is set to \textit{smart}, it is set to a value
 that was empirically found to be appropriate for the prover. For most provers,
-this value is 200.
+this value is 100.
 
 \nopagebreak
 {\small See also \textit{type\_enc} (\S\ref{problem-encoding}).}
--- a/src/Doc/System/Interfaces.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/System/Interfaces.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -32,10 +32,10 @@
   directories may be included via option @{verbatim "-d"} to augment
   that name space (see also \secref{sec:tool-build}).
 
-  By default, the specified image is checked and built on demand, see
-  also @{tool build_dialog}.  The @{verbatim "-s"} determines where to
-  store the result session image (see also \secref{sec:tool-build}).
-  The @{verbatim "-n"} option bypasses the session build dialog.
+  By default, the specified image is checked and built on demand. The
+  @{verbatim "-s"} option determines where to store the result session
+  image (see also \secref{sec:tool-build}). The @{verbatim "-n"}
+  option bypasses the session build dialog.
 
   The @{verbatim "-m"} option specifies additional print modes for the
   prover process.
--- a/src/Doc/System/Scala.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/System/Scala.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -61,10 +61,10 @@
 
   This allows to compile further Scala modules, depending on existing
   Isabelle/Scala functionality.  The resulting class or jar files can
-  be added to the @{setting CLASSPATH} via the @{verbatim classpath}
-  Bash function that is provided by the Isabelle process environment.
-  Thus add-on components can register themselves in a modular manner,
-  see also \secref{sec:components}.
+  be added to the Java classpath via the @{verbatim classpath} Bash
+  function that is provided by the Isabelle process environment.  Thus
+  add-on components can register themselves in a modular manner, see
+  also \secref{sec:components}.
 
   Note that jEdit (\secref{sec:tool-jedit}) has its own mechanisms for
   adding plugin components, which needs special attention since
--- a/src/Doc/System/Sessions.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Doc/System/Sessions.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -419,31 +419,4 @@
 \end{ttbox}
 *}
 
-
-section {* Build dialog *}
-
-text {* The @{tool_def build_dialog} provides a simple GUI wrapper to
-  the tool Isabelle @{tool build} tool.  This enables user interfaces
-  like Isabelle/jEdit \secref{sec:tool-jedit} to provide read-made
-  logic image on startup.  Its command-line usage is:
-\begin{ttbox}
-Usage: isabelle build_dialog [OPTIONS] LOGIC
-
-  Options are:
-    -L OPTION    default logic via system option
-    -d DIR       include session directory
-    -l NAME      logic session name
-    -s           system build mode: produce output in ISABELLE_HOME
-
-  Build Isabelle logic session image via GUI dialog (default: \$ISABELLE_LOGIC).
-\end{ttbox}
-
-  \medskip Option @{verbatim "-l"} specifies an explicit logic session
-  name.  Option @{verbatim "-L"} specifies a system option name as
-  fall-back to determine the logic session name.  If both are omitted
-  or have empty value, @{setting ISABELLE_LOGIC} is used as default.
-
-  \medskip Options @{verbatim "-d"} and @{verbatim "-s"} have the same
-  meaning as for the command-line @{tool build} tool itself.  *}
-
 end
--- a/src/HOL/BNF/BNF_Def.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/BNF_Def.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -89,6 +89,9 @@
 lemma eq_OOI: "R = op = \<Longrightarrow> R = R OO R"
   by auto
 
+lemma OO_Grp_alt: "(Grp A f)^--1 OO Grp A g = (\<lambda>x y. \<exists>z. z \<in> A \<and> f z = x \<and> g z = y)"
+  unfolding Grp_def by auto
+
 lemma Grp_UNIV_id: "f = id \<Longrightarrow> (Grp UNIV f)^--1 OO Grp UNIV f = Grp UNIV f"
 unfolding Grp_def by auto
 
@@ -110,10 +113,6 @@
 lemma Collect_split_Grp_inD: "z \<in> Collect (split (Grp A f)) \<Longrightarrow> fst z \<in> A"
 unfolding Grp_def o_def by auto
 
-lemma wpull_Grp:
-"wpull (Collect (split (Grp A f))) A (f ` A) f id fst snd"
-unfolding wpull_def Grp_def by auto
-
 definition "pick_middlep P Q a c = (SOME b. P a b \<and> Q b c)"
 
 lemma pick_middlep:
--- a/src/HOL/BNF/BNF_FP_Base.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/BNF_FP_Base.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -11,8 +11,6 @@
 
 theory BNF_FP_Base
 imports BNF_Comp BNF_Ctr_Sugar
-keywords
-  "defaults"
 begin
 
 lemma mp_conj: "(P \<longrightarrow> Q) \<and> R \<Longrightarrow> P \<Longrightarrow> R \<and> Q"
--- a/src/HOL/BNF/BNF_Util.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/BNF_Util.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -47,16 +47,9 @@
 lemma bijI: "\<lbrakk>\<And>x y. (f x = f y) = (x = y); \<And>y. \<exists>x. y = f x\<rbrakk> \<Longrightarrow> bij f"
 unfolding bij_def inj_on_def by auto blast
 
-lemma pair_mem_Collect_split:
-"(\<lambda>x y. (x, y) \<in> {(x, y). P x y}) = P"
-by simp
-
 lemma Collect_pair_mem_eq: "{(x, y). (x, y) \<in> R} = R"
 by simp
 
-lemma Collect_fst_snd_mem_eq: "{p. (fst p, snd p) \<in> A} = A"
-by simp
-
 (* Operator: *)
 definition "Gr A f = {(a, f a) | a. a \<in> A}"
 
--- a/src/HOL/BNF/Examples/Misc_Codatatype.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Examples/Misc_Codatatype.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -43,6 +43,8 @@
   ('a, 'b1, 'b2) F2 = unit + 'b1 * 'b2
 *)
 
+codatatype 'a p = P "'a + 'a p"
+
 codatatype 'a J1 = J11 'a "'a J1" | J12 'a "'a J2"
 and 'a J2 = J21 | J22 "'a J1" "'a J2"
 
@@ -73,6 +75,7 @@
 
 codatatype ('b, 'c) less_killing = LK "'b \<Rightarrow> 'c"
 
+codatatype 'b poly_unit = U "'b \<Rightarrow> 'b poly_unit"
 codatatype 'b cps = CPS1 'b | CPS2 "'b \<Rightarrow> 'b cps"
 
 codatatype ('b1, 'b2, 'b3, 'b4, 'b5, 'b6, 'b7, 'b8, 'b9) fun_rhs =
--- a/src/HOL/BNF/Tools/bnf_def.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_def.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -770,7 +770,7 @@
     val bnf_wit_As = map (apsnd (mk_bnf_t As')) bnf_wits;
 
     val pre_names_lthy = lthy;
-    val (((((((((((((((((((((((((fs, gs), hs), x), y), (z, z')), zs), ys), As),
+    val ((((((((((((((((((((((((fs, gs), hs), x), y), zs), ys), As),
       As_copy), Xs), B1s), B2s), f1s), f2s), e1s), e2s), p1s), p2s), bs), (Rs, Rs')), Rs_copy), Ss),
       transfer_domRs), transfer_ranRs), names_lthy) = pre_names_lthy
       |> mk_Frees "f" (map2 (curry op -->) As' Bs')
@@ -778,7 +778,6 @@
       ||>> mk_Frees "h" (map2 (curry op -->) As' Ts)
       ||>> yield_singleton (mk_Frees "x") CA'
       ||>> yield_singleton (mk_Frees "y") CB'
-      ||>> yield_singleton (apfst (op ~~) oo mk_Frees' "z") CRs'
       ||>> mk_Frees "z" As'
       ||>> mk_Frees "y" Bs'
       ||>> mk_Frees "A" (map HOLogic.mk_setT As')
@@ -1093,7 +1092,8 @@
 
         val map_wppull = Lazy.lazy mk_map_wppull;
 
-        val rel_OO_Grps = no_refl [#rel_OO_Grp axioms];
+        val rel_OO_Grp = #rel_OO_Grp axioms;
+        val rel_OO_Grps = no_refl [rel_OO_Grp];
 
         fun mk_rel_Grp () =
           let
@@ -1182,23 +1182,7 @@
 
         val rel_OO = Lazy.lazy mk_rel_OO;
 
-        fun mk_in_rel () =
-          let
-            val bnf_in = mk_in setRs (map (mk_bnf_t RTs) bnf_sets) CRs';
-            val map1 = Term.list_comb (mk_bnf_map RTs As', map fst_const RTs);
-            val map2 = Term.list_comb (mk_bnf_map RTs Bs', map snd_const RTs);
-            val map_fst_eq = HOLogic.mk_eq (map1 $ z, x);
-            val map_snd_eq = HOLogic.mk_eq (map2 $ z, y);
-            val lhs = Term.list_comb (rel, Rs) $ x $ y;
-            val rhs =
-              HOLogic.mk_exists (fst z', snd z', HOLogic.mk_conj (HOLogic.mk_mem (z, bnf_in),
-                HOLogic.mk_conj (map_fst_eq, map_snd_eq)));
-            val goal =
-              fold_rev Logic.all (x :: y :: Rs) (mk_Trueprop_eq (lhs, rhs));
-          in
-            Goal.prove_sorry lthy [] [] goal (mk_in_rel_tac (the_single rel_OO_Grps))
-            |> Thm.close_derivation
-          end;
+        fun mk_in_rel () = trans OF [rel_OO_Grp, @{thm OO_Grp_alt}] RS @{thm predicate2_eqD};
 
         val in_rel = Lazy.lazy mk_in_rel;
 
--- a/src/HOL/BNF/Tools/bnf_def_tactics.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_def_tactics.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -21,7 +21,6 @@
   val mk_rel_eq_tac: int -> thm -> thm -> thm -> tactic
   val mk_rel_OO_tac: thm list -> thm -> thm -> thm -> thm -> thm list ->
     {prems: thm list, context: Proof.context} -> tactic
-  val mk_in_rel_tac: thm -> {prems: 'a, context: Proof.context} -> tactic
   val mk_rel_conversep_tac: thm -> thm -> tactic
   val mk_rel_conversep_le_tac: thm list -> thm -> thm -> thm -> thm list ->
     {prems: thm list, context: Proof.context} -> tactic
@@ -209,13 +208,6 @@
           rtac (map_comp0 RS sym), atac, atac]) [@{thm fst_fstOp}, @{thm snd_sndOp}])] 1
   end;
 
-fun mk_in_rel_tac rel_OO_Gr {context = ctxt, prems = _} =
-  EVERY' [rtac (rel_OO_Gr RS fun_cong RS fun_cong RS trans), rtac iffI,
-    REPEAT_DETERM o eresolve_tac [@{thm GrpE}, @{thm relcomppE}, @{thm conversepE}],
-    hyp_subst_tac ctxt, rtac exI, rtac conjI, atac, rtac conjI, rtac refl, rtac refl,
-    REPEAT_DETERM o eresolve_tac [exE, conjE], rtac @{thm relcomppI}, rtac @{thm conversepI},
-    etac @{thm GrpI}, atac, etac @{thm GrpI}, atac] 1;
-
 fun mk_rel_mono_strong_tac in_rel set_map0s {context = ctxt, prems = _} =
   if null set_map0s then atac 1
   else
@@ -230,16 +222,18 @@
   {context = ctxt, prems = _} =
   let
     val n = length set_maps;
+    val in_tac = if n = 0 then rtac UNIV_I else
+      rtac CollectI THEN' CONJ_WRAP' (fn thm =>
+        etac (thm RS
+          @{thm ord_eq_le_trans[OF _ subset_trans[OF image_mono convol_image_vimage2p]]}))
+      set_maps;
   in
     REPEAT_DETERM_N n (HEADGOAL (rtac @{thm fun_relI})) THEN
     unfold_thms_tac ctxt @{thms fun_rel_iff_leq_vimage2p} THEN
     HEADGOAL (EVERY' [rtac @{thm order_trans}, rtac rel_mono, REPEAT_DETERM_N n o atac,
       rtac @{thm predicate2I}, dtac (in_rel RS iffD1),
       REPEAT_DETERM o eresolve_tac [exE, CollectE, conjE], hyp_subst_tac ctxt,
-      rtac @{thm vimage2pI}, rtac (in_rel RS iffD2), rtac exI, rtac conjI, rtac CollectI,
-      CONJ_WRAP' (fn thm =>
-        etac (thm RS @{thm ord_eq_le_trans[OF _ subset_trans[OF image_mono convol_image_vimage2p]]}))
-      set_maps,
+      rtac @{thm vimage2pI}, rtac (in_rel RS iffD2), rtac exI, rtac conjI, in_tac,
       rtac conjI,
       EVERY' (map (fn convol =>
         rtac (box_equals OF [map_cong0, map_comp RS sym, map_comp RS sym]) THEN'
--- a/src/HOL/BNF/Tools/bnf_fp_def_sugar.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_fp_def_sugar.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -44,8 +44,8 @@
   val build_rel: local_theory -> (typ * typ -> term) -> typ * typ -> term
   val dest_map: Proof.context -> string -> term -> term * term list
   val dest_ctr: Proof.context -> string -> term -> term * term list
-  val mk_co_iters_prelims: BNF_FP_Util.fp_kind -> typ list -> typ list -> int list ->
-    int list list -> term list list -> Proof.context ->
+  val mk_co_iters_prelims: BNF_FP_Util.fp_kind -> typ list list list -> typ list -> typ list ->
+    int list -> int list list -> term list list -> Proof.context ->
     (term list list
      * (typ list list * typ list list list list * term list list
         * term list list list list) list option
@@ -53,9 +53,9 @@
         * ((term list list * term list list list) * (typ list * typ list list)) list) option)
     * Proof.context
 
-  val mk_iter_fun_arg_types: typ list -> int list -> int list list -> term ->
+  val mk_iter_fun_arg_types: typ list list list -> int list -> int list list -> term ->
     typ list list list list
-  val mk_coiter_fun_arg_types: typ list -> int list -> int list list -> term ->
+  val mk_coiter_fun_arg_types: typ list list list -> typ list -> int list -> term ->
     typ list list
     * (typ list list list list * typ list list list * typ list list list list * typ list)
   val define_iters: string list ->
@@ -268,12 +268,12 @@
 
 val mk_fp_iter_fun_types = binder_fun_types o fastype_of;
 
-fun unzip_recT Cs (T as Type (@{type_name prod}, Ts as [_, U])) =
-    if member (op =) Cs U then Ts else [T]
+fun unzip_recT (Type (@{type_name prod}, _)) T = [T]
+  | unzip_recT _ (T as Type (@{type_name prod}, Ts)) = Ts
   | unzip_recT _ T = [T];
 
-fun unzip_corecT Cs (T as Type (@{type_name sum}, Ts as [_, U])) =
-    if member (op =) Cs U then Ts else [T]
+fun unzip_corecT (Type (@{type_name sum}, _)) T = [T]
+  | unzip_corecT _ (T as Type (@{type_name sum}, Ts)) = Ts
   | unzip_corecT _ T = [T];
 
 fun mk_map live Ts Us t =
@@ -398,12 +398,12 @@
 
 fun mk_iter_fun_arg_types0 n ms = map2 dest_tupleT ms o dest_sumTN_balanced n o domain_type;
 
-fun mk_iter_fun_arg_types Cs ns mss =
+fun mk_iter_fun_arg_types ctr_Tsss ns mss =
   mk_fp_iter_fun_types
   #> map3 mk_iter_fun_arg_types0 ns mss
-  #> map (map (map (unzip_recT Cs)));
+  #> map2 (map2 (map2 unzip_recT)) ctr_Tsss;
 
-fun mk_iters_args_types Cs ns mss ctor_iter_fun_Tss lthy =
+fun mk_iters_args_types ctr_Tsss Cs ns mss ctor_iter_fun_Tss lthy =
   let
     val Css = map2 replicate ns Cs;
     val y_Tsss = map3 mk_iter_fun_arg_types0 ns mss (map un_fold_of ctor_iter_fun_Tss);
@@ -418,8 +418,11 @@
     val yssss = map (map (map single)) ysss;
 
     val z_Tssss =
-      map3 (fn n => fn ms => map2 (map (unzip_recT Cs) oo dest_tupleT) ms o
-        dest_sumTN_balanced n o domain_type o co_rec_of) ns mss ctor_iter_fun_Tss;
+      map4 (fn n => fn ms => fn ctr_Tss => fn ctor_iter_fun_Ts =>
+          map3 (fn m => fn ctr_Ts => fn ctor_iter_fun_T =>
+              map2 unzip_recT ctr_Ts (dest_tupleT m ctor_iter_fun_T))
+            ms ctr_Tss (dest_sumTN_balanced n (domain_type (co_rec_of ctor_iter_fun_Ts))))
+        ns mss ctr_Tsss ctor_iter_fun_Tss;
 
     val z_Tsss' = map (map flat_rec_arg_args) z_Tssss;
     val h_Tss = map2 (map2 (curry op --->)) z_Tsss' Css;
@@ -434,16 +437,18 @@
     ([(g_Tss, y_Tssss, gss, yssss), (h_Tss, z_Tssss, hss, zssss)], lthy)
   end;
 
-fun mk_coiter_fun_arg_types0 Cs ns mss fun_Ts =
+fun mk_coiter_fun_arg_types0 ctr_Tsss Cs ns fun_Ts =
   let
-    (*avoid "'a itself" arguments in coiterators and corecursors*)
-    fun repair_arity [0] = [1]
-      | repair_arity ms = ms;
+    (*avoid "'a itself" arguments in coiterators*)
+    fun repair_arity [[]] = [[@{typ unit}]]
+      | repair_arity Tss = Tss;
 
+    val ctr_Tsss' = map repair_arity ctr_Tsss;
     val f_sum_prod_Ts = map range_type fun_Ts;
     val f_prod_Tss = map2 dest_sumTN_balanced ns f_sum_prod_Ts;
-    val f_Tsss = map2 (map2 dest_tupleT o repair_arity) mss f_prod_Tss;
-    val f_Tssss = map2 (fn C => map (map (map (curry op --> C) o unzip_corecT Cs))) Cs f_Tsss;
+    val f_Tsss = map2 (map2 (dest_tupleT o length)) ctr_Tsss' f_prod_Tss;
+    val f_Tssss = map3 (fn C => map2 (map2 (map (curry op --> C) oo unzip_corecT)))
+      Cs ctr_Tsss' f_Tsss;
     val q_Tssss = map (map (map (fn [_] => [] | [_, T] => [mk_pred1T (domain_type T)]))) f_Tssss;
   in
     (q_Tssss, f_Tsss, f_Tssss, f_sum_prod_Ts)
@@ -451,18 +456,18 @@
 
 fun mk_coiter_p_pred_types Cs ns = map2 (fn n => replicate (Int.max (0, n - 1)) o mk_pred1T) ns Cs;
 
-fun mk_coiter_fun_arg_types Cs ns mss dtor_coiter =
+fun mk_coiter_fun_arg_types ctr_Tsss Cs ns dtor_coiter =
   (mk_coiter_p_pred_types Cs ns,
-   mk_fp_iter_fun_types dtor_coiter |> mk_coiter_fun_arg_types0 Cs ns mss);
+   mk_fp_iter_fun_types dtor_coiter |> mk_coiter_fun_arg_types0 ctr_Tsss Cs ns);
 
-fun mk_coiters_args_types Cs ns mss dtor_coiter_fun_Tss lthy =
+fun mk_coiters_args_types ctr_Tsss Cs ns mss dtor_coiter_fun_Tss lthy =
   let
     val p_Tss = mk_coiter_p_pred_types Cs ns;
 
     fun mk_types get_Ts =
       let
         val fun_Ts = map get_Ts dtor_coiter_fun_Tss;
-        val (q_Tssss, f_Tsss, f_Tssss, f_sum_prod_Ts) = mk_coiter_fun_arg_types0 Cs ns mss fun_Ts;
+        val (q_Tssss, f_Tsss, f_Tssss, f_sum_prod_Ts) = mk_coiter_fun_arg_types0 ctr_Tsss Cs ns fun_Ts;
         val pf_Tss = map3 flat_corec_preds_predsss_gettersss p_Tss q_Tssss f_Tssss;
       in
         (q_Tssss, f_Tsss, f_Tssss, (f_sum_prod_Ts, pf_Tss))
@@ -509,7 +514,7 @@
     ((z, cs, cpss, [(unfold_args, unfold_types), (corec_args, corec_types)]), lthy)
   end;
 
-fun mk_co_iters_prelims fp fpTs Cs ns mss xtor_co_iterss0 lthy =
+fun mk_co_iters_prelims fp ctr_Tsss fpTs Cs ns mss xtor_co_iterss0 lthy =
   let
     val thy = Proof_Context.theory_of lthy;
 
@@ -519,9 +524,9 @@
 
     val ((iters_args_types, coiters_args_types), lthy') =
       if fp = Least_FP then
-        mk_iters_args_types Cs ns mss xtor_co_iter_fun_Tss lthy |>> (rpair NONE o SOME)
+        mk_iters_args_types ctr_Tsss Cs ns mss xtor_co_iter_fun_Tss lthy |>> (rpair NONE o SOME)
       else
-        mk_coiters_args_types Cs ns mss xtor_co_iter_fun_Tss lthy |>> (pair NONE o SOME)
+        mk_coiters_args_types ctr_Tsss Cs ns mss xtor_co_iter_fun_Tss lthy |>> (pair NONE o SOME)
   in
     ((xtor_co_iterss, iters_args_types, coiters_args_types), lthy')
   end;
@@ -542,9 +547,12 @@
   let
     val thy = Proof_Context.theory_of lthy0;
 
+    val maybe_conceal_def_binding = Thm.def_binding
+      #> Config.get lthy0 bnf_note_all = false ? Binding.conceal;
+
     val ((csts, defs), (lthy', lthy)) = lthy0
       |> apfst split_list o fold_map (fn (b, spec) =>
-        Specification.definition (SOME (b, NONE, NoSyn), ((Thm.def_binding b, []), spec))
+        Specification.definition (SOME (b, NONE, NoSyn), ((maybe_conceal_def_binding b, []), spec))
         #>> apsnd snd) binding_specs
       ||> `Local_Theory.restore;
 
@@ -1221,7 +1229,7 @@
     val mss = map (map length) ctr_Tsss;
 
     val ((xtor_co_iterss, iters_args_types, coiters_args_types), lthy') =
-      mk_co_iters_prelims fp fpTs Cs ns mss xtor_co_iterss0 lthy;
+      mk_co_iters_prelims fp ctr_Tsss fpTs Cs ns mss xtor_co_iterss0 lthy;
 
     fun define_ctrs_dtrs_for_type (((((((((((((((((((((((fp_bnf, fp_b), fpT), ctor), dtor),
             xtor_co_iters), ctor_dtor), dtor_ctor), ctor_inject), pre_map_def), pre_set_defs),
@@ -1250,9 +1258,12 @@
           map3 (fn k => fn xs => fn tuple_x => fold_rev Term.lambda xs (ctor $
             mk_InN_balanced ctr_sum_prod_T n tuple_x k)) ks xss tuple_xs;
 
+        val maybe_conceal_def_binding = Thm.def_binding
+          #> Config.get no_defs_lthy bnf_note_all = false ? Binding.conceal;
+
         val ((raw_ctrs, raw_ctr_defs), (lthy', lthy)) = no_defs_lthy
           |> apfst split_list o fold_map3 (fn b => fn mx => fn rhs =>
-              Local_Theory.define ((b, mx), ((Thm.def_binding b, []), rhs)) #>> apsnd snd)
+              Local_Theory.define ((b, mx), ((maybe_conceal_def_binding b, []), rhs)) #>> apsnd snd)
             ctr_bindings ctr_mixfixes ctr_rhss
           ||> `Local_Theory.restore;
 
@@ -1538,7 +1549,7 @@
   (Parse.typ >> pair Binding.empty);
 
 val parse_defaults =
-  @{keyword "("} |-- @{keyword "defaults"} |-- Scan.repeat parse_bound_term --| @{keyword ")"};
+  @{keyword "("} |-- Parse.reserved "defaults" |-- Scan.repeat parse_bound_term --| @{keyword ")"};
 
 val parse_type_arg_constrained =
   Parse.type_ident -- Scan.option (@{keyword "::"} |-- Parse.!!! Parse.sort);
@@ -1554,8 +1565,6 @@
 
 val no_map_rel = (Binding.empty, Binding.empty);
 
-(* "map" and "rel" are purposedly not registered as keywords, because they are short and nice names
-   that we don't want them to be highlighted everywhere. *)
 fun extract_map_rel ("map", b) = apfst (K b)
   | extract_map_rel ("rel", b) = apsnd (K b)
   | extract_map_rel (s, _) = error ("Unknown label " ^ quote s ^ " (expected \"map\" or \"rel\")");
--- a/src/HOL/BNF/Tools/bnf_fp_def_sugar_tactics.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_fp_def_sugar_tactics.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -152,7 +152,7 @@
      full_simp_tac
        (ss_only (@{thm prod.inject} :: no_refl discs @ rel_eqs @ more_simp_thms) ctxt) THEN'
      REPEAT o etac conjE THEN_MAYBE' REPEAT o hyp_subst_tac ctxt THEN' REPEAT o rtac conjI THEN'
-     REPEAT o rtac refl);
+     REPEAT o (rtac refl ORELSE' atac));
 
 fun mk_coinduct_distinct_ctrs_tac ctxt discs discs' =
   hyp_subst_tac ctxt THEN' REPEAT o etac conjE THEN'
--- a/src/HOL/BNF/Tools/bnf_fp_n2m_sugar.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_fp_n2m_sugar.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -127,7 +127,7 @@
       val nested_bnfs = nesty_bnfs lthy ctrXs_Tsss Xs;
 
       val ((xtor_co_iterss, iters_args_types, coiters_args_types), _) =
-        mk_co_iters_prelims fp fpTs Cs ns mss xtor_co_iterss0 lthy;
+        mk_co_iters_prelims fp ctr_Tsss fpTs Cs ns mss xtor_co_iterss0 lthy;
 
       fun mk_binding b suf = Binding.suffix_name ("_" ^ suf) b;
 
--- a/src/HOL/BNF/Tools/bnf_fp_rec_sugar_util.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_fp_rec_sugar_util.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -312,7 +312,8 @@
 
     val perm_Cs = map (body_type o fastype_of o co_rec_of o of_fp_sugar (#xtor_co_iterss o #fp_res))
       perm_fp_sugars;
-    val perm_fun_arg_Tssss = mk_iter_fun_arg_types perm_Cs perm_ns perm_mss (co_rec_of ctor_iters1);
+    val perm_fun_arg_Tssss =
+      mk_iter_fun_arg_types perm_ctr_Tsss perm_ns perm_mss (co_rec_of ctor_iters1);
 
     fun unpermute0 perm0_xs = permute_like (op =) perm0_kks kks perm0_xs;
     fun unpermute perm_xs = permute_like (op =) perm_indices indices perm_xs;
@@ -389,12 +390,11 @@
     val nn = length perm_fpTs;
     val kks = 0 upto nn - 1;
     val perm_ns = map length perm_ctr_Tsss;
-    val perm_mss = map (map length) perm_ctr_Tsss;
 
     val perm_Cs = map (domain_type o body_fun_type o fastype_of o co_rec_of o
       of_fp_sugar (#xtor_co_iterss o #fp_res)) perm_fp_sugars;
     val (perm_p_Tss, (perm_q_Tssss, _, perm_f_Tssss, _)) =
-      mk_coiter_fun_arg_types perm_Cs perm_ns perm_mss (co_rec_of dtor_coiters1);
+      mk_coiter_fun_arg_types perm_ctr_Tsss perm_Cs perm_ns (co_rec_of dtor_coiters1);
 
     val (perm_p_hss, h) = indexedd perm_p_Tss 0;
     val (perm_q_hssss, h') = indexedddd perm_q_Tssss h;
@@ -447,7 +447,8 @@
         val p_ios = map SOME p_is @ [NONE];
         val collapses = #collapses (nth ctr_sugars index);
         val corec_thms = co_rec_of (nth coiter_thmsss index);
-        val disc_corecs = co_rec_of (nth disc_coitersss index);
+        val disc_corecs = (case co_rec_of (nth disc_coitersss index) of [] => [TrueI]
+          | thms => thms);
         val sel_corecss = co_rec_of (nth sel_coiterssss index);
       in
         map11 mk_ctr_spec ctrs discs selss p_ios q_isss f_isss f_Tsss collapses corec_thms
--- a/src/HOL/BNF/Tools/bnf_fp_util.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_fp_util.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -587,13 +587,15 @@
       in
         Binding.prefix_name rawN
         #> fold_rev (fn (s, mand) => Binding.qualify mand s) (qs @ [(n, true)])
+        #> Binding.conceal
       end;
 
     val ((bnfs, (deadss, livess)), (unfold_set, lthy)) = apfst (apsnd split_list o split_list)
       (fold_map2 (fn b => bnf_of_typ Smart_Inline (raw_qualify b) fp_sort Xs) bs rhsXs
         (empty_unfolds, lthy));
 
-    fun norm_qualify i = Binding.qualify true (Binding.name_of (nth bs (Int.max (0, i - 1))));
+    fun norm_qualify i = Binding.qualify true (Binding.name_of (nth bs (Int.max (0, i - 1))))
+      #> Binding.conceal;
 
     val Ass = map (map dest_TFree) livess;
     val resDs = fold (subtract (op =)) Ass resBs;
@@ -606,7 +608,8 @@
 
     val Dss = map3 (append oo map o nth) livess kill_poss deadss;
 
-    val pre_qualify = Binding.qualify false o Binding.name_of;
+    fun pre_qualify b = Binding.qualify false (Binding.name_of b)
+      #> Config.get lthy' bnf_note_all = false ? Binding.conceal;
 
     val ((pre_bnfs, deadss), lthy'') =
       fold_map3 (fn b => seal_bnf (pre_qualify b) unfold_set' (Binding.prefix_name preN b))
--- a/src/HOL/BNF/Tools/bnf_gfp.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_gfp.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -66,13 +66,17 @@
     val ks = 1 upto n;
     val m = live - n; (*passive, if 0 don't generate a new BNF*)
     val ls = 1 upto m;
+
+    val note_all = Config.get lthy bnf_note_all;
     val b_names = map Binding.name_of bs;
-    val common_name = mk_common_name b_names;
-    val b = Binding.name common_name;
-    val internal_b = Binding.prefix true common_name b;
-    fun qualify_bs internal = map2 (Binding.prefix internal) b_names bs;
-    val internal_bs = qualify_bs true;
-    val external_bs = qualify_bs false;
+    val b_name = mk_common_name b_names;
+    val b = Binding.name b_name;
+    val mk_internal_b = Binding.name #> Binding.prefix true b_name #> Binding.conceal;
+    fun mk_internal_bs name =
+      map (fn b =>
+        Binding.prefix true b_name (Binding.suffix_name ("_" ^ name) b) |> Binding.conceal) bs;
+    val external_bs = map2 (Binding.prefix false) b_names bs
+      |> note_all = false ? map Binding.conceal;
 
     (* TODO: check if m, n, etc., are sane *)
 
@@ -297,7 +301,7 @@
 
     (* coalgebra *)
 
-    val coalg_bind = Binding.suffix_name ("_" ^ coN ^ algN) internal_b;
+    val coalg_bind = mk_internal_b (coN ^ algN) ;
     val coalg_name = Binding.name_of coalg_bind;
     val coalg_def_bind = (Thm.def_binding coalg_bind, []);
 
@@ -373,7 +377,7 @@
 
     (* morphism *)
 
-    val mor_bind = Binding.suffix_name ("_" ^ morN) internal_b;
+    val mor_bind = mk_internal_b morN;
     val mor_name = Binding.name_of mor_bind;
     val mor_def_bind = (Thm.def_binding mor_bind, []);
 
@@ -518,8 +522,7 @@
 
     val timer = time (timer "Morphism definition & thms");
 
-    fun hset_rec_bind j = internal_b
-      |> Binding.suffix_name ("_" ^ hset_recN ^ (if m = 1 then "" else string_of_int j)) ;
+    fun hset_rec_bind j = mk_internal_b (hset_recN ^ (if m = 1 then "" else string_of_int j));
     val hset_rec_name = Binding.name_of o hset_rec_bind;
     val hset_rec_def_bind = rpair [] o Thm.def_binding o hset_rec_bind;
 
@@ -573,8 +576,8 @@
     val hset_rec_0ss' = transpose hset_rec_0ss;
     val hset_rec_Sucss' = transpose hset_rec_Sucss;
 
-    fun hset_bind i j = nth internal_bs (i - 1)
-      |> Binding.suffix_name ("_" ^ hsetN ^ (if m = 1 then "" else string_of_int j));
+    fun hset_binds j = mk_internal_bs (hsetN ^ (if m = 1 then "" else string_of_int j))
+    fun hset_bind i j = nth (hset_binds j) (i - 1);
     val hset_name = Binding.name_of oo hset_bind;
     val hset_def_bind = rpair [] o Thm.def_binding oo hset_bind;
 
@@ -741,7 +744,7 @@
 
     (* bisimulation *)
 
-    val bis_bind = Binding.suffix_name ("_" ^ bisN) internal_b;
+    val bis_bind = mk_internal_b bisN;
     val bis_name = Binding.name_of bis_bind;
     val bis_def_bind = (Thm.def_binding bis_bind, []);
 
@@ -885,7 +888,8 @@
 
     (* largest self-bisimulation *)
 
-    fun lsbis_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ lsbisN);
+    val lsbis_binds = mk_internal_bs lsbisN;
+    fun lsbis_bind i = nth lsbis_binds (i - 1);
     val lsbis_name = Binding.name_of o lsbis_bind;
     val lsbis_def_bind = rpair [] o Thm.def_binding o lsbis_bind;
 
@@ -970,8 +974,7 @@
       then (lthy, sum_bd, sum_bdT, bd_card_order, bd_Cinfinite, bd_Card_order, set_bdss)
       else
         let
-          val sbdT_bind =
-            Binding.qualify false (Binding.name_of b) (Binding.suffix_name ("_" ^ sum_bdTN) b);
+          val sbdT_bind = mk_internal_b sum_bdTN;
 
           val ((sbdT_name, (sbdT_glob_info, sbdT_loc_info)), lthy) =
             typedef (sbdT_bind, dead_params, NoSyn)
@@ -980,7 +983,7 @@
           val sbdT = Type (sbdT_name, dead_params');
           val Abs_sbdT = Const (#Abs_name sbdT_glob_info, sum_bdT --> sbdT);
 
-          val sbd_bind = Binding.suffix_name ("_" ^ sum_bdN) internal_b;
+          val sbd_bind = mk_internal_b sum_bdN;
           val sbd_name = Binding.name_of sbd_bind;
           val sbd_def_bind = (Thm.def_binding sbd_bind, []);
 
@@ -1076,7 +1079,8 @@
 
     (* tree coalgebra *)
 
-    fun isNode_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ isNodeN);
+    val isNode_binds = mk_internal_bs isNodeN;
+    fun isNode_bind i = nth isNode_binds (i - 1);
     val isNode_name = Binding.name_of o isNode_bind;
     val isNode_def_bind = rpair [] o Thm.def_binding o isNode_bind;
 
@@ -1135,7 +1139,8 @@
         Library.foldr1 HOLogic.mk_conj [empty, Field, prefCl, tree, undef]
       end;
 
-    fun carT_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ carTN);
+    val carT_binds = mk_internal_bs carTN;
+    fun carT_bind i = nth carT_binds (i - 1);
     val carT_name = Binding.name_of o carT_bind;
     val carT_def_bind = rpair [] o Thm.def_binding o carT_bind;
 
@@ -1167,7 +1172,8 @@
       (Const (nth carTs (i - 1),
          Library.foldr (op -->) (map fastype_of As, HOLogic.mk_setT treeT)), As);
 
-    fun strT_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ strTN);
+    val strT_binds = mk_internal_bs strTN;
+    fun strT_bind i = nth strT_binds (i - 1);
     val strT_name = Binding.name_of o strT_bind;
     val strT_def_bind = rpair [] o Thm.def_binding o strT_bind;
 
@@ -1228,7 +1234,7 @@
     val to_sbd_thmss = mk_to_sbd_thmss @{thm toCard};
     val from_to_sbd_thmss = mk_to_sbd_thmss @{thm fromCard_toCard};
 
-    val Lev_bind = Binding.suffix_name ("_" ^ LevN) internal_b;
+    val Lev_bind = mk_internal_b LevN;
     val Lev_name = Binding.name_of Lev_bind;
     val Lev_def_bind = rpair [] (Thm.def_binding Lev_bind);
 
@@ -1282,7 +1288,7 @@
     val Lev_0s = flat (mk_rec_simps n @{thm nat_rec_0} [Lev_def]);
     val Lev_Sucs = flat (mk_rec_simps n @{thm nat_rec_Suc} [Lev_def]);
 
-    val rv_bind = Binding.suffix_name ("_" ^ rvN) internal_b;
+    val rv_bind = mk_internal_b rvN;
     val rv_name = Binding.name_of rv_bind;
     val rv_def_bind = rpair [] (Thm.def_binding rv_bind);
 
@@ -1328,7 +1334,8 @@
     val rv_Nils = flat (mk_rec_simps n @{thm list_rec_Nil} [rv_def]);
     val rv_Conss = flat (mk_rec_simps n @{thm list_rec_Cons} [rv_def]);
 
-    fun beh_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ behN);
+    val beh_binds = mk_internal_bs behN;
+    fun beh_bind i = nth beh_binds (i - 1);
     val beh_name = Binding.name_of o beh_bind;
     val beh_def_bind = rpair [] o Thm.def_binding o beh_bind;
 
@@ -1636,7 +1643,7 @@
     val ((T_names, (T_glob_infos, T_loc_infos)), lthy) =
       lthy
       |> fold_map4 (fn b => fn mx => fn car_final => fn in_car_final =>
-        typedef (b, params, mx) car_final NONE
+        typedef (Binding.conceal b, params, mx) car_final NONE
           (EVERY' [rtac exI, rtac in_car_final] 1)) bs mixfixes car_finals in_car_final_thms
       |>> apsnd split_list o split_list;
 
@@ -1692,7 +1699,7 @@
 
     fun dtor_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ dtorN);
     val dtor_name = Binding.name_of o dtor_bind;
-    val dtor_def_bind = rpair [] o Thm.def_binding o dtor_bind;
+    val dtor_def_bind = rpair [] o Binding.conceal o Thm.def_binding o dtor_bind;
 
     fun dtor_spec i rep str map_FT dtorT Jz Jz' =
       let
@@ -1744,7 +1751,7 @@
 
     fun unfold_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ dtor_unfoldN);
     val unfold_name = Binding.name_of o unfold_bind;
-    val unfold_def_bind = rpair [] o Thm.def_binding o unfold_bind;
+    val unfold_def_bind = rpair [] o Binding.conceal o Thm.def_binding o unfold_bind;
 
     fun unfold_spec i T AT abs f z z' =
       let
@@ -1865,7 +1872,7 @@
 
     fun ctor_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ ctorN);
     val ctor_name = Binding.name_of o ctor_bind;
-    val ctor_def_bind = rpair [] o Thm.def_binding o ctor_bind;
+    val ctor_def_bind = rpair [] o Binding.conceal o Thm.def_binding o ctor_bind;
 
     fun ctor_spec i ctorT =
       let
@@ -1936,7 +1943,7 @@
 
     fun corec_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ dtor_corecN);
     val corec_name = Binding.name_of o corec_bind;
-    val corec_def_bind = rpair [] o Thm.def_binding o corec_bind;
+    val corec_def_bind = rpair [] o Binding.conceal o Thm.def_binding o corec_bind;
 
     val corec_strs =
       map3 (fn dtor => fn sum_s => fn mapx =>
@@ -2007,7 +2014,7 @@
     val (dtor_corec_unique_thms, dtor_corec_unique_thm) =
       `split_conj_thm (split_conj_prems n
         (mor_UNIV_thm RS iffD2 RS corec_unique_mor_thm)
-        |> Local_Defs.unfold lthy (@{thms o_sum_case o_id id_o o_assoc sum_case_o_inj(1)} @
+        |> Local_Defs.unfold lthy (@{thms o_sum_case o_id id_o id_apply o_assoc sum_case_o_inj(1)} @
            map_id0s @ sym_map_comps) OF replicate n @{thm arg_cong2[of _ _ _ _ sum_case, OF refl]});
 
     val timer = time (timer "corec definitions & thms");
@@ -2096,11 +2103,11 @@
 
     (*register new codatatypes as BNFs*)
     val (timer, Jbnfs, (folded_dtor_map_o_thms, folded_dtor_map_thms), folded_dtor_set_thmss',
-      dtor_set_induct_thms, dtor_Jrel_thms, lthy) =
+      dtor_set_induct_thms, dtor_Jrel_thms, Jbnf_notes, lthy) =
       if m = 0 then
         (timer, replicate n DEADID_bnf,
         map_split (`(mk_pointfree lthy)) (map2 mk_dtor_map_DEADID_thm dtor_inject_thms map_ids),
-        replicate n [], [], map2 mk_dtor_Jrel_DEADID_thm dtor_inject_thms bnfs, lthy)
+        replicate n [], [], map2 mk_dtor_Jrel_DEADID_thm dtor_inject_thms bnfs, [], lthy)
       else let
         val fTs = map2 (curry op -->) passiveAs passiveBs;
         val gTs = map2 (curry op -->) passiveBs passiveCs;
@@ -2734,8 +2741,7 @@
             bs thmss)
       in
        (timer, Jbnfs, (folded_dtor_map_o_thms, folded_dtor_map_thms), folded_dtor_set_thmss',
-         dtor_set_induct_thms, dtor_Jrel_thms,
-         lthy |> Local_Theory.notes (Jbnf_common_notes @ Jbnf_notes) |> snd)
+         dtor_set_induct_thms, dtor_Jrel_thms, Jbnf_common_notes @ Jbnf_notes, lthy)
       end;
 
       val dtor_unfold_o_map_thms = mk_xtor_un_fold_o_map_thms Greatest_FP false m
@@ -2883,7 +2889,11 @@
         |> maps (fn (thmN, thmss) =>
           map2 (fn b => fn thms =>
             ((Binding.qualify true (Binding.name_of b) (Binding.name thmN), []), [(thms, [])]))
-          bs thmss)
+          bs thmss);
+
+    (*FIXME: once the package exports all the necessary high-level characteristic theorems,
+       those should not only be concealed but rather not noted at all*)
+    val maybe_conceal_notes = note_all = false ? map (apfst (apfst Binding.conceal));
   in
     timer;
     ({Ts = Ts, bnfs = Jbnfs, ctors = ctors, dtors = dtors,
@@ -2896,24 +2906,18 @@
       xtor_co_iter_thmss = transpose [dtor_unfold_thms, dtor_corec_thms],
       xtor_co_iter_o_map_thmss = transpose [dtor_unfold_o_map_thms, dtor_corec_o_map_thms],
       rel_xtor_co_induct_thm = Jrel_coinduct_thm},
-     lthy |> Local_Theory.notes (common_notes @ notes) |> snd)
+     lthy |> Local_Theory.notes (maybe_conceal_notes (common_notes @ notes @ Jbnf_notes)) |> snd)
   end;
 
 val _ =
   Outer_Syntax.local_theory @{command_spec "codatatype"} "define BNF-based coinductive datatypes"
     (parse_co_datatype_cmd Greatest_FP construct_gfp);
 
-local
-
 val option_parser = Parse.group (fn () => "option") (Parse.reserved "sequential" >> K true);
 
-in
-
 val _ = Outer_Syntax.local_theory_to_proof @{command_spec "primcorec"}
   "define primitive corecursive functions"
   ((Scan.optional (@{keyword "("} |-- Parse.!!! option_parser --| @{keyword ")"}) false) --
     (Parse.fixes -- Parse_Spec.where_alt_specs) >> uncurry add_primcorec_cmd);
- 
-end
 
 end;
--- a/src/HOL/BNF/Tools/bnf_lfp.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_lfp.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -36,13 +36,17 @@
     val n = length bnfs; (*active*)
     val ks = 1 upto n;
     val m = live - n; (*passive, if 0 don't generate a new BNF*)
+
+    val note_all = Config.get lthy bnf_note_all;
     val b_names = map Binding.name_of bs;
-    val common_name = mk_common_name b_names;
-    val b = Binding.name common_name;
-    val internal_b = Binding.prefix true common_name b;
-    fun qualify_bs internal = map2 (Binding.prefix internal) b_names bs;
-    val internal_bs = qualify_bs true;
-    val external_bs = qualify_bs false;
+    val b_name = mk_common_name b_names;
+    val b = Binding.name b_name;
+    val mk_internal_b = Binding.name #> Binding.prefix true b_name #> Binding.conceal;
+    fun mk_internal_bs name =
+      map (fn b =>
+        Binding.prefix true b_name (Binding.suffix_name ("_" ^ name) b) |> Binding.conceal) bs;
+    val external_bs = map2 (Binding.prefix false) b_names bs
+      |> note_all = false ? map Binding.conceal;
 
     (* TODO: check if m, n, etc., are sane *)
 
@@ -238,7 +242,7 @@
 
     (* algebra *)
 
-    val alg_bind = Binding.suffix_name ("_" ^ algN) internal_b;
+    val alg_bind = mk_internal_b algN;
     val alg_name = Binding.name_of alg_bind;
     val alg_def_bind = (Thm.def_binding alg_bind, []);
 
@@ -325,7 +329,7 @@
 
     (* morphism *)
 
-    val mor_bind = Binding.suffix_name ("_" ^ morN) internal_b;
+    val mor_bind = mk_internal_b morN;
     val mor_name = Binding.name_of mor_bind;
     val mor_def_bind = (Thm.def_binding mor_bind, []);
 
@@ -712,8 +716,9 @@
 
     val timer = time (timer "min_algs definition & thms");
 
-    fun min_alg_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ min_algN);
-    val min_alg_name = Binding.name_of o min_alg_bind;
+    val min_alg_binds = mk_internal_bs min_algN;
+    fun min_alg_bind i = nth min_alg_binds (i - 1);
+    fun min_alg_name i = Binding.name_of (min_alg_bind i);
     val min_alg_def_bind = rpair [] o Thm.def_binding o min_alg_bind;
 
     fun min_alg_spec i =
@@ -791,7 +796,7 @@
     val timer = time (timer "Minimal algebra definition & thms");
 
     val II_repT = HOLogic.mk_prodT (HOLogic.mk_tupleT II_BTs, HOLogic.mk_tupleT II_sTs);
-    val IIT_bind = Binding.suffix_name ("_" ^ IITN) b;
+    val IIT_bind = mk_internal_b IITN;
 
     val ((IIT_name, (IIT_glob_info, IIT_loc_info)), lthy) =
       typedef (IIT_bind, params, NoSyn)
@@ -824,7 +829,8 @@
     val select_Bs = map (mk_nthN n (HOLogic.mk_fst (Rep_IIT $ iidx))) ks;
     val select_ss = map (mk_nthN n (HOLogic.mk_snd (Rep_IIT $ iidx))) ks;
 
-    fun str_init_bind i = nth internal_bs (i - 1) |> Binding.suffix_name ("_" ^ str_initN);
+    val str_init_binds = mk_internal_bs str_initN;
+    fun str_init_bind i = nth str_init_binds (i - 1);
     val str_init_name = Binding.name_of o str_init_bind;
     val str_init_def_bind = rpair [] o Thm.def_binding o str_init_bind;
 
@@ -953,7 +959,8 @@
 
     val ((T_names, (T_glob_infos, T_loc_infos)), lthy) =
       lthy
-      |> fold_map3 (fn b => fn mx => fn car_init => typedef (b, params, mx) car_init NONE
+      |> fold_map3 (fn b => fn mx => fn car_init =>
+        typedef (Binding.conceal b, params, mx) car_init NONE
           (EVERY' [rtac ssubst, rtac @{thm ex_in_conv}, resolve_tac alg_not_empty_thms,
             rtac alg_init_thm] 1)) bs mixfixes car_inits
       |>> apsnd split_list o split_list;
@@ -1016,7 +1023,7 @@
 
     fun ctor_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ ctorN);
     val ctor_name = Binding.name_of o ctor_bind;
-    val ctor_def_bind = rpair [] o Thm.def_binding o ctor_bind;
+    val ctor_def_bind = rpair [] o Binding.conceal o Thm.def_binding o ctor_bind;
 
     fun ctor_spec i abs str map_FT_init x x' =
       let
@@ -1075,7 +1082,7 @@
 
     fun fold_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ ctor_foldN);
     val fold_name = Binding.name_of o fold_bind;
-    val fold_def_bind = rpair [] o Thm.def_binding o fold_bind;
+    val fold_def_bind = rpair [] o Binding.conceal o Thm.def_binding o fold_bind;
 
     fun fold_spec i T AT =
       let
@@ -1165,7 +1172,7 @@
 
     fun dtor_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ dtorN);
     val dtor_name = Binding.name_of o dtor_bind;
-    val dtor_def_bind = rpair [] o Thm.def_binding o dtor_bind;
+    val dtor_def_bind = rpair [] o Binding.conceal o Thm.def_binding o dtor_bind;
 
     fun dtor_spec i FT T =
       let
@@ -1238,7 +1245,7 @@
 
     fun rec_bind i = nth external_bs (i - 1) |> Binding.suffix_name ("_" ^ ctor_recN);
     val rec_name = Binding.name_of o rec_bind;
-    val rec_def_bind = rpair [] o Thm.def_binding o rec_bind;
+    val rec_def_bind = rpair [] o Binding.conceal o Thm.def_binding o rec_bind;
 
     val rec_strs =
       map3 (fn ctor => fn prod_s => fn mapx =>
@@ -1405,11 +1412,11 @@
 
     (*register new datatypes as BNFs*)
     val (timer, Ibnfs, (folded_ctor_map_o_thms, folded_ctor_map_thms), folded_ctor_set_thmss',
-        ctor_Irel_thms, lthy) =
+        ctor_Irel_thms, Ibnf_notes, lthy) =
       if m = 0 then
         (timer, replicate n DEADID_bnf,
         map_split (`(mk_pointfree lthy)) (map2 mk_ctor_map_DEADID_thm ctor_inject_thms map_ids),
-        replicate n [], map2 mk_ctor_Irel_DEADID_thm ctor_inject_thms bnfs, lthy)
+        replicate n [], map2 mk_ctor_Irel_DEADID_thm ctor_inject_thms bnfs, [], lthy)
       else let
         val fTs = map2 (curry op -->) passiveAs passiveBs;
         val f1Ts = map2 (curry op -->) passiveAs passiveYs;
@@ -1809,7 +1816,7 @@
             bs thmss)
       in
         (timer, Ibnfs, (folded_ctor_map_o_thms, folded_ctor_map_thms), folded_ctor_set_thmss',
-          ctor_Irel_thms, lthy |> Local_Theory.notes (Ibnf_common_notes @ Ibnf_notes) |> snd)
+          ctor_Irel_thms, Ibnf_common_notes @ Ibnf_notes, lthy)
       end;
 
       val ctor_fold_o_map_thms = mk_xtor_un_fold_o_map_thms Least_FP false m ctor_fold_unique_thm
@@ -1858,7 +1865,11 @@
         |> maps (fn (thmN, thmss) =>
           map2 (fn b => fn thms =>
             ((Binding.qualify true (Binding.name_of b) (Binding.name thmN), []), [(thms, [])]))
-          bs thmss)
+          bs thmss);
+
+    (*FIXME: once the package exports all the necessary high-level characteristic theorems,
+       those should not only be concealed but rather not noted at all*)
+    val maybe_conceal_notes = note_all = false ? map (apfst (apfst Binding.conceal));
   in
     timer;
     ({Ts = Ts, bnfs = Ibnfs, ctors = ctors, dtors = dtors, xtor_co_iterss = transpose [folds, recs],
@@ -1869,7 +1880,7 @@
       xtor_co_iter_thmss = transpose [ctor_fold_thms, ctor_rec_thms],
       xtor_co_iter_o_map_thmss = transpose [ctor_fold_o_map_thms, ctor_rec_o_map_thms],
       rel_xtor_co_induct_thm = Irel_induct_thm},
-     lthy |> Local_Theory.notes (common_notes @ notes) |> snd)
+     lthy |> Local_Theory.notes (maybe_conceal_notes (common_notes @ notes @ Ibnf_notes)) |> snd)
   end;
 
 val _ =
--- a/src/HOL/BNF/Tools/bnf_tactics.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/BNF/Tools/bnf_tactics.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -61,8 +61,7 @@
   |> pairself (dest_comb #> apsnd (dest_comb #> fst) #> HOLogic.mk_comp)
   |> mk_Trueprop_eq
   |> (fn goal => Goal.prove_sorry ctxt [] [] goal
-     (fn {context=ctxt, prems = _} =>
-       unfold_thms_tac ctxt [@{thm o_def}, mk_sym thm] THEN rtac refl 1))
+    (K (rtac ext 1 THEN unfold_thms_tac ctxt [o_apply, mk_sym thm] THEN rtac refl 1)))
   |> Thm.close_derivation;
 
 
@@ -102,7 +101,7 @@
   rtac (unfold_thms ctxt (IJrel_defs @ IJsrel_defs @
     @{thms Collect_pair_mem_eq mem_Collect_eq fst_conv snd_conv}) dtor_srel RS trans) 1 THEN
   unfold_thms_tac ctxt (srel_def ::
-    @{thms Collect_fst_snd_mem_eq mem_Collect_eq pair_mem_Collect_split fst_conv snd_conv
+    @{thms pair_collapse Collect_mem_eq mem_Collect_eq prod.cases fst_conv snd_conv
       split_conv}) THEN
   rtac refl 1;
 
--- a/src/HOL/Library/Convex.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Library/Convex.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -46,6 +46,12 @@
 lemma convex_Int: "convex s \<Longrightarrow> convex t \<Longrightarrow> convex (s \<inter> t)"
   unfolding convex_def by auto
 
+lemma convex_INT: "\<forall>i\<in>A. convex (B i) \<Longrightarrow> convex (\<Inter>i\<in>A. B i)"
+  unfolding convex_def by auto
+
+lemma convex_Times: "convex s \<Longrightarrow> convex t \<Longrightarrow> convex (s \<times> t)"
+  unfolding convex_def by auto
+
 lemma convex_halfspace_le: "convex {x. inner a x \<le> b}"
   unfolding convex_def
   by (auto simp: inner_add intro!: convex_bound_le)
--- a/src/HOL/Library/Set_Algebras.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Library/Set_Algebras.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -90,6 +90,11 @@
 lemma set_plus_intro [intro]: "a : C ==> b : D ==> a + b : C + D"
   by (auto simp add: set_plus_def)
 
+lemma set_plus_elim:
+  assumes "x \<in> A + B"
+  obtains a b where "x = a + b" and "a \<in> A" and "b \<in> B"
+  using assms unfolding set_plus_def by fast
+
 lemma set_plus_intro2 [intro]: "b : C ==> a + b : a +o C"
   by (auto simp add: elt_set_plus_def)
 
@@ -201,6 +206,11 @@
 lemma set_times_intro [intro]: "a : C ==> b : D ==> a * b : C * D"
   by (auto simp add: set_times_def)
 
+lemma set_times_elim:
+  assumes "x \<in> A * B"
+  obtains a b where "x = a * b" and "a \<in> A" and "b \<in> B"
+  using assms unfolding set_times_def by fast
+
 lemma set_times_intro2 [intro!]: "b : C ==> a * b : a *o C"
   by (auto simp add: elt_set_times_def)
 
@@ -321,10 +331,20 @@
     - a : (- 1) *o C"
   by (auto simp add: elt_set_times_def)
 
-lemma set_plus_image:
-  fixes S T :: "'n::semigroup_add set" shows "S + T = (\<lambda>(x, y). x + y) ` (S \<times> T)"
+lemma set_plus_image: "S + T = (\<lambda>(x, y). x + y) ` (S \<times> T)"
   unfolding set_plus_def by (fastforce simp: image_iff)
 
+lemma set_times_image: "S * T = (\<lambda>(x, y). x * y) ` (S \<times> T)"
+  unfolding set_times_def by (fastforce simp: image_iff)
+
+lemma finite_set_plus:
+  assumes "finite s" and "finite t" shows "finite (s + t)"
+  using assms unfolding set_plus_image by simp
+
+lemma finite_set_times:
+  assumes "finite s" and "finite t" shows "finite (s * t)"
+  using assms unfolding set_times_image by simp
+
 lemma set_setsum_alt:
   assumes fin: "finite I"
   shows "setsum S I = {setsum s I |s. \<forall>i\<in>I. s i \<in> S i}"
--- a/src/HOL/Limits.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Limits.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -185,17 +185,19 @@
 done
 
 text{*alternative formulation for boundedness*}
-lemma Bseq_iff3: "Bseq X = (\<exists>k > 0. \<exists>N. \<forall>n. norm(X(n) + -X(N)) \<le> k)"
-apply safe
-apply (simp add: Bseq_def, safe)
-apply (rule_tac x = "K + norm (X N)" in exI)
-apply auto
-apply (erule order_less_le_trans, simp)
-apply (rule_tac x = N in exI, safe)
-apply (drule_tac x = n in spec)
-apply (rule order_trans [OF norm_triangle_ineq], simp)
-apply (auto simp add: Bseq_iff2)
-done
+lemma Bseq_iff3:
+  "Bseq X \<longleftrightarrow> (\<exists>k>0. \<exists>N. \<forall>n. norm (X n + - X N) \<le> k)" (is "?P \<longleftrightarrow> ?Q")
+proof
+  assume ?P
+  then obtain K
+    where *: "0 < K" and **: "\<And>n. norm (X n) \<le> K" by (auto simp add: Bseq_def)
+  from * have "0 < K + norm (X 0)" by (rule order_less_le_trans) simp
+  moreover from ** have "\<forall>n. norm (X n + - X 0) \<le> K + norm (X 0)"
+    by (auto intro: order_trans norm_triangle_ineq)
+  ultimately show ?Q by blast
+next
+  assume ?Q then show ?P by (auto simp add: Bseq_iff2)
+qed
 
 lemma BseqI2: "(\<forall>n. k \<le> f n & f n \<le> (K::real)) ==> Bseq f"
 apply (simp add: Bseq_def)
--- a/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer_filter.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Mirabelle/Tools/mirabelle_sledgehammer_filter.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -12,11 +12,11 @@
 
 fun extract_relevance_fudge args
       {local_const_multiplier, worse_irrel_freq, higher_order_irrel_weight,
-       abs_rel_weight, abs_irrel_weight, skolem_irrel_weight,
-       theory_const_rel_weight, theory_const_irrel_weight,
-       chained_const_irrel_weight, intro_bonus, elim_bonus, simp_bonus,
-       local_bonus, assum_bonus, chained_bonus, max_imperfect, max_imperfect_exp,
-       threshold_divisor, ridiculous_threshold} =
+       abs_rel_weight, abs_irrel_weight, theory_const_rel_weight,
+       theory_const_irrel_weight, chained_const_irrel_weight, intro_bonus,
+       elim_bonus, simp_bonus, local_bonus, assum_bonus, chained_bonus,
+       max_imperfect, max_imperfect_exp, threshold_divisor,
+       ridiculous_threshold} =
   {local_const_multiplier =
        get args "local_const_multiplier" local_const_multiplier,
    worse_irrel_freq = get args "worse_irrel_freq" worse_irrel_freq,
@@ -24,7 +24,6 @@
        get args "higher_order_irrel_weight" higher_order_irrel_weight,
    abs_rel_weight = get args "abs_rel_weight" abs_rel_weight,
    abs_irrel_weight = get args "abs_irrel_weight" abs_irrel_weight,
-   skolem_irrel_weight = get args "skolem_irrel_weight" skolem_irrel_weight,
    theory_const_rel_weight =
        get args "theory_const_rel_weight" theory_const_rel_weight,
    theory_const_irrel_weight =
--- a/src/HOL/Multivariate_Analysis/Cartesian_Euclidean_Space.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Cartesian_Euclidean_Space.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -291,7 +291,7 @@
   by (metis component_le_norm_cart order_trans)
 
 lemma norm_bound_component_lt_cart: "norm x < e ==> \<bar>x$i\<bar> < e"
-  by (metis component_le_norm_cart basic_trans_rules(21))
+  by (metis component_le_norm_cart le_less_trans)
 
 lemma norm_le_l1_cart: "norm x <= setsum(\<lambda>i. \<bar>x$i\<bar>) UNIV"
   by (simp add: norm_vec_def setL2_le_setsum)
@@ -322,7 +322,6 @@
   shows "setsum (\<lambda>x. c *s f x) S = c *s setsum f S"
   by (simp add: vec_eq_iff setsum_right_distrib)
 
-(* TODO: use setsum_norm_allsubsets_bound *)
 lemma setsum_norm_allsubsets_bound_cart:
   fixes f:: "'a \<Rightarrow> real ^'n"
   assumes fP: "finite P" and fPs: "\<And>Q. Q \<subseteq> P \<Longrightarrow> norm (setsum f Q) \<le> e"
@@ -500,7 +499,7 @@
   where "matrix f = (\<chi> i j. (f(axis j 1))$i)"
 
 lemma matrix_vector_mul_linear: "linear(\<lambda>x. A *v (x::real ^ _))"
-  by (simp add: linear_def matrix_vector_mult_def vec_eq_iff
+  by (simp add: linear_iff matrix_vector_mult_def vec_eq_iff
       field_simps setsum_right_distrib setsum_addf)
 
 lemma matrix_works:
--- a/src/HOL/Multivariate_Analysis/Convex_Euclidean_Space.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Convex_Euclidean_Space.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -18,7 +18,7 @@
 (* ------------------------------------------------------------------------- *)
 
 lemma linear_scaleR: "linear (\<lambda>x. scaleR c x)"
-  by (simp add: linear_def scaleR_add_right)
+  by (simp add: linear_iff scaleR_add_right)
 
 lemma injective_scaleR: "c \<noteq> 0 \<Longrightarrow> inj (\<lambda>x::'a::real_vector. scaleR c x)"
   by (simp add: inj_on_def)
@@ -303,13 +303,13 @@
 qed
 
 lemma fst_linear: "linear fst"
-  unfolding linear_def by (simp add: algebra_simps)
+  unfolding linear_iff by (simp add: algebra_simps)
 
 lemma snd_linear: "linear snd"
-  unfolding linear_def by (simp add: algebra_simps)
+  unfolding linear_iff by (simp add: algebra_simps)
 
 lemma fst_snd_linear: "linear (%(x,y). x + y)"
-  unfolding linear_def by (simp add: algebra_simps)
+  unfolding linear_iff by (simp add: algebra_simps)
 
 lemma scaleR_2:
   fixes x :: "'a::real_vector"
@@ -8098,7 +8098,7 @@
       then obtain e where e: "e > 1" "(1 - e) *\<^sub>R f x + e *\<^sub>R f z \<in> S"
         using convex_rel_interior_iff[of S "f z"] z assms `S \<noteq> {}` by auto
       moreover have "(1 - e) *\<^sub>R f x + e *\<^sub>R f z = f ((1 - e) *\<^sub>R x + e *\<^sub>R z)"
-        using `linear f` by (simp add: linear_def)
+        using `linear f` by (simp add: linear_iff)
       ultimately have "\<exists>e. e > 1 \<and> (1 - e) *\<^sub>R x + e *\<^sub>R z \<in> f -` S"
         using e by auto
     }
@@ -8116,7 +8116,7 @@
       then obtain e where e: "e > 1" "(1 - e) *\<^sub>R y + e *\<^sub>R z \<in> f -` S"
         using convex_rel_interior_iff[of "f -` S" z] z conv by auto
       moreover have "(1 - e) *\<^sub>R x + e *\<^sub>R f z = f ((1 - e) *\<^sub>R y + e *\<^sub>R z)"
-        using `linear f` y by (simp add: linear_def)
+        using `linear f` y by (simp add: linear_iff)
       ultimately have "\<exists>e. e > 1 \<and> (1 - e) *\<^sub>R x + e *\<^sub>R f z \<in> S \<inter> range f"
         using e by auto
     }
--- a/src/HOL/Multivariate_Analysis/Derivative.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Derivative.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -14,7 +14,7 @@
   assume "bounded_linear f"
   then interpret f: bounded_linear f .
   show "linear f"
-    by (simp add: f.add f.scaleR linear_def)
+    by (simp add: f.add f.scaleR linear_iff)
 qed
 
 lemma netlimit_at_vector: (* TODO: move *)
@@ -1278,7 +1278,7 @@
       qed
     qed
     show "bounded_linear (g' x)"
-      unfolding linear_linear linear_def
+      unfolding linear_linear linear_iff
       apply(rule,rule,rule) defer
     proof(rule,rule)
       fix x' y z::"'m" and c::real
@@ -1286,12 +1286,12 @@
       show "g' x (c *\<^sub>R x') = c *\<^sub>R g' x x'"
         apply(rule tendsto_unique[OF trivial_limit_sequentially])
         apply(rule lem3[rule_format])
-        unfolding lin[unfolded bounded_linear_def bounded_linear_axioms_def,THEN conjunct2,THEN conjunct1,rule_format]
+        unfolding lin[THEN bounded_linear_imp_linear, THEN linear_cmul]
         apply (intro tendsto_intros) by(rule lem3[rule_format])
       show "g' x (y + z) = g' x y + g' x z"
         apply(rule tendsto_unique[OF trivial_limit_sequentially])
         apply(rule lem3[rule_format])
-        unfolding lin[unfolded bounded_linear_def additive_def,THEN conjunct1,rule_format]
+        unfolding lin[THEN bounded_linear_imp_linear, THEN linear_add]
         apply(rule tendsto_add) by(rule lem3[rule_format])+
     qed
     show "\<forall>e>0. \<exists>d>0. \<forall>y\<in>s. norm (y - x) < d \<longrightarrow> norm (g y - g x - g' x (y - x)) \<le> e * norm (y - x)"
--- a/src/HOL/Multivariate_Analysis/Determinants.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Determinants.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -1080,7 +1080,7 @@
       unfolding th0 fd[rule_format] by (simp add: power2_eq_square field_simps)}
   note fc = this
   show ?thesis
-    unfolding linear_def vector_eq[where 'a="real^'n"] scalar_mult_eq_scaleR
+    unfolding linear_iff vector_eq[where 'a="real^'n"] scalar_mult_eq_scaleR
     by (simp add: inner_add fc field_simps)
 qed
 
--- a/src/HOL/Multivariate_Analysis/Fashoda.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Fashoda.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -1,7 +1,8 @@
-(* Author:                     John Harrison
-   Translation from HOL light: Robert Himmelmann, TU Muenchen *)
+(*  Author:     John Harrison
+    Author:     Robert Himmelmann, TU Muenchen (translation from HOL light)
+*)
 
-header {* Fashoda meet theorem. *}
+header {* Fashoda meet theorem *}
 
 theory Fashoda
 imports Brouwer_Fixpoint Path_Connected Cartesian_Euclidean_Space
@@ -15,131 +16,312 @@
 lemma axis_in_Basis: "a \<in> Basis \<Longrightarrow> axis i a \<in> Basis"
   by (auto simp add: Basis_vec_def axis_eq_axis)
 
-subsection {*Fashoda meet theorem. *}
+
+subsection {* Fashoda meet theorem *}
 
-lemma infnorm_2: "infnorm (x::real^2) = max (abs(x$1)) (abs(x$2))"
-  unfolding infnorm_cart UNIV_2 apply(rule cSup_eq) by auto
+lemma infnorm_2:
+  fixes x :: "real^2"
+  shows "infnorm x = max (abs (x$1)) (abs (x$2))"
+  unfolding infnorm_cart UNIV_2 by (rule cSup_eq) auto
 
-lemma infnorm_eq_1_2: "infnorm (x::real^2) = 1 \<longleftrightarrow>
-        (abs(x$1) \<le> 1 \<and> abs(x$2) \<le> 1 \<and> (x$1 = -1 \<or> x$1 = 1 \<or> x$2 = -1 \<or> x$2 = 1))"
+lemma infnorm_eq_1_2:
+  fixes x :: "real^2"
+  shows "infnorm x = 1 \<longleftrightarrow>
+    abs (x$1) \<le> 1 \<and> abs (x$2) \<le> 1 \<and> (x$1 = -1 \<or> x$1 = 1 \<or> x$2 = -1 \<or> x$2 = 1)"
   unfolding infnorm_2 by auto
 
-lemma infnorm_eq_1_imp: assumes "infnorm (x::real^2) = 1" shows "abs(x$1) \<le> 1" "abs(x$2) \<le> 1"
+lemma infnorm_eq_1_imp:
+  fixes x :: "real^2"
+  assumes "infnorm x = 1"
+  shows "abs (x$1) \<le> 1" and "abs (x$2) \<le> 1"
   using assms unfolding infnorm_eq_1_2 by auto
 
-lemma fashoda_unit: fixes f g::"real \<Rightarrow> real^2"
-  assumes "f ` {- 1..1} \<subseteq> {- 1..1}" "g ` {- 1..1} \<subseteq> {- 1..1}"
-  "continuous_on {- 1..1} f"  "continuous_on {- 1..1} g"
-  "f (- 1)$1 = - 1" "f 1$1 = 1" "g (- 1) $2 = -1" "g 1 $2 = 1"
-  shows "\<exists>s\<in>{- 1..1}. \<exists>t\<in>{- 1..1}. f s = g t" proof(rule ccontr)
-  case goal1 note as = this[unfolded bex_simps,rule_format]
+lemma fashoda_unit:
+  fixes f g :: "real \<Rightarrow> real^2"
+  assumes "f ` {- 1..1} \<subseteq> {- 1..1}"
+    and "g ` {- 1..1} \<subseteq> {- 1..1}"
+    and "continuous_on {- 1..1} f"
+    and "continuous_on {- 1..1} g"
+    and "f (- 1)$1 = - 1"
+    and "f 1$1 = 1" "g (- 1) $2 = -1"
+    and "g 1 $2 = 1"
+  shows "\<exists>s\<in>{- 1..1}. \<exists>t\<in>{- 1..1}. f s = g t"
+proof (rule ccontr)
+  assume "\<not> ?thesis"
+  note as = this[unfolded bex_simps,rule_format]
   def sqprojection \<equiv> "\<lambda>z::real^2. (inverse (infnorm z)) *\<^sub>R z" 
-  def negatex \<equiv> "\<lambda>x::real^2. (vector [-(x$1), x$2])::real^2" 
-  have lem1:"\<forall>z::real^2. infnorm(negatex z) = infnorm z"
+  def negatex \<equiv> "\<lambda>x::real^2. (vector [-(x$1), x$2])::real^2"
+  have lem1: "\<forall>z::real^2. infnorm (negatex z) = infnorm z"
     unfolding negatex_def infnorm_2 vector_2 by auto
-  have lem2:"\<forall>z. z\<noteq>0 \<longrightarrow> infnorm(sqprojection z) = 1" unfolding sqprojection_def
-    unfolding infnorm_mul[unfolded scalar_mult_eq_scaleR] unfolding abs_inverse real_abs_infnorm
-    apply(subst infnorm_eq_0[THEN sym]) by auto
-  let ?F = "(\<lambda>w::real^2. (f \<circ> (\<lambda>x. x$1)) w - (g \<circ> (\<lambda>x. x$2)) w)"
-  have *:"\<And>i. (\<lambda>x::real^2. x $ i) ` {- 1..1} = {- 1..1::real}"
-    apply(rule set_eqI) unfolding image_iff Bex_def mem_interval_cart apply rule defer 
-    apply(rule_tac x="vec x" in exI) by auto
-  { fix x assume "x \<in> (\<lambda>w. (f \<circ> (\<lambda>x. x $ 1)) w - (g \<circ> (\<lambda>x. x $ 2)) w) ` {- 1..1::real^2}"
+  have lem2: "\<forall>z. z \<noteq> 0 \<longrightarrow> infnorm (sqprojection z) = 1"
+    unfolding sqprojection_def
+    unfolding infnorm_mul[unfolded scalar_mult_eq_scaleR]
+    unfolding abs_inverse real_abs_infnorm
+    apply (subst infnorm_eq_0[THEN sym])
+    apply auto
+    done
+  let ?F = "\<lambda>w::real^2. (f \<circ> (\<lambda>x. x$1)) w - (g \<circ> (\<lambda>x. x$2)) w"
+  have *: "\<And>i. (\<lambda>x::real^2. x $ i) ` {- 1..1} = {- 1..1::real}"
+    apply (rule set_eqI)
+    unfolding image_iff Bex_def mem_interval_cart
+    apply rule
+    defer
+    apply (rule_tac x="vec x" in exI)
+    apply auto
+    done
+  {
+    fix x
+    assume "x \<in> (\<lambda>w. (f \<circ> (\<lambda>x. x $ 1)) w - (g \<circ> (\<lambda>x. x $ 2)) w) ` {- 1..1::real^2}"
     then guess w unfolding image_iff .. note w = this
-    hence "x \<noteq> 0" using as[of "w$1" "w$2"] unfolding mem_interval_cart by auto} note x0=this
-  have 21:"\<And>i::2. i\<noteq>1 \<Longrightarrow> i=2" using UNIV_2 by auto
-  have 1:"{- 1<..<1::real^2} \<noteq> {}" unfolding interval_eq_empty_cart by auto
-  have 2:"continuous_on {- 1..1} (negatex \<circ> sqprojection \<circ> ?F)"
-    apply(intro continuous_on_intros continuous_on_component)
-    unfolding * apply(rule assms)+
-    apply(subst sqprojection_def)
-    apply(intro continuous_on_intros)
-    apply(simp add: infnorm_eq_0 x0)
-    apply(rule linear_continuous_on)
-  proof-
-    show "bounded_linear negatex" apply(rule bounded_linearI') unfolding vec_eq_iff proof(rule_tac[!] allI) fix i::2 and x y::"real^2" and c::real
-      show "negatex (x + y) $ i = (negatex x + negatex y) $ i" "negatex (c *\<^sub>R x) $ i = (c *\<^sub>R negatex x) $ i"
-        apply-apply(case_tac[!] "i\<noteq>1") prefer 3 apply(drule_tac[1-2] 21) 
-        unfolding negatex_def by(auto simp add:vector_2 ) qed
+    then have "x \<noteq> 0"
+      using as[of "w$1" "w$2"]
+      unfolding mem_interval_cart
+      by auto
+  } note x0 = this
+  have 21: "\<And>i::2. i \<noteq> 1 \<Longrightarrow> i = 2"
+    using UNIV_2 by auto
+  have 1: "{- 1<..<1::real^2} \<noteq> {}"
+    unfolding interval_eq_empty_cart by auto
+  have 2: "continuous_on {- 1..1} (negatex \<circ> sqprojection \<circ> ?F)"
+    apply (intro continuous_on_intros continuous_on_component)
+    unfolding *
+    apply (rule assms)+
+    apply (subst sqprojection_def)
+    apply (intro continuous_on_intros)
+    apply (simp add: infnorm_eq_0 x0)
+    apply (rule linear_continuous_on)
+  proof -
+    show "bounded_linear negatex"
+      apply (rule bounded_linearI')
+      unfolding vec_eq_iff
+    proof (rule_tac[!] allI)
+      fix i :: 2
+      fix x y :: "real^2"
+      fix c :: real
+      show "negatex (x + y) $ i =
+        (negatex x + negatex y) $ i" "negatex (c *\<^sub>R x) $ i = (c *\<^sub>R negatex x) $ i"
+        apply -
+        apply (case_tac[!] "i\<noteq>1")
+        prefer 3
+        apply (drule_tac[1-2] 21) 
+        unfolding negatex_def
+        apply (auto simp add:vector_2)
+        done
+    qed
   qed
-  have 3:"(negatex \<circ> sqprojection \<circ> ?F) ` {- 1..1} \<subseteq> {- 1..1}" unfolding subset_eq apply rule proof-
-    case goal1 then guess y unfolding image_iff .. note y=this have "?F y \<noteq> 0" apply(rule x0) using y(1) by auto
-    hence *:"infnorm (sqprojection (?F y)) = 1" unfolding y o_def apply- by(rule lem2[rule_format])
-    have "infnorm x = 1" unfolding *[THEN sym] y o_def by(rule lem1[rule_format])
-    thus "x\<in>{- 1..1}" unfolding mem_interval_cart infnorm_2 apply- apply rule
-    proof-case goal1 thus ?case apply(cases "i=1") defer apply(drule 21) by auto qed qed
-  guess x apply(rule brouwer_weak[of "{- 1..1::real^2}" "negatex \<circ> sqprojection \<circ> ?F"])
-    apply(rule compact_interval convex_interval)+ unfolding interior_closed_interval
-    apply(rule 1 2 3)+ . note x=this
-  have "?F x \<noteq> 0" apply(rule x0) using x(1) by auto
-  hence *:"infnorm (sqprojection (?F x)) = 1" unfolding o_def by(rule lem2[rule_format])
-  have nx:"infnorm x = 1" apply(subst x(2)[THEN sym]) unfolding *[THEN sym] o_def by(rule lem1[rule_format])
-  have "\<forall>x i. x \<noteq> 0 \<longrightarrow> (0 < (sqprojection x)$i \<longleftrightarrow> 0 < x$i)"    "\<forall>x i. x \<noteq> 0 \<longrightarrow> ((sqprojection x)$i < 0 \<longleftrightarrow> x$i < 0)"
-    apply- apply(rule_tac[!] allI impI)+ proof- fix x::"real^2" and i::2 assume x:"x\<noteq>0"
-    have "inverse (infnorm x) > 0" using x[unfolded infnorm_pos_lt[THEN sym]] by auto
-    thus "(0 < sqprojection x $ i) = (0 < x $ i)"   "(sqprojection x $ i < 0) = (x $ i < 0)"
+  have 3: "(negatex \<circ> sqprojection \<circ> ?F) ` {- 1..1} \<subseteq> {- 1..1}"
+    unfolding subset_eq
+    apply rule
+  proof -
+    case goal1
+    then guess y unfolding image_iff .. note y=this
+    have "?F y \<noteq> 0"
+      apply (rule x0)
+      using y(1)
+      apply auto
+      done
+    then have *: "infnorm (sqprojection (?F y)) = 1"
+      unfolding y o_def by - (rule lem2[rule_format])
+    have "infnorm x = 1"
+      unfolding *[THEN sym] y o_def by (rule lem1[rule_format])
+    then show "x \<in> {- 1..1}"
+      unfolding mem_interval_cart infnorm_2
+      apply -
+      apply rule
+    proof -
+      case goal1
+      then show ?case
+        apply (cases "i = 1")
+        defer
+        apply (drule 21)
+        apply auto
+        done
+    qed
+  qed
+  guess x
+    apply (rule brouwer_weak[of "{- 1..1::real^2}" "negatex \<circ> sqprojection \<circ> ?F"])
+    apply (rule compact_interval convex_interval)+ unfolding interior_closed_interval
+    apply (rule 1 2 3)+
+    done
+  note x=this
+  have "?F x \<noteq> 0"
+    apply (rule x0)
+    using x(1)
+    apply auto
+    done
+  then have *: "infnorm (sqprojection (?F x)) = 1"
+    unfolding o_def by (rule lem2[rule_format])
+  have nx: "infnorm x = 1"
+    apply (subst x(2)[THEN sym])
+    unfolding *[THEN sym] o_def
+    apply (rule lem1[rule_format])
+    done
+  have "\<forall>x i. x \<noteq> 0 \<longrightarrow> (0 < (sqprojection x)$i \<longleftrightarrow> 0 < x$i)"
+    and "\<forall>x i. x \<noteq> 0 \<longrightarrow> ((sqprojection x)$i < 0 \<longleftrightarrow> x$i < 0)"
+    apply -
+    apply (rule_tac[!] allI impI)+
+  proof -
+    fix x :: "real^2"
+    fix i :: 2
+    assume x: "x \<noteq> 0"
+    have "inverse (infnorm x) > 0"
+      using x[unfolded infnorm_pos_lt[THEN sym]] by auto
+    then show "(0 < sqprojection x $ i) = (0 < x $ i)"
+      and "(sqprojection x $ i < 0) = (x $ i < 0)"
       unfolding sqprojection_def vector_component_simps vector_scaleR_component real_scaleR_def
-      unfolding zero_less_mult_iff mult_less_0_iff by(auto simp add:field_simps) qed
+      unfolding zero_less_mult_iff mult_less_0_iff
+      by (auto simp add: field_simps)
+  qed
   note lem3 = this[rule_format]
-  have x1:"x $ 1 \<in> {- 1..1::real}" "x $ 2 \<in> {- 1..1::real}" using x(1) unfolding mem_interval_cart by auto
-  hence nz:"f (x $ 1) - g (x $ 2) \<noteq> 0" unfolding right_minus_eq apply-apply(rule as) by auto
-  have "x $ 1 = -1 \<or> x $ 1 = 1 \<or> x $ 2 = -1 \<or> x $ 2 = 1" using nx unfolding infnorm_eq_1_2 by auto 
-  thus False proof- fix P Q R S 
-    presume "P \<or> Q \<or> R \<or> S" "P\<Longrightarrow>False" "Q\<Longrightarrow>False" "R\<Longrightarrow>False" "S\<Longrightarrow>False" thus False by auto
-  next assume as:"x$1 = 1"
-    hence *:"f (x $ 1) $ 1 = 1" using assms(6) by auto
+  have x1: "x $ 1 \<in> {- 1..1::real}" "x $ 2 \<in> {- 1..1::real}"
+    using x(1) unfolding mem_interval_cart by auto
+  then have nz: "f (x $ 1) - g (x $ 2) \<noteq> 0"
+    unfolding right_minus_eq
+    apply -
+    apply (rule as)
+    apply auto
+    done
+  have "x $ 1 = -1 \<or> x $ 1 = 1 \<or> x $ 2 = -1 \<or> x $ 2 = 1"
+    using nx unfolding infnorm_eq_1_2 by auto 
+  then show False
+  proof -
+    fix P Q R S 
+    presume "P \<or> Q \<or> R \<or> S"
+      and "P \<Longrightarrow> False"
+      and "Q \<Longrightarrow> False"
+      and "R \<Longrightarrow> False"
+      and "S \<Longrightarrow> False"
+    then show False by auto
+  next
+    assume as: "x$1 = 1"
+    then have *: "f (x $ 1) $ 1 = 1"
+      using assms(6) by auto
     have "sqprojection (f (x$1) - g (x$2)) $ 1 < 0"
       using x(2)[unfolded o_def vec_eq_iff,THEN spec[where x=1]]
-      unfolding as negatex_def vector_2 by auto moreover
-    from x1 have "g (x $ 2) \<in> {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto
-    ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
-      apply(erule_tac x=1 in allE) by auto 
-  next assume as:"x$1 = -1"
-    hence *:"f (x $ 1) $ 1 = - 1" using assms(5) by auto
+      unfolding as negatex_def vector_2
+      by auto
+    moreover
+    from x1 have "g (x $ 2) \<in> {- 1..1}"
+      apply -
+      apply (rule assms(2)[unfolded subset_eq,rule_format])
+      apply auto
+      done
+    ultimately show False
+      unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
+      apply (erule_tac x=1 in allE)
+      apply auto
+      done
+  next
+    assume as: "x$1 = -1"
+    then have *: "f (x $ 1) $ 1 = - 1"
+      using assms(5) by auto
     have "sqprojection (f (x$1) - g (x$2)) $ 1 > 0"
       using x(2)[unfolded o_def vec_eq_iff,THEN spec[where x=1]]
-      unfolding as negatex_def vector_2 by auto moreover
-    from x1 have "g (x $ 2) \<in> {- 1..1}" apply-apply(rule assms(2)[unfolded subset_eq,rule_format]) by auto
-    ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
-      apply(erule_tac x=1 in allE) by auto
-  next assume as:"x$2 = 1"
-    hence *:"g (x $ 2) $ 2 = 1" using assms(8) by auto
+      unfolding as negatex_def vector_2
+      by auto
+    moreover
+    from x1 have "g (x $ 2) \<in> {- 1..1}"
+      apply -
+      apply (rule assms(2)[unfolded subset_eq,rule_format])
+      apply auto
+      done
+    ultimately show False
+      unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
+      apply (erule_tac x=1 in allE)
+      apply auto
+      done
+  next
+    assume as: "x$2 = 1"
+    then have *: "g (x $ 2) $ 2 = 1"
+      using assms(8) by auto
     have "sqprojection (f (x$1) - g (x$2)) $ 2 > 0"
       using x(2)[unfolded o_def vec_eq_iff,THEN spec[where x=2]]
-      unfolding as negatex_def vector_2 by auto moreover
-    from x1 have "f (x $ 1) \<in> {- 1..1}" apply-apply(rule assms(1)[unfolded subset_eq,rule_format]) by auto
-    ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
-     apply(erule_tac x=2 in allE) by auto
- next assume as:"x$2 = -1"
-    hence *:"g (x $ 2) $ 2 = - 1" using assms(7) by auto
+      unfolding as negatex_def vector_2
+      by auto
+    moreover
+    from x1 have "f (x $ 1) \<in> {- 1..1}"
+      apply -
+      apply (rule assms(1)[unfolded subset_eq,rule_format])
+      apply auto
+      done
+    ultimately show False
+      unfolding lem3[OF nz] vector_component_simps * mem_interval_cart
+      apply (erule_tac x=2 in allE)
+      apply auto
+      done
+  next
+    assume as: "x$2 = -1"
+    then have *: "g (x $ 2) $ 2 = - 1"
+      using assms(7) by auto
     have "sqprojection (f (x$1) - g (x$2)) $ 2 < 0"
       using x(2)[unfolded o_def vec_eq_iff,THEN spec[where x=2]]
-      unfolding as negatex_def vector_2 by auto moreover
-    from x1 have "f (x $ 1) \<in> {- 1..1}" apply-apply(rule assms(1)[unfolded subset_eq,rule_format]) by auto
-    ultimately show False unfolding lem3[OF nz] vector_component_simps * mem_interval_cart 
-      apply(erule_tac x=2 in allE) by auto qed(auto) qed
+      unfolding as negatex_def vector_2
+      by auto
+    moreover
+    from x1 have "f (x $ 1) \<in> {- 1..1}"
+      apply -
+      apply (rule assms(1)[unfolded subset_eq,rule_format])
+      apply auto
+      done
+    ultimately show False
+      unfolding lem3[OF nz] vector_component_simps * mem_interval_cart
+      apply (erule_tac x=2 in allE)
+      apply auto
+      done
+  qed auto
+qed
 
-lemma fashoda_unit_path: fixes f ::"real \<Rightarrow> real^2" and g ::"real \<Rightarrow> real^2"
-  assumes "path f" "path g" "path_image f \<subseteq> {- 1..1}" "path_image g \<subseteq> {- 1..1}"
-  "(pathstart f)$1 = -1" "(pathfinish f)$1 = 1"  "(pathstart g)$2 = -1" "(pathfinish g)$2 = 1"
-  obtains z where "z \<in> path_image f" "z \<in> path_image g" proof-
+lemma fashoda_unit_path:
+  fixes f g :: "real \<Rightarrow> real^2"
+  assumes "path f"
+    and "path g"
+    and "path_image f \<subseteq> {- 1..1}"
+    and "path_image g \<subseteq> {- 1..1}"
+    and "(pathstart f)$1 = -1"
+    and "(pathfinish f)$1 = 1"
+    and "(pathstart g)$2 = -1"
+    and "(pathfinish g)$2 = 1"
+  obtains z where "z \<in> path_image f" and "z \<in> path_image g"
+proof -
   note assms=assms[unfolded path_def pathstart_def pathfinish_def path_image_def]
   def iscale \<equiv> "\<lambda>z::real. inverse 2 *\<^sub>R (z + 1)"
-  have isc:"iscale ` {- 1..1} \<subseteq> {0..1}" unfolding iscale_def by(auto)
-  have "\<exists>s\<in>{- 1..1}. \<exists>t\<in>{- 1..1}. (f \<circ> iscale) s = (g \<circ> iscale) t" proof(rule fashoda_unit) 
+  have isc: "iscale ` {- 1..1} \<subseteq> {0..1}"
+    unfolding iscale_def by auto
+  have "\<exists>s\<in>{- 1..1}. \<exists>t\<in>{- 1..1}. (f \<circ> iscale) s = (g \<circ> iscale) t"
+  proof (rule fashoda_unit)
     show "(f \<circ> iscale) ` {- 1..1} \<subseteq> {- 1..1}" "(g \<circ> iscale) ` {- 1..1} \<subseteq> {- 1..1}"
       using isc and assms(3-4) unfolding image_compose by auto
-    have *:"continuous_on {- 1..1} iscale" unfolding iscale_def by(rule continuous_on_intros)+
+    have *: "continuous_on {- 1..1} iscale"
+      unfolding iscale_def by (rule continuous_on_intros)+
     show "continuous_on {- 1..1} (f \<circ> iscale)" "continuous_on {- 1..1} (g \<circ> iscale)"
-      apply-apply(rule_tac[!] continuous_on_compose[OF *]) apply(rule_tac[!] continuous_on_subset[OF _ isc])
-      by(rule assms)+ have *:"(1 / 2) *\<^sub>R (1 + (1::real^1)) = 1" unfolding vec_eq_iff by auto
-    show "(f \<circ> iscale) (- 1) $ 1 = - 1" "(f \<circ> iscale) 1 $ 1 = 1" "(g \<circ> iscale) (- 1) $ 2 = -1" "(g \<circ> iscale) 1 $ 2 = 1"
-      unfolding o_def iscale_def using assms by(auto simp add:*) qed
+      apply -
+      apply (rule_tac[!] continuous_on_compose[OF *])
+      apply (rule_tac[!] continuous_on_subset[OF _ isc])
+      apply (rule assms)+
+      done
+    have *: "(1 / 2) *\<^sub>R (1 + (1::real^1)) = 1"
+      unfolding vec_eq_iff by auto
+    show "(f \<circ> iscale) (- 1) $ 1 = - 1"
+      and "(f \<circ> iscale) 1 $ 1 = 1"
+      and "(g \<circ> iscale) (- 1) $ 2 = -1"
+      and "(g \<circ> iscale) 1 $ 2 = 1"
+      unfolding o_def iscale_def
+      using assms
+      by (auto simp add: *)
+  qed
   then guess s .. from this(2) guess t .. note st=this
-  show thesis apply(rule_tac z="f (iscale s)" in that)
-    using st `s\<in>{- 1..1}` unfolding o_def path_image_def image_iff apply-
-    apply(rule_tac x="iscale s" in bexI) prefer 3 apply(rule_tac x="iscale t" in bexI)
-    using isc[unfolded subset_eq, rule_format] by auto qed
+  show thesis
+    apply (rule_tac z="f (iscale s)" in that)
+    using st `s\<in>{- 1..1}`
+    unfolding o_def path_image_def image_iff
+    apply -
+    apply (rule_tac x="iscale s" in bexI)
+    prefer 3
+    apply (rule_tac x="iscale t" in bexI)
+    using isc[unfolded subset_eq, rule_format]
+    apply auto
+    done
+qed
 
 lemma fashoda: fixes b::"real^2"
   assumes "path f" "path g" "path_image f \<subseteq> {a..b}" "path_image g \<subseteq> {a..b}"
--- a/src/HOL/Multivariate_Analysis/Integration.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Integration.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -136,18 +136,21 @@
     "f 0 = 0"
     "f (- a) = - f a"
     "f (s *\<^sub>R v) = s *\<^sub>R (f v)"
-  apply (rule_tac[!] additive.add additive.minus additive.diff additive.zero bounded_linear.scaleR)
-  using assms unfolding bounded_linear_def additive_def
-  apply auto
-  done
+proof -
+  interpret f: bounded_linear f by fact
+  show "f (a + b) = f a + f b" by (rule f.add)
+  show "f (a - b) = f a - f b" by (rule f.diff)
+  show "f 0 = 0" by (rule f.zero)
+  show "f (- a) = - f a" by (rule f.minus)
+  show "f (s *\<^sub>R v) = s *\<^sub>R (f v)" by (rule f.scaleR)
+qed
 
 lemma bounded_linearI:
   assumes "\<And>x y. f (x + y) = f x + f y"
     and "\<And>r x. f (r *\<^sub>R x) = r *\<^sub>R f x"
     and "\<And>x. norm (f x) \<le> norm x * K"
   shows "bounded_linear f"
-  unfolding bounded_linear_def additive_def bounded_linear_axioms_def
-  using assms by auto
+  using assms by (rule bounded_linear_intro) (* FIXME: duplicate *)
 
 lemma bounded_linear_component [intro]: "bounded_linear (\<lambda>x::'a::euclidean_space. x \<bullet> k)"
   by (rule bounded_linear_inner_left)
@@ -3324,12 +3327,13 @@
   have *: "\<And>(a::'a) b c. content ({a..b} \<inter> {x. x\<bullet>k \<le> c}) = 0 \<longleftrightarrow>
     interior({a..b} \<inter> {x. x\<bullet>k \<le> c}) = {}"
     unfolding  interval_split[OF k] content_eq_0_interior by auto
-  guess u1 v1 using d(4)[OF assms(2)] apply-by(erule exE)+ note uv1=this
-  guess u2 v2 using d(4)[OF assms(3)] apply-by(erule exE)+ note uv2=this
+  guess u1 v1 using d(4)[OF assms(2)] by (elim exE) note uv1=this
+  guess u2 v2 using d(4)[OF assms(3)] by (elim exE) note uv2=this
   have **: "\<And>s t u. s \<inter> t = {} \<Longrightarrow> u \<subseteq> s \<Longrightarrow> u \<subseteq> t \<Longrightarrow> u = {}"
     by auto
   show ?thesis
-    unfolding uv1 uv2 * apply(rule **[OF d(5)[OF assms(2-4)]])
+    unfolding uv1 uv2 *
+    apply (rule **[OF d(5)[OF assms(2-4)]])
     defer
     apply (subst assms(5)[unfolded uv1 uv2])
     unfolding uv1 uv2
@@ -3686,7 +3690,7 @@
         unfolding lem3[OF p(3)]
         apply (subst setsum_reindex_nonzero[OF p(3)])
         defer
-        apply(subst setsum_reindex_nonzero[OF p(3)])
+        apply (subst setsum_reindex_nonzero[OF p(3)])
         defer
         unfolding lem4[symmetric]
         apply (rule refl)
@@ -3903,7 +3907,7 @@
           unfolding interval_split[OF k] b'_def[symmetric] a'_def[symmetric]
           using p
           using assms
-          by (auto simp add:algebra_simps)
+          by (auto simp add: algebra_simps)
       qed
     qed
   qed
@@ -3927,7 +3931,7 @@
       opp (f ({a..b} \<inter> {x. x\<bullet>k \<le> c})) (f ({a..b} \<inter> {x. x\<bullet>k \<ge> c}))"
   using assms unfolding operative_def by auto
 
-lemma operative_trivial: "operative opp f \<Longrightarrow> content({a..b}) = 0 \<Longrightarrow> f({a..b}) = neutral opp"
+lemma operative_trivial: "operative opp f \<Longrightarrow> content {a..b} = 0 \<Longrightarrow> f {a..b} = neutral opp"
   unfolding operative_def by auto
 
 lemma property_empty_interval: "\<forall>a b. content {a..b} = 0 \<longrightarrow> P {a..b} \<Longrightarrow> P {}"
@@ -4122,11 +4126,8 @@
   apply rule
   done
 
-lemma neutral_monoid: "neutral ((op +)::('a::comm_monoid_add) \<Rightarrow> 'a \<Rightarrow> 'a) = 0"
-  by (rule neutral_add) (* FIXME: duplicate *)
-
 lemma monoidal_monoid[intro]: "monoidal ((op +)::('a::comm_monoid_add) \<Rightarrow> 'a \<Rightarrow> 'a)"
-  unfolding monoidal_def neutral_monoid
+  unfolding monoidal_def neutral_add
   by (auto simp add: algebra_simps)
 
 lemma operative_integral:
@@ -4854,12 +4855,12 @@
 proof -
   have *: "setsum f s = setsum f (support op + f s)"
     apply (rule setsum_mono_zero_right)
-    unfolding support_def neutral_monoid
+    unfolding support_def neutral_add
     using assms
     apply auto
     done
   then show ?thesis unfolding * iterate_def fold'_def setsum.eq_fold
-    unfolding neutral_monoid by (simp add: comp_def)
+    unfolding neutral_add by (simp add: comp_def)
 qed
 
 lemma additive_content_division:
@@ -5180,7 +5181,7 @@
   by auto
 
 lemma has_integral_component_lbound:
-  fixes f :: "'a::ordered_euclidean_space => 'b::ordered_euclidean_space"
+  fixes f :: "'a::ordered_euclidean_space \<Rightarrow> 'b::ordered_euclidean_space"
   assumes "(f has_integral i) {a..b}"
     and "\<forall>x\<in>{a..b}. B \<le> f(x)\<bullet>k"
     and "k \<in> Basis"
@@ -5398,7 +5399,6 @@
   apply (rule iterate_nonzero_image_lemma)
   apply (rule assms monoidal_monoid)+
   unfolding assms
-  using neutral_add
   unfolding neutral_add
   using assms
   apply auto
@@ -6354,54 +6354,121 @@
   using operative_division[OF monoidal_and assms] division_of_finite[OF assms(2)]
   by auto
 
-lemma operative_approximable: assumes "0 \<le> e" fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
-  shows "operative op \<and> (\<lambda>i. \<exists>g. (\<forall>x\<in>i. norm (f x - g (x::'b)) \<le> e) \<and> g integrable_on i)" unfolding operative_def neutral_and
+lemma operative_approximable:
+  fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  assumes "0 \<le> e"
+  shows "operative op \<and> (\<lambda>i. \<exists>g. (\<forall>x\<in>i. norm (f x - g (x::'b)) \<le> e) \<and> g integrable_on i)"
+  unfolding operative_def neutral_and
 proof safe
-  fix a b::"'b"
-  { assume "content {a..b} = 0"
-    thus "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}"
-      apply(rule_tac x=f in exI) using assms by(auto intro!:integrable_on_null) }
-  { fix c g and k :: 'b
-    assume as:"\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e" "g integrable_on {a..b}" and k:"k\<in>Basis"
+  fix a b :: 'b
+  {
+    assume "content {a..b} = 0"
+    then show "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}"
+      apply (rule_tac x=f in exI)
+      using assms
+      apply (auto intro!:integrable_on_null)
+      done
+  }
+  {
+    fix c g
+    fix k :: 'b
+    assume as: "\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e" "g integrable_on {a..b}"
+    assume k: "k \<in> Basis"
     show "\<exists>g. (\<forall>x\<in>{a..b} \<inter> {x. x \<bullet> k \<le> c}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}"
       "\<exists>g. (\<forall>x\<in>{a..b} \<inter> {x. c \<le> x \<bullet> k}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b} \<inter> {x. c \<le> x \<bullet> k}"
-      apply(rule_tac[!] x=g in exI) using as(1) integrable_split[OF as(2) k] by auto }
-  fix c k g1 g2 assume as:"\<forall>x\<in>{a..b} \<inter> {x. x \<bullet> k \<le> c}. norm (f x - g1 x) \<le> e" "g1 integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}"
-                          "\<forall>x\<in>{a..b} \<inter> {x. c \<le> x \<bullet> k}. norm (f x - g2 x) \<le> e" "g2 integrable_on {a..b} \<inter> {x. c \<le> x \<bullet> k}"
-  assume k:"k\<in>Basis"
+      apply (rule_tac[!] x=g in exI)
+      using as(1) integrable_split[OF as(2) k]
+      apply auto
+      done
+  }
+  fix c k g1 g2
+  assume as: "\<forall>x\<in>{a..b} \<inter> {x. x \<bullet> k \<le> c}. norm (f x - g1 x) \<le> e" "g1 integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}"
+    "\<forall>x\<in>{a..b} \<inter> {x. c \<le> x \<bullet> k}. norm (f x - g2 x) \<le> e" "g2 integrable_on {a..b} \<inter> {x. c \<le> x \<bullet> k}"
+  assume k: "k \<in> Basis"
   let ?g = "\<lambda>x. if x\<bullet>k = c then f x else if x\<bullet>k \<le> c then g1 x else g2 x"
-  show "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}" apply(rule_tac x="?g" in exI)
-  proof safe case goal1 thus ?case apply- apply(cases "x\<bullet>k=c", case_tac "x\<bullet>k < c") using as assms by auto
-  next case goal2 presume "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}" "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<ge> c}"
-    then guess h1 h2 unfolding integrable_on_def by auto from has_integral_split[OF this k]
-    show ?case unfolding integrable_on_def by auto
-  next show "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}" "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<ge> c}"
-      apply(rule_tac[!] integrable_spike[OF negligible_standard_hyperplane[of k c]]) using k as(2,4) by auto qed qed
-
-lemma approximable_on_division: fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
-  assumes "0 \<le> e" "d division_of {a..b}" "\<forall>i\<in>d. \<exists>g. (\<forall>x\<in>i. norm (f x - g x) \<le> e) \<and> g integrable_on i"
+  show "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}"
+    apply (rule_tac x="?g" in exI)
+  proof safe
+    case goal1
+    then show ?case
+      apply -
+      apply (cases "x\<bullet>k=c")
+      apply (case_tac "x\<bullet>k < c")
+      using as assms
+      apply auto
+      done
+  next
+    case goal2
+    presume "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}"
+      and "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<ge> c}"
+    then guess h1 h2 unfolding integrable_on_def by auto
+    from has_integral_split[OF this k] show ?case
+      unfolding integrable_on_def by auto
+  next
+    show "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<le> c}" "?g integrable_on {a..b} \<inter> {x. x \<bullet> k \<ge> c}"
+      apply(rule_tac[!] integrable_spike[OF negligible_standard_hyperplane[of k c]])
+      using k as(2,4)
+      apply auto
+      done
+  qed
+qed
+
+lemma approximable_on_division:
+  fixes f :: "'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  assumes "0 \<le> e"
+    and "d division_of {a..b}"
+    and "\<forall>i\<in>d. \<exists>g. (\<forall>x\<in>i. norm (f x - g x) \<le> e) \<and> g integrable_on i"
   obtains g where "\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e" "g integrable_on {a..b}"
-proof- note * = operative_division[OF monoidal_and operative_approximable[OF assms(1)] assms(2)]
-  note this[unfolded iterate_and[OF division_of_finite[OF assms(2)]]] from assms(3)[unfolded this[of f]]
-  guess g .. thus thesis apply-apply(rule that[of g]) by auto qed
-
-lemma integrable_continuous: fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
-  assumes "continuous_on {a..b} f" shows "f integrable_on {a..b}"
-proof(rule integrable_uniform_limit,safe) fix e::real assume e:"0 < e"
+proof -
+  note * = operative_division[OF monoidal_and operative_approximable[OF assms(1)] assms(2)]
+  note this[unfolded iterate_and[OF division_of_finite[OF assms(2)]]]
+  from assms(3)[unfolded this[of f]] guess g ..
+  then show thesis
+    apply -
+    apply (rule that[of g])
+    apply auto
+    done
+qed
+
+lemma integrable_continuous:
+  fixes f :: "'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  assumes "continuous_on {a..b} f"
+  shows "f integrable_on {a..b}"
+proof (rule integrable_uniform_limit, safe)
+  fix e :: real
+  assume e: "e > 0"
   from compact_uniformly_continuous[OF assms compact_interval,unfolded uniformly_continuous_on_def,rule_format,OF e] guess d ..
   note d=conjunctD2[OF this,rule_format]
   from fine_division_exists[OF gauge_ball[OF d(1)], of a b] guess p . note p=this
   note p' = tagged_division_ofD[OF p(1)]
-  have *:"\<forall>i\<in>snd ` p. \<exists>g. (\<forall>x\<in>i. norm (f x - g x) \<le> e) \<and> g integrable_on i"
-  proof(safe,unfold snd_conv) fix x l assume as:"(x,l) \<in> p"
-    from p'(4)[OF this] guess a b apply-by(erule exE)+ note l=this
-    show "\<exists>g. (\<forall>x\<in>l. norm (f x - g x) \<le> e) \<and> g integrable_on l" apply(rule_tac x="\<lambda>y. f x" in exI)
-    proof safe show "(\<lambda>y. f x) integrable_on l" unfolding integrable_on_def l by(rule,rule has_integral_const)
-      fix y assume y:"y\<in>l" note fineD[OF p(2) as,unfolded subset_eq,rule_format,OF this]
+  have *: "\<forall>i\<in>snd ` p. \<exists>g. (\<forall>x\<in>i. norm (f x - g x) \<le> e) \<and> g integrable_on i"
+  proof (safe, unfold snd_conv)
+    fix x l
+    assume as: "(x, l) \<in> p"
+    from p'(4)[OF this] guess a b by (elim exE) note l=this
+    show "\<exists>g. (\<forall>x\<in>l. norm (f x - g x) \<le> e) \<and> g integrable_on l"
+      apply (rule_tac x="\<lambda>y. f x" in exI)
+    proof safe
+      show "(\<lambda>y. f x) integrable_on l"
+        unfolding integrable_on_def l
+        apply rule
+        apply (rule has_integral_const)
+        done
+      fix y
+      assume y: "y \<in> l"
+      note fineD[OF p(2) as,unfolded subset_eq,rule_format,OF this]
       note d(2)[OF _ _ this[unfolded mem_ball]]
-      thus "norm (f y - f x) \<le> e" using y p'(2-3)[OF as] unfolding dist_norm l norm_minus_commute by fastforce qed qed
-  from e have "0 \<le> e" by auto from approximable_on_division[OF this division_of_tagged_division[OF p(1)] *] guess g .
-  thus "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}" by auto qed
+      then show "norm (f y - f x) \<le> e"
+        using y p'(2-3)[OF as] unfolding dist_norm l norm_minus_commute by fastforce
+    qed
+  qed
+  from e have "e \<ge> 0"
+    by auto
+  from approximable_on_division[OF this division_of_tagged_division[OF p(1)] *] guess g .
+  then show "\<exists>g. (\<forall>x\<in>{a..b}. norm (f x - g x) \<le> e) \<and> g integrable_on {a..b}"
+    by auto
+qed
+
 
 subsection {* Specialization of additivity to one dimension. *}
 
@@ -6410,374 +6477,978 @@
   and real_inner_1_right: "inner x 1 = x"
   by simp_all
 
-lemma operative_1_lt: assumes "monoidal opp"
+lemma operative_1_lt:
+  assumes "monoidal opp"
   shows "operative opp f \<longleftrightarrow> ((\<forall>a b. b \<le> a \<longrightarrow> f {a..b::real} = neutral opp) \<and>
-                (\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f{a..c})(f{c..b}) = f {a..b}))"
-  apply (simp add: operative_def content_eq_0 less_one)
-proof safe fix a b c::"real" assume as:"\<forall>a b c. f {a..b} = opp (f ({a..b} \<inter> {x. x \<le> c}))
-    (f ({a..b} \<inter> {x. c \<le> x}))" "a < c" "c < b"
-    from this(2-) have "{a..b} \<inter> {x. x \<le> c} = {a..c}" "{a..b} \<inter> {x. x \<ge> c} = {c..b}" by auto
-    thus "opp (f {a..c}) (f {c..b}) = f {a..b}" unfolding as(1)[rule_format,of a b "c"] by auto
-next fix a b c::real
-  assume as:"\<forall>a b. b \<le> a \<longrightarrow> f {a..b} = neutral opp" "\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}"
+    (\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}))"
+  apply (simp add: operative_def content_eq_0)
+proof safe
+  fix a b c :: real
+  assume as:
+    "\<forall>a b c. f {a..b} = opp (f ({a..b} \<inter> {x. x \<le> c})) (f ({a..b} \<inter> {x. c \<le> x}))"
+    "a < c"
+    "c < b"
+    from this(2-) have "{a..b} \<inter> {x. x \<le> c} = {a..c}" "{a..b} \<inter> {x. x \<ge> c} = {c..b}"
+      by auto
+    then show "opp (f {a..c}) (f {c..b}) = f {a..b}"
+      unfolding as(1)[rule_format,of a b "c"] by auto
+next
+  fix a b c :: real
+  assume as: "\<forall>a b. b \<le> a \<longrightarrow> f {a..b} = neutral opp"
+    "\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}"
   show "f {a..b} = opp (f ({a..b} \<inter> {x. x \<le> c})) (f ({a..b} \<inter> {x. c \<le> x}))"
-  proof(cases "c \<in> {a .. b}")
-    case False hence "c<a \<or> c>b" by auto
-    thus ?thesis apply-apply(erule disjE)
-    proof- assume "c<a" hence *:"{a..b} \<inter> {x. x \<le> c} = {1..0}"  "{a..b} \<inter> {x. c \<le> x} = {a..b}" by auto
-      show ?thesis unfolding * apply(subst as(1)[rule_format,of 0 1]) using assms by auto
-    next   assume "b<c" hence *:"{a..b} \<inter> {x. x \<le> c} = {a..b}"  "{a..b} \<inter> {x. c \<le> x} = {1..0}" by auto
-      show ?thesis unfolding * apply(subst as(1)[rule_format,of 0 1]) using assms by auto
+  proof (cases "c \<in> {a..b}")
+    case False
+    then have "c < a \<or> c > b" by auto
+    then show ?thesis
+    proof
+      assume "c < a"
+      then have *: "{a..b} \<inter> {x. x \<le> c} = {1..0}" "{a..b} \<inter> {x. c \<le> x} = {a..b}"
+        by auto
+      show ?thesis
+        unfolding *
+        apply (subst as(1)[rule_format,of 0 1])
+        using assms
+        apply auto
+        done
+    next
+      assume "b < c"
+      then have *: "{a..b} \<inter> {x. x \<le> c} = {a..b}" "{a..b} \<inter> {x. c \<le> x} = {1..0}"
+        by auto
+      show ?thesis
+        unfolding *
+        apply (subst as(1)[rule_format,of 0 1])
+        using assms
+        apply auto
+        done
     qed
-  next case True hence *:"min (b) c = c" "max a c = c" by auto
-    have **: "(1::real) \<in> Basis" by simp
-    have ***:"\<And>P Q. (\<Sum>i\<in>Basis. (if i = 1 then P i else Q i) *\<^sub>R i) = (P 1::real)"
+  next
+    case True
+    then have *: "min (b) c = c" "max a c = c"
+      by auto
+    have **: "(1::real) \<in> Basis"
+      by simp
+    have ***: "\<And>P Q. (\<Sum>i\<in>Basis. (if i = 1 then P i else Q i) *\<^sub>R i) = (P 1::real)"
       by simp
     show ?thesis
       unfolding interval_split[OF **, unfolded real_inner_1_right] unfolding *** *
-    proof(cases "c = a \<or> c = b")
-      case False thus "f {a..b} = opp (f {a..c}) (f {c..b})"
-        apply-apply(subst as(2)[rule_format]) using True by auto
-    next case True thus "f {a..b} = opp (f {a..c}) (f {c..b})" apply-
-      proof(erule disjE) assume *:"c=a"
-        hence "f {a..c} = neutral opp" apply-apply(rule as(1)[rule_format]) by auto
-        thus ?thesis using assms unfolding * by auto
-      next assume *:"c=b" hence "f {c..b} = neutral opp" apply-apply(rule as(1)[rule_format]) by auto
-        thus ?thesis using assms unfolding * by auto qed qed qed qed
-
-lemma operative_1_le: assumes "monoidal opp"
+    proof (cases "c = a \<or> c = b")
+      case False
+      then show "f {a..b} = opp (f {a..c}) (f {c..b})"
+        apply -
+        apply (subst as(2)[rule_format])
+        using True
+        apply auto
+        done
+    next
+      case True
+      then show "f {a..b} = opp (f {a..c}) (f {c..b})"
+      proof
+        assume *: "c = a"
+        then have "f {a..c} = neutral opp"
+          apply -
+          apply (rule as(1)[rule_format])
+          apply auto
+          done
+        then show ?thesis
+          using assms unfolding * by auto
+      next
+        assume *: "c = b"
+        then have "f {c..b} = neutral opp"
+          apply -
+          apply (rule as(1)[rule_format])
+          apply auto
+          done
+        then show ?thesis
+          using assms unfolding * by auto
+      qed
+    qed
+  qed
+qed
+
+lemma operative_1_le:
+  assumes "monoidal opp"
   shows "operative opp f \<longleftrightarrow> ((\<forall>a b. b \<le> a \<longrightarrow> f {a..b::real} = neutral opp) \<and>
-                (\<forall>a b c. a \<le> c \<and> c \<le> b \<longrightarrow> opp (f{a..c})(f{c..b}) = f {a..b}))"
-unfolding operative_1_lt[OF assms]
-proof safe fix a b c::"real" assume as:"\<forall>a b c. a \<le> c \<and> c \<le> b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}" "a < c" "c < b"
-  show "opp (f {a..c}) (f {c..b}) = f {a..b}" apply(rule as(1)[rule_format]) using as(2-) by auto
-next fix a b c ::"real" assume "\<forall>a b. b \<le> a \<longrightarrow> f {a..b} = neutral opp"
-    "\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}" "a \<le> c" "c \<le> b"
+    (\<forall>a b c. a \<le> c \<and> c \<le> b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}))"
+  unfolding operative_1_lt[OF assms]
+proof safe
+  fix a b c :: real
+  assume as:
+    "\<forall>a b c. a \<le> c \<and> c \<le> b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}"
+    "a < c"
+    "c < b"
+  show "opp (f {a..c}) (f {c..b}) = f {a..b}"
+    apply (rule as(1)[rule_format])
+    using as(2-)
+    apply auto
+    done
+next
+  fix a b c :: real
+  assume "\<forall>a b. b \<le> a \<longrightarrow> f {a..b} = neutral opp"
+    and "\<forall>a b c. a < c \<and> c < b \<longrightarrow> opp (f {a..c}) (f {c..b}) = f {a..b}"
+    and "a \<le> c"
+    and "c \<le> b"
   note as = this[rule_format]
   show "opp (f {a..c}) (f {c..b}) = f {a..b}"
-  proof(cases "c = a \<or> c = b")
-    case False thus ?thesis apply-apply(subst as(2)) using as(3-) by(auto)
-    next case True thus ?thesis apply-
-      proof(erule disjE) assume *:"c=a" hence "f {a..c} = neutral opp" apply-apply(rule as(1)[rule_format]) by auto
-        thus ?thesis using assms unfolding * by auto
-      next               assume *:"c=b" hence "f {c..b} = neutral opp" apply-apply(rule as(1)[rule_format]) by auto
-        thus ?thesis using assms unfolding * by auto qed qed qed
+  proof (cases "c = a \<or> c = b")
+    case False
+    then show ?thesis
+      apply -
+      apply (subst as(2))
+      using as(3-)
+      apply auto
+      done
+  next
+    case True
+    then show ?thesis
+    proof
+      assume *: "c = a"
+      then have "f {a..c} = neutral opp"
+        apply -
+        apply (rule as(1)[rule_format])
+        apply auto
+        done
+      then show ?thesis
+        using assms unfolding * by auto
+    next
+      assume *: "c = b"
+      then have "f {c..b} = neutral opp"
+        apply -
+        apply (rule as(1)[rule_format])
+        apply auto
+        done
+      then show ?thesis
+        using assms unfolding * by auto
+    qed
+  qed
+qed
+
 
 subsection {* Special case of additivity we need for the FCT. *}
 
-lemma interval_bound_sing[simp]: "interval_upperbound {a} = a"  "interval_lowerbound {a} = a"
-  unfolding interval_upperbound_def interval_lowerbound_def by (auto simp: euclidean_representation)
-
-lemma additive_tagged_division_1: fixes f::"real \<Rightarrow> 'a::real_normed_vector"
-  assumes "a \<le> b" "p tagged_division_of {a..b}"
+lemma interval_bound_sing[simp]:
+  "interval_upperbound {a} = a"
+  "interval_lowerbound {a} = a"
+  unfolding interval_upperbound_def interval_lowerbound_def
+  by (auto simp: euclidean_representation)
+
+lemma additive_tagged_division_1:
+  fixes f :: "real \<Rightarrow> 'a::real_normed_vector"
+  assumes "a \<le> b"
+    and "p tagged_division_of {a..b}"
   shows "setsum (\<lambda>(x,k). f(interval_upperbound k) - f(interval_lowerbound k)) p = f b - f a"
-proof- let ?f = "(\<lambda>k::(real) set. if k = {} then 0 else f(interval_upperbound k) - f(interval_lowerbound k))"
-  have ***:"\<forall>i\<in>Basis. a \<bullet> i \<le> b \<bullet> i" using assms by auto
-  have *:"operative op + ?f" unfolding operative_1_lt[OF monoidal_monoid] interval_eq_empty by auto
-  have **:"{a..b} \<noteq> {}" using assms(1) by auto note operative_tagged_division[OF monoidal_monoid * assms(2)]
+proof -
+  let ?f = "(\<lambda>k::(real) set. if k = {} then 0 else f(interval_upperbound k) - f(interval_lowerbound k))"
+  have ***: "\<forall>i\<in>Basis. a \<bullet> i \<le> b \<bullet> i"
+    using assms by auto
+  have *: "operative op + ?f"
+    unfolding operative_1_lt[OF monoidal_monoid] interval_eq_empty by auto
+  have **: "{a..b} \<noteq> {}"
+    using assms(1) by auto note operative_tagged_division[OF monoidal_monoid * assms(2)]
   note * = this[unfolded if_not_P[OF **] interval_bounds[OF ***],symmetric]
-  show ?thesis unfolding * apply(subst setsum_iterate[symmetric]) defer
-    apply(rule setsum_cong2) unfolding split_paired_all split_conv using assms(2) by auto qed
+  show ?thesis
+    unfolding *
+    apply (subst setsum_iterate[symmetric])
+    defer
+    apply (rule setsum_cong2)
+    unfolding split_paired_all split_conv
+    using assms(2)
+    apply auto
+    done
+qed
+
 
 subsection {* A useful lemma allowing us to factor out the content size. *}
 
 lemma has_integral_factor_content:
-  "(f has_integral i) {a..b} \<longleftrightarrow> (\<forall>e>0. \<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of {a..b} \<and> d fine p
-    \<longrightarrow> norm (setsum (\<lambda>(x,k). content k *\<^sub>R f x) p - i) \<le> e * content {a..b}))"
-proof(cases "content {a..b} = 0")
-  case True show ?thesis unfolding has_integral_null_eq[OF True] apply safe
-    apply(rule,rule,rule gauge_trivial,safe) unfolding setsum_content_null[OF True] True defer
-    apply(erule_tac x=1 in allE,safe) defer apply(rule fine_division_exists[of _ a b],assumption)
-    apply(erule_tac x=p in allE) unfolding setsum_content_null[OF True] by auto
-next case False note F = this[unfolded content_lt_nz[symmetric]]
-  let ?P = "\<lambda>e opp. \<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of {a..b} \<and> d fine p \<longrightarrow> opp (norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f x) - i)) e)"
-  show ?thesis apply(subst has_integral)
-  proof safe fix e::real assume e:"e>0"
-    { assume "\<forall>e>0. ?P e op <" thus "?P (e * content {a..b}) op \<le>" apply(erule_tac x="e * content {a..b}" in allE)
-        apply(erule impE) defer apply(erule exE,rule_tac x=d in exI)
-        using F e by(auto simp add:field_simps intro:mult_pos_pos) }
-    {  assume "\<forall>e>0. ?P (e * content {a..b}) op \<le>" thus "?P e op <" apply(erule_tac x="e / 2 / content {a..b}" in allE)
-        apply(erule impE) defer apply(erule exE,rule_tac x=d in exI)
-        using F e by(auto simp add:field_simps intro:mult_pos_pos) } qed qed
+  "(f has_integral i) {a..b} \<longleftrightarrow>
+    (\<forall>e>0. \<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of {a..b} \<and> d fine p \<longrightarrow>
+      norm (setsum (\<lambda>(x,k). content k *\<^sub>R f x) p - i) \<le> e * content {a..b}))"
+proof (cases "content {a..b} = 0")
+  case True
+  show ?thesis
+    unfolding has_integral_null_eq[OF True]
+    apply safe
+    apply (rule, rule, rule gauge_trivial, safe)
+    unfolding setsum_content_null[OF True] True
+    defer
+    apply (erule_tac x=1 in allE)
+    apply safe
+    defer
+    apply (rule fine_division_exists[of _ a b])
+    apply assumption
+    apply (erule_tac x=p in allE)
+    unfolding setsum_content_null[OF True]
+    apply auto
+    done
+next
+  case False
+  note F = this[unfolded content_lt_nz[symmetric]]
+  let ?P = "\<lambda>e opp. \<exists>d. gauge d \<and>
+    (\<forall>p. p tagged_division_of {a..b} \<and> d fine p \<longrightarrow> opp (norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f x) - i)) e)"
+  show ?thesis
+    apply (subst has_integral)
+  proof safe
+    fix e :: real
+    assume e: "e > 0"
+    {
+      assume "\<forall>e>0. ?P e op <"
+      then show "?P (e * content {a..b}) op \<le>"
+        apply (erule_tac x="e * content {a..b}" in allE)
+        apply (erule impE)
+        defer
+        apply (erule exE,rule_tac x=d in exI)
+        using F e
+        apply (auto simp add:field_simps intro:mult_pos_pos)
+        done
+    }
+    {
+      assume "\<forall>e>0. ?P (e * content {a..b}) op \<le>"
+      then show "?P e op <"
+        apply (erule_tac x="e / 2 / content {a..b}" in allE)
+        apply (erule impE)
+        defer
+        apply (erule exE,rule_tac x=d in exI)
+        using F e
+        apply (auto simp add: field_simps intro: mult_pos_pos)
+        done
+    }
+  qed
+qed
+
 
 subsection {* Fundamental theorem of calculus. *}
 
-lemma interval_bounds_real: assumes "a\<le>(b::real)"
-  shows "interval_upperbound {a..b} = b" "interval_lowerbound {a..b} = a"
-  apply(rule_tac[!] interval_bounds) using assms by auto
-
-lemma fundamental_theorem_of_calculus: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes "a \<le> b"  "\<forall>x\<in>{a..b}. (f has_vector_derivative f' x) (at x within {a..b})"
-  shows "(f' has_integral (f b - f a)) ({a..b})"
-unfolding has_integral_factor_content
-proof safe fix e::real assume e:"e>0"
+lemma interval_bounds_real:
+  fixes q b :: real
+  assumes "a \<le> b"
+  shows "interval_upperbound {a..b} = b"
+    and "interval_lowerbound {a..b} = a"
+  apply (rule_tac[!] interval_bounds)
+  using assms
+  apply auto
+  done
+
+lemma fundamental_theorem_of_calculus:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "a \<le> b"
+    and "\<forall>x\<in>{a..b}. (f has_vector_derivative f' x) (at x within {a..b})"
+  shows "(f' has_integral (f b - f a)) {a..b}"
+  unfolding has_integral_factor_content
+proof safe
+  fix e :: real
+  assume e: "e > 0"
   note assm = assms(2)[unfolded has_vector_derivative_def has_derivative_within_alt]
-  have *:"\<And>P Q. \<forall>x\<in>{a..b}. P x \<and> (\<forall>e>0. \<exists>d>0. Q x e d) \<Longrightarrow> \<forall>x. \<exists>(d::real)>0. x\<in>{a..b} \<longrightarrow> Q x e d" using e by blast
-  note this[OF assm,unfolded gauge_existence_lemma] from choice[OF this,unfolded Ball_def[symmetric]]
-  guess d .. note d=conjunctD2[OF this[rule_format],rule_format]
+  have *: "\<And>P Q. \<forall>x\<in>{a..b}. P x \<and> (\<forall>e>0. \<exists>d>0. Q x e d) \<Longrightarrow> \<forall>x. \<exists>(d::real)>0. x\<in>{a..b} \<longrightarrow> Q x e d"
+    using e by blast
+  note this[OF assm,unfolded gauge_existence_lemma]
+  from choice[OF this,unfolded Ball_def[symmetric]] guess d ..
+  note d=conjunctD2[OF this[rule_format],rule_format]
   show "\<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of {a..b} \<and> d fine p \<longrightarrow>
-                 norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f' x) - (f b - f a)) \<le> e * content {a..b})"
-    apply(rule_tac x="\<lambda>x. ball x (d x)" in exI,safe)
-    apply(rule gauge_ball_dependent,rule,rule d(1))
-  proof- fix p assume as:"p tagged_division_of {a..b}" "(\<lambda>x. ball x (d x)) fine p"
+    norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f' x) - (f b - f a)) \<le> e * content {a..b})"
+    apply (rule_tac x="\<lambda>x. ball x (d x)" in exI)
+    apply safe
+    apply (rule gauge_ball_dependent)
+    apply rule
+    apply (rule d(1))
+  proof -
+    fix p
+    assume as: "p tagged_division_of {a..b}" "(\<lambda>x. ball x (d x)) fine p"
     show "norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f' x) - (f b - f a)) \<le> e * content {a..b}"
       unfolding content_real[OF assms(1)] additive_tagged_division_1[OF assms(1) as(1),of f,symmetric]
       unfolding additive_tagged_division_1[OF assms(1) as(1),of "\<lambda>x. x",symmetric]
-      unfolding setsum_right_distrib defer unfolding setsum_subtractf[symmetric]
-    proof(rule setsum_norm_le,safe) fix x k assume "(x,k)\<in>p"
-      note xk = tagged_division_ofD(2-4)[OF as(1) this] from this(3) guess u v apply-by(erule exE)+ note k=this
-      have *:"u \<le> v" using xk unfolding k by auto
-      have ball:"\<forall>xa\<in>k. xa \<in> ball x (d x)" using as(2)[unfolded fine_def,rule_format,OF `(x,k)\<in>p`,
-        unfolded split_conv subset_eq] .
+      unfolding setsum_right_distrib
+      defer
+      unfolding setsum_subtractf[symmetric]
+    proof (rule setsum_norm_le,safe)
+      fix x k
+      assume "(x, k) \<in> p"
+      note xk = tagged_division_ofD(2-4)[OF as(1) this]
+      from this(3) guess u v by (elim exE) note k=this
+      have *: "u \<le> v"
+        using xk unfolding k by auto
+      have ball: "\<forall>xa\<in>k. xa \<in> ball x (d x)"
+        using as(2)[unfolded fine_def,rule_format,OF `(x,k)\<in>p`,unfolded split_conv subset_eq] .
       have "norm ((v - u) *\<^sub>R f' x - (f v - f u)) \<le>
         norm (f u - f x - (u - x) *\<^sub>R f' x) + norm (f v - f x - (v - x) *\<^sub>R f' x)"
-        apply(rule order_trans[OF _ norm_triangle_ineq4]) apply(rule eq_refl) apply(rule arg_cong[where f=norm])
-        unfolding scaleR_diff_left by(auto simp add:algebra_simps)
-      also have "... \<le> e * norm (u - x) + e * norm (v - x)"
-        apply(rule add_mono) apply(rule d(2)[of "x" "u",unfolded o_def]) prefer 4
-        apply(rule d(2)[of "x" "v",unfolded o_def])
+        apply (rule order_trans[OF _ norm_triangle_ineq4])
+        apply (rule eq_refl)
+        apply (rule arg_cong[where f=norm])
+        unfolding scaleR_diff_left
+        apply (auto simp add:algebra_simps)
+        done
+      also have "\<dots> \<le> e * norm (u - x) + e * norm (v - x)"
+        apply (rule add_mono)
+        apply (rule d(2)[of "x" "u",unfolded o_def])
+        prefer 4
+        apply (rule d(2)[of "x" "v",unfolded o_def])
         using ball[rule_format,of u] ball[rule_format,of v]
-        using xk(1-2) unfolding k subset_eq by(auto simp add:dist_real_def)
-      also have "... \<le> e * (interval_upperbound k - interval_lowerbound k)"
-        unfolding k interval_bounds_real[OF *] using xk(1) unfolding k by(auto simp add:dist_real_def field_simps)
+        using xk(1-2)
+        unfolding k subset_eq
+        apply (auto simp add:dist_real_def)
+        done
+      also have "\<dots> \<le> e * (interval_upperbound k - interval_lowerbound k)"
+        unfolding k interval_bounds_real[OF *]
+        using xk(1)
+        unfolding k
+        by (auto simp add: dist_real_def field_simps)
       finally show "norm (content k *\<^sub>R f' x - (f (interval_upperbound k) - f (interval_lowerbound k))) \<le>
-        e * (interval_upperbound k - interval_lowerbound k)" unfolding k interval_bounds_real[OF *] content_real[OF *] .
-    qed qed qed
+        e * (interval_upperbound k - interval_lowerbound k)"
+        unfolding k interval_bounds_real[OF *] content_real[OF *] .
+    qed
+  qed
+qed
+
 
 subsection {* Attempt a systematic general set of "offset" results for components. *}
 
 lemma gauge_modify:
   assumes "(\<forall>s. open s \<longrightarrow> open {x. f(x) \<in> s})" "gauge d"
   shows "gauge (\<lambda>x. {y. f y \<in> d (f x)})"
-  using assms unfolding gauge_def apply safe defer apply(erule_tac x="f x" in allE)
-  apply(erule_tac x="d (f x)" in allE) by auto
+  using assms
+  unfolding gauge_def
+  apply safe
+  defer
+  apply (erule_tac x="f x" in allE)
+  apply (erule_tac x="d (f x)" in allE)
+  apply auto
+  done
+
 
 subsection {* Only need trivial subintervals if the interval itself is trivial. *}
 
-lemma division_of_nontrivial: fixes s::"('a::ordered_euclidean_space) set set"
-  assumes "s division_of {a..b}" "content({a..b}) \<noteq> 0"
-  shows "{k. k \<in> s \<and> content k \<noteq> 0} division_of {a..b}" using assms(1) apply-
-proof(induct "card s" arbitrary:s rule:nat_less_induct)
-  fix s::"'a set set" assume assm:"s division_of {a..b}"
-    "\<forall>m<card s. \<forall>x. m = card x \<longrightarrow> x division_of {a..b} \<longrightarrow> {k \<in> x. content k \<noteq> 0} division_of {a..b}"
-  note s = division_ofD[OF assm(1)] let ?thesis = "{k \<in> s. content k \<noteq> 0} division_of {a..b}"
-  { presume *:"{k \<in> s. content k \<noteq> 0} \<noteq> s \<Longrightarrow> ?thesis"
-    show ?thesis apply cases defer apply(rule *,assumption) using assm(1) by auto }
-  assume noteq:"{k \<in> s. content k \<noteq> 0} \<noteq> s"
-  then obtain k where k:"k\<in>s" "content k = 0" by auto
-  from s(4)[OF k(1)] guess c d apply-by(erule exE)+ note k=k this
-  from k have "card s > 0" unfolding card_gt_0_iff using assm(1) by auto
-  hence card:"card (s - {k}) < card s" using assm(1) k(1) apply(subst card_Diff_singleton_if) by auto
-  have *:"closed (\<Union>(s - {k}))" apply(rule closed_Union) defer apply rule apply(drule DiffD1,drule s(4))
-    apply safe apply(rule closed_interval) using assm(1) by auto
-  have "k \<subseteq> \<Union>(s - {k})" apply safe apply(rule *[unfolded closed_limpt,rule_format]) unfolding islimpt_approachable
-  proof safe fix x and e::real assume as:"x\<in>k" "e>0"
+lemma division_of_nontrivial:
+  fixes s :: "'a::ordered_euclidean_space set set"
+  assumes "s division_of {a..b}"
+    and "content {a..b} \<noteq> 0"
+  shows "{k. k \<in> s \<and> content k \<noteq> 0} division_of {a..b}"
+  using assms(1)
+  apply -
+proof (induct "card s" arbitrary: s rule: nat_less_induct)
+  fix s::"'a set set"
+  assume assm: "s division_of {a..b}"
+    "\<forall>m<card s. \<forall>x. m = card x \<longrightarrow>
+      x division_of {a..b} \<longrightarrow> {k \<in> x. content k \<noteq> 0} division_of {a..b}"
+  note s = division_ofD[OF assm(1)]
+  let ?thesis = "{k \<in> s. content k \<noteq> 0} division_of {a..b}"
+  {
+    presume *: "{k \<in> s. content k \<noteq> 0} \<noteq> s \<Longrightarrow> ?thesis"
+    show ?thesis
+      apply cases
+      defer
+      apply (rule *)
+      apply assumption
+      using assm(1)
+      apply auto
+      done
+  }
+  assume noteq: "{k \<in> s. content k \<noteq> 0} \<noteq> s"
+  then obtain k where k: "k \<in> s" "content k = 0"
+    by auto
+  from s(4)[OF k(1)] guess c d by (elim exE) note k=k this
+  from k have "card s > 0"
+    unfolding card_gt_0_iff using assm(1) by auto
+  then have card: "card (s - {k}) < card s"
+    using assm(1) k(1)
+    apply (subst card_Diff_singleton_if)
+    apply auto
+    done
+  have *: "closed (\<Union>(s - {k}))"
+    apply (rule closed_Union)
+    defer
+    apply rule
+    apply (drule DiffD1,drule s(4))
+    apply safe
+    apply (rule closed_interval)
+    using assm(1)
+    apply auto
+    done
+  have "k \<subseteq> \<Union>(s - {k})"
+    apply safe
+    apply (rule *[unfolded closed_limpt,rule_format])
+    unfolding islimpt_approachable
+  proof safe
+    fix x
+    fix e :: real
+    assume as: "x \<in> k" "e > 0"
     from k(2)[unfolded k content_eq_0] guess i ..
-    hence i:"c\<bullet>i = d\<bullet>i" "i\<in>Basis" using s(3)[OF k(1),unfolded k] unfolding interval_ne_empty by auto
-    hence xi:"x\<bullet>i = d\<bullet>i" using as unfolding k mem_interval by (metis antisym)
-    def y \<equiv> "(\<Sum>j\<in>Basis. (if j = i then if c\<bullet>i \<le> (a\<bullet>i + b\<bullet>i) / 2 then c\<bullet>i +
-      min e (b\<bullet>i - c\<bullet>i) / 2 else c\<bullet>i - min e (c\<bullet>i - a\<bullet>i) / 2 else x\<bullet>j) *\<^sub>R j)::'a"
-    show "\<exists>x'\<in>\<Union>(s - {k}). x' \<noteq> x \<and> dist x' x < e" apply(rule_tac x=y in bexI)
-    proof have "d \<in> {c..d}" using s(3)[OF k(1)] unfolding k interval_eq_empty mem_interval by(fastforce simp add: not_less)
-      hence "d \<in> {a..b}" using s(2)[OF k(1)] unfolding k by auto note di = this[unfolded mem_interval,THEN bspec[where x=i]]
-      hence xyi:"y\<bullet>i \<noteq> x\<bullet>i"
-        unfolding y_def i xi using as(2) assms(2)[unfolded content_eq_0] i(2)
+    then have i:"c\<bullet>i = d\<bullet>i" "i\<in>Basis"
+      using s(3)[OF k(1),unfolded k] unfolding interval_ne_empty by auto
+    then have xi: "x\<bullet>i = d\<bullet>i"
+      using as unfolding k mem_interval by (metis antisym)
+    def y \<equiv> "\<Sum>j\<in>Basis. (if j = i then if c\<bullet>i \<le> (a\<bullet>i + b\<bullet>i) / 2 then c\<bullet>i +
+      min e (b\<bullet>i - c\<bullet>i) / 2 else c\<bullet>i - min e (c\<bullet>i - a\<bullet>i) / 2 else x\<bullet>j) *\<^sub>R j"
+    show "\<exists>x'\<in>\<Union>(s - {k}). x' \<noteq> x \<and> dist x' x < e"
+      apply (rule_tac x=y in bexI)
+    proof
+      have "d \<in> {c..d}"
+        using s(3)[OF k(1)]
+        unfolding k interval_eq_empty mem_interval
+        by (fastforce simp add: not_less)
+      then have "d \<in> {a..b}"
+        using s(2)[OF k(1)]
+        unfolding k
+        by auto
+      note di = this[unfolded mem_interval,THEN bspec[where x=i]]
+      then have xyi: "y\<bullet>i \<noteq> x\<bullet>i"
+        unfolding y_def i xi
+        using as(2) assms(2)[unfolded content_eq_0] i(2)
         by (auto elim!: ballE[of _ _ i])
-      thus "y \<noteq> x" unfolding euclidean_eq_iff[where 'a='a] using i by auto
-      have *:"Basis = insert i (Basis - {i})" using i by auto
-      have "norm (y - x) < e + setsum (\<lambda>i. 0) Basis" apply(rule le_less_trans[OF norm_le_l1])
-        apply(subst *,subst setsum_insert) prefer 3 apply(rule add_less_le_mono)
-      proof-
+      then show "y \<noteq> x"
+        unfolding euclidean_eq_iff[where 'a='a] using i by auto
+      have *: "Basis = insert i (Basis - {i})"
+        using i by auto
+      have "norm (y - x) < e + setsum (\<lambda>i. 0) Basis"
+        apply (rule le_less_trans[OF norm_le_l1])
+        apply (subst *)
+        apply (subst setsum_insert)
+        prefer 3
+        apply (rule add_less_le_mono)
+      proof -
         show "\<bar>(y - x) \<bullet> i\<bar> < e"
           using di as(2) y_def i xi by (auto simp: inner_simps)
         show "(\<Sum>i\<in>Basis - {i}. \<bar>(y - x) \<bullet> i\<bar>) \<le> (\<Sum>i\<in>Basis. 0)"
           unfolding y_def by (auto simp: inner_simps)
-      qed auto thus "dist y x < e" unfolding dist_norm by auto
-      have "y\<notin>k" unfolding k mem_interval apply rule apply(erule_tac x=i in ballE) using xyi k i xi by auto
-      moreover have "y \<in> \<Union>s"
-        using set_rev_mp[OF as(1) s(2)[OF k(1)]] as(2) di i unfolding s mem_interval y_def
+      qed auto
+      then show "dist y x < e"
+        unfolding dist_norm by auto
+      have "y \<notin> k"
+        unfolding k mem_interval
+        apply rule
+        apply (erule_tac x=i in ballE)
+        using xyi k i xi
+        apply auto
+        done
+      moreover
+      have "y \<in> \<Union>s"
+        using set_rev_mp[OF as(1) s(2)[OF k(1)]] as(2) di i
+        unfolding s mem_interval y_def
         by (auto simp: field_simps elim!: ballE[of _ _ i])
-      ultimately show "y \<in> \<Union>(s - {k})" by auto
-    qed qed hence "\<Union>(s - {k}) = {a..b}" unfolding s(6)[symmetric] by auto
-  hence  "{ka \<in> s - {k}. content ka \<noteq> 0} division_of {a..b}" apply-apply(rule assm(2)[rule_format,OF card refl])
-    apply(rule division_ofI) defer apply(rule_tac[1-4] s) using assm(1) by auto
-  moreover have "{ka \<in> s - {k}. content ka \<noteq> 0} = {k \<in> s. content k \<noteq> 0}" using k by auto ultimately show ?thesis by auto qed
+      ultimately
+      show "y \<in> \<Union>(s - {k})" by auto
+    qed
+  qed
+  then have "\<Union>(s - {k}) = {a..b}"
+    unfolding s(6)[symmetric] by auto
+  then have  "{ka \<in> s - {k}. content ka \<noteq> 0} division_of {a..b}"
+    apply -
+    apply (rule assm(2)[rule_format,OF card refl])
+    apply (rule division_ofI)
+    defer
+    apply (rule_tac[1-4] s)
+    using assm(1)
+    apply auto
+    done
+  moreover
+  have "{ka \<in> s - {k}. content ka \<noteq> 0} = {k \<in> s. content k \<noteq> 0}"
+    using k by auto
+  ultimately show ?thesis by auto
+qed
+
 
 subsection {* Integrability on subintervals. *}
 
-lemma operative_integrable: fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach" shows
-  "operative op \<and> (\<lambda>i. f integrable_on i)"
-  unfolding operative_def neutral_and apply safe apply(subst integrable_on_def)
-  unfolding has_integral_null_eq apply(rule,rule refl) apply(rule,assumption,assumption)+
-  unfolding integrable_on_def by(auto intro!: has_integral_split)
-
-lemma integrable_subinterval: fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
-  assumes "f integrable_on {a..b}" "{c..d} \<subseteq> {a..b}" shows "f integrable_on {c..d}"
-  apply(cases "{c..d} = {}") defer apply(rule partial_division_extend_1[OF assms(2)],assumption)
-  using operative_division_and[OF operative_integrable,symmetric,of _ _ _ f] assms(1) by auto
+lemma operative_integrable:
+  fixes f :: "'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  shows "operative op \<and> (\<lambda>i. f integrable_on i)"
+  unfolding operative_def neutral_and
+  apply safe
+  apply (subst integrable_on_def)
+  unfolding has_integral_null_eq
+  apply (rule, rule refl)
+  apply (rule, assumption, assumption)+
+  unfolding integrable_on_def
+  by (auto intro!: has_integral_split)
+
+lemma integrable_subinterval:
+  fixes f :: "'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  assumes "f integrable_on {a..b}"
+    and "{c..d} \<subseteq> {a..b}"
+  shows "f integrable_on {c..d}"
+  apply (cases "{c..d} = {}")
+  defer
+  apply (rule partial_division_extend_1[OF assms(2)],assumption)
+  using operative_division_and[OF operative_integrable,symmetric,of _ _ _ f] assms(1)
+  apply auto
+  done
+
 
 subsection {* Combining adjacent intervals in 1 dimension. *}
 
-lemma has_integral_combine: assumes "(a::real) \<le> c" "c \<le> b"
-  "(f has_integral i) {a..c}" "(f has_integral (j::'a::banach)) {c..b}"
+lemma has_integral_combine:
+  fixes a b c :: real
+  assumes "a \<le> c"
+    and "c \<le> b"
+    and "(f has_integral i) {a..c}"
+    and "(f has_integral (j::'a::banach)) {c..b}"
   shows "(f has_integral (i + j)) {a..b}"
-proof- note operative_integral[of f, unfolded operative_1_le[OF monoidal_lifted[OF monoidal_monoid]]]
-  note conjunctD2[OF this,rule_format] note * = this(2)[OF conjI[OF assms(1-2)],unfolded if_P[OF assms(3)]]
-  hence "f integrable_on {a..b}" apply- apply(rule ccontr) apply(subst(asm) if_P) defer
-    apply(subst(asm) if_P) using assms(3-) by auto
-  with * show ?thesis apply-apply(subst(asm) if_P) defer apply(subst(asm) if_P) defer apply(subst(asm) if_P)
-    unfolding lifted.simps using assms(3-) by(auto simp add: integrable_on_def integral_unique) qed
-
-lemma integral_combine: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes "a \<le> c" "c \<le> b" "f integrable_on ({a..b})"
-  shows "integral {a..c} f + integral {c..b} f = integral({a..b}) f"
-  apply(rule integral_unique[symmetric]) apply(rule has_integral_combine[OF assms(1-2)])
-  apply(rule_tac[!] integrable_integral integrable_subinterval[OF assms(3)])+ using assms(1-2) by auto
-
-lemma integrable_combine: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes "a \<le> c" "c \<le> b" "f integrable_on {a..c}" "f integrable_on {c..b}"
-  shows "f integrable_on {a..b}" using assms unfolding integrable_on_def by(fastforce intro!:has_integral_combine)
+proof -
+  note operative_integral[of f, unfolded operative_1_le[OF monoidal_lifted[OF monoidal_monoid]]]
+  note conjunctD2[OF this,rule_format]
+  note * = this(2)[OF conjI[OF assms(1-2)],unfolded if_P[OF assms(3)]]
+  then have "f integrable_on {a..b}"
+    apply -
+    apply (rule ccontr)
+    apply (subst(asm) if_P)
+    defer
+    apply (subst(asm) if_P)
+    using assms(3-)
+    apply auto
+    done
+  with *
+  show ?thesis
+    apply -
+    apply (subst(asm) if_P)
+    defer
+    apply (subst(asm) if_P)
+    defer
+    apply (subst(asm) if_P)
+    unfolding lifted.simps
+    using assms(3-)
+    apply (auto simp add: integrable_on_def integral_unique)
+    done
+qed
+
+lemma integral_combine:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "a \<le> c"
+    and "c \<le> b"
+    and "f integrable_on {a..b}"
+  shows "integral {a..c} f + integral {c..b} f = integral {a..b} f"
+  apply (rule integral_unique[symmetric])
+  apply (rule has_integral_combine[OF assms(1-2)])
+  apply (rule_tac[!] integrable_integral integrable_subinterval[OF assms(3)])+
+  using assms(1-2)
+  apply auto
+  done
+
+lemma integrable_combine:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "a \<le> c"
+    and "c \<le> b"
+    and "f integrable_on {a..c}"
+    and "f integrable_on {c..b}"
+  shows "f integrable_on {a..b}"
+  using assms
+  unfolding integrable_on_def
+  by (fastforce intro!:has_integral_combine)
+
 
 subsection {* Reduce integrability to "local" integrability. *}
 
-lemma integrable_on_little_subintervals: fixes f::"'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
-  assumes "\<forall>x\<in>{a..b}. \<exists>d>0. \<forall>u v. x \<in> {u..v} \<and> {u..v} \<subseteq> ball x d \<and> {u..v} \<subseteq> {a..b} \<longrightarrow> f integrable_on {u..v}"
+lemma integrable_on_little_subintervals:
+  fixes f :: "'b::ordered_euclidean_space \<Rightarrow> 'a::banach"
+  assumes "\<forall>x\<in>{a..b}. \<exists>d>0. \<forall>u v. x \<in> {u..v} \<and> {u..v} \<subseteq> ball x d \<and> {u..v} \<subseteq> {a..b} \<longrightarrow>
+    f integrable_on {u..v}"
   shows "f integrable_on {a..b}"
-proof- have "\<forall>x. \<exists>d. x\<in>{a..b} \<longrightarrow> d>0 \<and> (\<forall>u v. x \<in> {u..v} \<and> {u..v} \<subseteq> ball x d \<and> {u..v} \<subseteq> {a..b} \<longrightarrow> f integrable_on {u..v})"
-    using assms by auto note this[unfolded gauge_existence_lemma] from choice[OF this] guess d .. note d=this[rule_format]
-  guess p apply(rule fine_division_exists[OF gauge_ball_dependent,of d a b]) using d by auto note p=this(1-2)
-  note division_of_tagged_division[OF this(1)] note * = operative_division_and[OF operative_integrable,OF this,symmetric,of f]
-  show ?thesis unfolding * apply safe unfolding snd_conv
-  proof- fix x k assume "(x,k) \<in> p" note tagged_division_ofD(2-4)[OF p(1) this] fineD[OF p(2) this]
-    thus "f integrable_on k" apply safe apply(rule d[THEN conjunct2,rule_format,of x]) by auto qed qed
+proof -
+  have "\<forall>x. \<exists>d. x\<in>{a..b} \<longrightarrow> d>0 \<and> (\<forall>u v. x \<in> {u..v} \<and> {u..v} \<subseteq> ball x d \<and> {u..v} \<subseteq> {a..b} \<longrightarrow>
+    f integrable_on {u..v})"
+    using assms by auto
+  note this[unfolded gauge_existence_lemma]
+  from choice[OF this] guess d .. note d=this[rule_format]
+  guess p
+    apply (rule fine_division_exists[OF gauge_ball_dependent,of d a b])
+    using d
+    by auto
+  note p=this(1-2)
+  note division_of_tagged_division[OF this(1)]
+  note * = operative_division_and[OF operative_integrable,OF this,symmetric,of f]
+  show ?thesis
+    unfolding *
+    apply safe
+    unfolding snd_conv
+  proof -
+    fix x k
+    assume "(x, k) \<in> p"
+    note tagged_division_ofD(2-4)[OF p(1) this] fineD[OF p(2) this]
+    then show "f integrable_on k"
+      apply safe
+      apply (rule d[THEN conjunct2,rule_format,of x])
+      apply auto
+      done
+  qed
+qed
+
 
 subsection {* Second FCT or existence of antiderivative. *}
 
-lemma integrable_const[intro]:"(\<lambda>x. c) integrable_on {a..b}"
-  unfolding integrable_on_def by(rule,rule has_integral_const)
-
-lemma integral_has_vector_derivative: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes "continuous_on {a..b} f" "x \<in> {a..b}"
+lemma integrable_const[intro]: "(\<lambda>x. c) integrable_on {a..b}"
+  unfolding integrable_on_def
+  apply rule
+  apply (rule has_integral_const)
+  done
+
+lemma integral_has_vector_derivative:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "continuous_on {a..b} f"
+    and "x \<in> {a..b}"
   shows "((\<lambda>u. integral {a..u} f) has_vector_derivative f(x)) (at x within {a..b})"
   unfolding has_vector_derivative_def has_derivative_within_alt
-apply safe apply(rule bounded_linear_scaleR_left)
-proof- fix e::real assume e:"e>0"
+  apply safe
+  apply (rule bounded_linear_scaleR_left)
+proof -
+  fix e :: real
+  assume e: "e > 0"
   note compact_uniformly_continuous[OF assms(1) compact_interval,unfolded uniformly_continuous_on_def]
-  from this[rule_format,OF e] guess d apply-by(erule conjE exE)+ note d=this[rule_format]
+  from this[rule_format,OF e] guess d by (elim conjE exE) note d=this[rule_format]
   let ?I = "\<lambda>a b. integral {a..b} f"
-  show "\<exists>d>0. \<forall>y\<in>{a..b}. norm (y - x) < d \<longrightarrow> norm (?I a y - ?I a x - (y - x) *\<^sub>R f x) \<le> e * norm (y - x)"
-  proof(rule,rule,rule d,safe) case goal1 show ?case proof(cases "y < x")
-      case False have "f integrable_on {a..y}" apply(rule integrable_subinterval,rule integrable_continuous)
-        apply(rule assms)  unfolding not_less using assms(2) goal1 by auto
-      hence *:"?I a y - ?I a x = ?I x y" unfolding algebra_simps apply(subst eq_commute) apply(rule integral_combine)
-        using False unfolding not_less using assms(2) goal1 by auto
-      have **:"norm (y - x) = content {x..y}" apply(subst content_real) using False unfolding not_less by auto
-      show ?thesis unfolding ** apply(rule has_integral_bound[where f="(\<lambda>u. f u - f x)"]) unfolding * unfolding o_def
-        defer apply(rule has_integral_sub) apply(rule integrable_integral)
-        apply(rule integrable_subinterval,rule integrable_continuous) apply(rule assms)+
-      proof- show "{x..y} \<subseteq> {a..b}" using goal1 assms(2) by auto
-        have *:"y - x = norm(y - x)" using False by auto
-        show "((\<lambda>xa. f x) has_integral (y - x) *\<^sub>R f x) {x.. y}" apply(subst *) unfolding ** by auto
-        show "\<forall>xa\<in>{x..y}. norm (f xa - f x) \<le> e" apply safe apply(rule less_imp_le)
-          apply(rule d(2)[unfolded dist_norm]) using assms(2) using goal1 by auto
-      qed(insert e,auto)
-    next case True have "f integrable_on {a..x}" apply(rule integrable_subinterval,rule integrable_continuous)
-        apply(rule assms)+  unfolding not_less using assms(2) goal1 by auto
-      hence *:"?I a x - ?I a y = ?I y x" unfolding algebra_simps apply(subst eq_commute) apply(rule integral_combine)
-        using True using assms(2) goal1 by auto
-      have **:"norm (y - x) = content {y..x}" apply(subst content_real) using True unfolding not_less by auto
-      have ***:"\<And>fy fx c::'a. fx - fy - (y - x) *\<^sub>R c = -(fy - fx - (x - y) *\<^sub>R c)" unfolding scaleR_left.diff by auto
-      show ?thesis apply(subst ***) unfolding norm_minus_cancel **
-        apply(rule has_integral_bound[where f="(\<lambda>u. f u - f x)"]) unfolding * unfolding o_def
-        defer apply(rule has_integral_sub) apply(subst minus_minus[symmetric]) unfolding minus_minus
-        apply(rule integrable_integral) apply(rule integrable_subinterval,rule integrable_continuous) apply(rule assms)+
-      proof- show "{y..x} \<subseteq> {a..b}" using goal1 assms(2) by auto
-        have *:"x - y = norm(y - x)" using True by auto
-        show "((\<lambda>xa. f x) has_integral (x - y) *\<^sub>R f x) {y..x}" apply(subst *) unfolding ** by auto
-        show "\<forall>xa\<in>{y..x}. norm (f xa - f x) \<le> e" apply safe apply(rule less_imp_le)
-          apply(rule d(2)[unfolded dist_norm]) using assms(2) using goal1 by auto
-      qed(insert e,auto) qed qed qed
-
-lemma antiderivative_continuous: assumes "continuous_on {a..b::real} f"
-  obtains g where "\<forall>x\<in> {a..b}. (g has_vector_derivative (f(x)::_::banach)) (at x within {a..b})"
-  apply(rule that,rule) using integral_has_vector_derivative[OF assms] by auto
+  show "\<exists>d>0. \<forall>y\<in>{a..b}. norm (y - x) < d \<longrightarrow>
+    norm (?I a y - ?I a x - (y - x) *\<^sub>R f x) \<le> e * norm (y - x)"
+  proof (rule, rule, rule d, safe)
+    case goal1
+    show ?case
+    proof (cases "y < x")
+      case False
+      have "f integrable_on {a..y}"
+        apply (rule integrable_subinterval,rule integrable_continuous)
+        apply (rule assms)
+        unfolding not_less
+        using assms(2) goal1
+        apply auto
+        done
+      then have *: "?I a y - ?I a x = ?I x y"
+        unfolding algebra_simps
+        apply (subst eq_commute)
+        apply (rule integral_combine)
+        using False
+        unfolding not_less
+        using assms(2) goal1
+        apply auto
+        done
+      have **: "norm (y - x) = content {x..y}"
+        apply (subst content_real)
+        using False
+        unfolding not_less
+        apply auto
+        done
+      show ?thesis
+        unfolding **
+        apply (rule has_integral_bound[where f="(\<lambda>u. f u - f x)"])
+        unfolding *
+        unfolding o_def
+        defer
+        apply (rule has_integral_sub)
+        apply (rule integrable_integral)
+        apply (rule integrable_subinterval)
+        apply (rule integrable_continuous)
+        apply (rule assms)+
+      proof -
+        show "{x..y} \<subseteq> {a..b}"
+          using goal1 assms(2) by auto
+        have *: "y - x = norm (y - x)"
+          using False by auto
+        show "((\<lambda>xa. f x) has_integral (y - x) *\<^sub>R f x) {x.. y}"
+          apply (subst *)
+          unfolding **
+          apply auto
+          done
+        show "\<forall>xa\<in>{x..y}. norm (f xa - f x) \<le> e"
+          apply safe
+          apply (rule less_imp_le)
+          apply (rule d(2)[unfolded dist_norm])
+          using assms(2)
+          using goal1
+          apply auto
+          done
+      qed (insert e, auto)
+    next
+      case True
+      have "f integrable_on {a..x}"
+        apply (rule integrable_subinterval,rule integrable_continuous)
+        apply (rule assms)+
+        unfolding not_less
+        using assms(2) goal1
+        apply auto
+        done
+      then have *: "?I a x - ?I a y = ?I y x"
+        unfolding algebra_simps
+        apply (subst eq_commute)
+        apply (rule integral_combine)
+        using True using assms(2) goal1
+        apply auto
+        done
+      have **: "norm (y - x) = content {y..x}"
+        apply (subst content_real)
+        using True
+        unfolding not_less
+        apply auto
+        done
+      have ***: "\<And>fy fx c::'a. fx - fy - (y - x) *\<^sub>R c = -(fy - fx - (x - y) *\<^sub>R c)"
+        unfolding scaleR_left.diff by auto
+      show ?thesis
+        apply (subst ***)
+        unfolding norm_minus_cancel **
+        apply (rule has_integral_bound[where f="(\<lambda>u. f u - f x)"])
+        unfolding *
+        unfolding o_def
+        defer
+        apply (rule has_integral_sub)
+        apply (subst minus_minus[symmetric])
+        unfolding minus_minus
+        apply (rule integrable_integral)
+        apply (rule integrable_subinterval,rule integrable_continuous)
+        apply (rule assms)+
+      proof -
+        show "{y..x} \<subseteq> {a..b}"
+          using goal1 assms(2) by auto
+        have *: "x - y = norm (y - x)"
+          using True by auto
+        show "((\<lambda>xa. f x) has_integral (x - y) *\<^sub>R f x) {y..x}"
+          apply (subst *)
+          unfolding **
+          apply auto
+          done
+        show "\<forall>xa\<in>{y..x}. norm (f xa - f x) \<le> e"
+          apply safe
+          apply (rule less_imp_le)
+          apply (rule d(2)[unfolded dist_norm])
+          using assms(2)
+          using goal1
+          apply auto
+          done
+      qed (insert e, auto)
+    qed
+  qed
+qed
+
+lemma antiderivative_continuous:
+  fixes q b :: real
+  assumes "continuous_on {a..b} f"
+  obtains g where "\<forall>x\<in> {a..b}. (g has_vector_derivative (f x::_::banach)) (at x within {a..b})"
+  apply (rule that)
+  apply rule
+  using integral_has_vector_derivative[OF assms]
+  apply auto
+  done
+
 
 subsection {* Combined fundamental theorem of calculus. *}
 
-lemma antiderivative_integral_continuous: fixes f::"real \<Rightarrow> 'a::banach" assumes "continuous_on {a..b} f"
+lemma antiderivative_integral_continuous:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "continuous_on {a..b} f"
   obtains g where "\<forall>u\<in>{a..b}. \<forall>v \<in> {a..b}. u \<le> v \<longrightarrow> (f has_integral (g v - g u)) {u..v}"
-proof- from antiderivative_continuous[OF assms] guess g . note g=this
-  show ?thesis apply(rule that[of g])
-  proof safe case goal1 have "\<forall>x\<in>{u..v}. (g has_vector_derivative f x) (at x within {u..v})"
-      apply(rule,rule has_vector_derivative_within_subset) apply(rule g[rule_format]) using goal1(1-2) by auto
-    thus ?case using fundamental_theorem_of_calculus[OF goal1(3),of "g" "f"] by auto qed qed
+proof -
+  from antiderivative_continuous[OF assms] guess g . note g=this
+  show ?thesis
+    apply (rule that[of g])
+  proof safe
+    case goal1
+    have "\<forall>x\<in>{u..v}. (g has_vector_derivative f x) (at x within {u..v})"
+      apply rule
+      apply (rule has_vector_derivative_within_subset)
+      apply (rule g[rule_format])
+      using goal1(1-2)
+      apply auto
+      done
+    then show ?case
+      using fundamental_theorem_of_calculus[OF goal1(3),of "g" "f"] by auto
+  qed
+qed
+
 
 subsection {* General "twiddling" for interval-to-interval function image. *}
 
 lemma has_integral_twiddle:
-  assumes "0 < r" "\<forall>x. h(g x) = x" "\<forall>x. g(h x) = x" "\<forall>x. continuous (at x) g"
-  "\<forall>u v. \<exists>w z. g ` {u..v} = {w..z}"
-  "\<forall>u v. \<exists>w z. h ` {u..v} = {w..z}"
-  "\<forall>u v. content(g ` {u..v}) = r * content {u..v}"
-  "(f has_integral i) {a..b}"
+  assumes "0 < r"
+    and "\<forall>x. h(g x) = x"
+    and "\<forall>x. g(h x) = x"
+    and "\<forall>x. continuous (at x) g"
+    and "\<forall>u v. \<exists>w z. g ` {u..v} = {w..z}"
+    and "\<forall>u v. \<exists>w z. h ` {u..v} = {w..z}"
+    and "\<forall>u v. content(g ` {u..v}) = r * content {u..v}"
+    and "(f has_integral i) {a..b}"
   shows "((\<lambda>x. f(g x)) has_integral (1 / r) *\<^sub>R i) (h ` {a..b})"
-proof- { presume *:"{a..b} \<noteq> {} \<Longrightarrow> ?thesis"
-    show ?thesis apply cases defer apply(rule *,assumption)
-    proof- case goal1 thus ?thesis unfolding goal1 assms(8)[unfolded goal1 has_integral_empty_eq] by auto qed }
-  assume "{a..b} \<noteq> {}" from assms(6)[rule_format,of a b] guess w z apply-by(erule exE)+ note wz=this
-  have inj:"inj g" "inj h" unfolding inj_on_def apply safe apply(rule_tac[!] ccontr)
-    using assms(2) apply(erule_tac x=x in allE) using assms(2) apply(erule_tac x=y in allE) defer
-    using assms(3) apply(erule_tac x=x in allE) using assms(3) apply(erule_tac x=y in allE) by auto
-  show ?thesis unfolding has_integral_def has_integral_compact_interval_def apply(subst if_P) apply(rule,rule,rule wz)
-  proof safe fix e::real assume e:"e>0" hence "e * r > 0" using assms(1) by(rule mult_pos_pos)
-    from assms(8)[unfolded has_integral,rule_format,OF this] guess d apply-by(erule exE conjE)+ note d=this[rule_format]
-    def d' \<equiv> "\<lambda>x. {y. g y \<in> d (g x)}" have d':"\<And>x. d' x = {y. g y \<in> (d (g x))}" unfolding d'_def ..
+proof -
+  {
+    presume *: "{a..b} \<noteq> {} \<Longrightarrow> ?thesis"
+    show ?thesis
+      apply cases
+      defer
+      apply (rule *)
+      apply assumption
+    proof -
+      case goal1
+      then show ?thesis
+        unfolding goal1 assms(8)[unfolded goal1 has_integral_empty_eq] by auto qed
+  }
+  assume "{a..b} \<noteq> {}"
+  from assms(6)[rule_format,of a b] guess w z by (elim exE) note wz=this
+  have inj: "inj g" "inj h"
+    unfolding inj_on_def
+    apply safe
+    apply(rule_tac[!] ccontr)
+    using assms(2)
+    apply(erule_tac x=x in allE)
+    using assms(2)
+    apply(erule_tac x=y in allE)
+    defer
+    using assms(3)
+    apply (erule_tac x=x in allE)
+    using assms(3)
+    apply(erule_tac x=y in allE)
+    apply auto
+    done
+  show ?thesis
+    unfolding has_integral_def has_integral_compact_interval_def
+    apply (subst if_P)
+    apply rule
+    apply rule
+    apply (rule wz)
+  proof safe
+    fix e :: real
+    assume e: "e > 0"
+    then have "e * r > 0"
+      using assms(1) by (rule mult_pos_pos)
+    from assms(8)[unfolded has_integral,rule_format,OF this] guess d by (elim exE conjE) note d=this[rule_format]
+    def d' \<equiv> "\<lambda>x. {y. g y \<in> d (g x)}"
+    have d': "\<And>x. d' x = {y. g y \<in> (d (g x))}"
+      unfolding d'_def ..
     show "\<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of h ` {a..b} \<and> d fine p \<longrightarrow> norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i) < e)"
-    proof(rule_tac x=d' in exI,safe) show "gauge d'" using d(1) unfolding gauge_def d' using continuous_open_preimage_univ[OF assms(4)] by auto
-      fix p assume as:"p tagged_division_of h ` {a..b}" "d' fine p" note p = tagged_division_ofD[OF as(1)]
-      have "(\<lambda>(x, k). (g x, g ` k)) ` p tagged_division_of {a..b} \<and> d fine (\<lambda>(x, k). (g x, g ` k)) ` p" unfolding tagged_division_of
-      proof safe show "finite ((\<lambda>(x, k). (g x, g ` k)) ` p)" using as by auto
-        show "d fine (\<lambda>(x, k). (g x, g ` k)) ` p" using as(2) unfolding fine_def d' by auto
-        fix x k assume xk[intro]:"(x,k) \<in> p" show "g x \<in> g ` k" using p(2)[OF xk] by auto
-        show "\<exists>u v. g ` k = {u..v}" using p(4)[OF xk] using assms(5-6) by auto
-        { fix y assume "y \<in> k" thus "g y \<in> {a..b}" "g y \<in> {a..b}" using p(3)[OF xk,unfolded subset_eq,rule_format,of "h (g y)"]
-            using assms(2)[rule_format,of y] unfolding inj_image_mem_iff[OF inj(2)] by auto }
-        fix x' k' assume xk':"(x',k') \<in> p" fix z assume "z \<in> interior (g ` k)" "z \<in> interior (g ` k')"
-        hence *:"interior (g ` k) \<inter> interior (g ` k') \<noteq> {}" by auto
-        have same:"(x, k) = (x', k')" apply-apply(rule ccontr,drule p(5)[OF xk xk'])
-        proof- assume as:"interior k \<inter> interior k' = {}" from nonempty_witness[OF *] guess z .
-          hence "z \<in> g ` (interior k \<inter> interior k')" using interior_image_subset[OF assms(4) inj(1)]
-            unfolding image_Int[OF inj(1)] by auto thus False using as by blast
-        qed thus "g x = g x'" by auto
-        { fix z assume "z \<in> k"  thus  "g z \<in> g ` k'" using same by auto }
-        { fix z assume "z \<in> k'" thus  "g z \<in> g ` k"  using same by auto }
-      next fix x assume "x \<in> {a..b}" hence "h x \<in>  \<Union>{k. \<exists>x. (x, k) \<in> p}" using p(6) by auto
-        then guess X unfolding Union_iff .. note X=this from this(1) guess y unfolding mem_Collect_eq ..
-        thus "x \<in> \<Union>{k. \<exists>x. (x, k) \<in> (\<lambda>(x, k). (g x, g ` k)) ` p}" apply-
-          apply(rule_tac X="g ` X" in UnionI) defer apply(rule_tac x="h x" in image_eqI)
-          using X(2) assms(3)[rule_format,of x] by auto
-      qed note ** = d(2)[OF this] have *:"inj_on (\<lambda>(x, k). (g x, g ` k)) p" using inj(1) unfolding inj_on_def by fastforce
-       have "(\<Sum>(x, k)\<in>(\<lambda>(x, k). (g x, g ` k)) ` p. content k *\<^sub>R f x) - i = r *\<^sub>R (\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - i" (is "?l = _") unfolding algebra_simps add_left_cancel
-        unfolding setsum_reindex[OF *] apply(subst scaleR_right.setsum) defer apply(rule setsum_cong2) unfolding o_def split_paired_all split_conv
-        apply(drule p(4)) apply safe unfolding assms(7)[rule_format] using p by auto
-      also have "... = r *\<^sub>R ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i)" (is "_ = ?r") unfolding scaleR_diff_right scaleR_scaleR
-        using assms(1) by auto finally have *:"?l = ?r" .
-      show "norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i) < e" using ** unfolding * unfolding norm_scaleR
-        using assms(1) by(auto simp add:field_simps) qed qed qed
+    proof (rule_tac x=d' in exI, safe)
+      show "gauge d'"
+        using d(1)
+        unfolding gauge_def d'
+        using continuous_open_preimage_univ[OF assms(4)]
+        by auto
+      fix p
+      assume as: "p tagged_division_of h ` {a..b}" "d' fine p"
+      note p = tagged_division_ofD[OF as(1)]
+      have "(\<lambda>(x, k). (g x, g ` k)) ` p tagged_division_of {a..b} \<and> d fine (\<lambda>(x, k). (g x, g ` k)) ` p"
+        unfolding tagged_division_of
+      proof safe
+        show "finite ((\<lambda>(x, k). (g x, g ` k)) ` p)"
+          using as by auto
+        show "d fine (\<lambda>(x, k). (g x, g ` k)) ` p"
+          using as(2) unfolding fine_def d' by auto
+        fix x k
+        assume xk[intro]: "(x, k) \<in> p"
+        show "g x \<in> g ` k"
+          using p(2)[OF xk] by auto
+        show "\<exists>u v. g ` k = {u..v}"
+          using p(4)[OF xk] using assms(5-6) by auto
+        {
+          fix y
+          assume "y \<in> k"
+          then show "g y \<in> {a..b}" "g y \<in> {a..b}"
+            using p(3)[OF xk,unfolded subset_eq,rule_format,of "h (g y)"]
+            using assms(2)[rule_format,of y]
+            unfolding inj_image_mem_iff[OF inj(2)]
+            by auto
+        }
+        fix x' k'
+        assume xk': "(x', k') \<in> p"
+        fix z
+        assume "z \<in> interior (g ` k)" and "z \<in> interior (g ` k')"
+        then have *: "interior (g ` k) \<inter> interior (g ` k') \<noteq> {}"
+          by auto
+        have same: "(x, k) = (x', k')"
+          apply -
+          apply (rule ccontr,drule p(5)[OF xk xk'])
+        proof -
+          assume as: "interior k \<inter> interior k' = {}"
+          from nonempty_witness[OF *] guess z .
+          then have "z \<in> g ` (interior k \<inter> interior k')"
+            using interior_image_subset[OF assms(4) inj(1)]
+            unfolding image_Int[OF inj(1)]
+            by auto
+          then show False
+            using as by blast
+        qed
+        then show "g x = g x'"
+          by auto
+        {
+          fix z
+          assume "z \<in> k"
+          then show "g z \<in> g ` k'"
+            using same by auto
+        }
+        {
+          fix z
+          assume "z \<in> k'"
+          then show "g z \<in> g ` k"
+            using same by auto
+        }
+      next
+        fix x
+        assume "x \<in> {a..b}"
+        then have "h x \<in>  \<Union>{k. \<exists>x. (x, k) \<in> p}"
+          using p(6) by auto
+        then guess X unfolding Union_iff .. note X=this
+        from this(1) guess y unfolding mem_Collect_eq ..
+        then show "x \<in> \<Union>{k. \<exists>x. (x, k) \<in> (\<lambda>(x, k). (g x, g ` k)) ` p}"
+          apply -
+          apply (rule_tac X="g ` X" in UnionI)
+          defer
+          apply (rule_tac x="h x" in image_eqI)
+          using X(2) assms(3)[rule_format,of x]
+          apply auto
+          done
+      qed
+        note ** = d(2)[OF this]
+        have *: "inj_on (\<lambda>(x, k). (g x, g ` k)) p"
+          using inj(1) unfolding inj_on_def by fastforce
+        have "(\<Sum>(x, k)\<in>(\<lambda>(x, k). (g x, g ` k)) ` p. content k *\<^sub>R f x) - i = r *\<^sub>R (\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - i" (is "?l = _")
+          unfolding algebra_simps add_left_cancel
+          unfolding setsum_reindex[OF *]
+          apply (subst scaleR_right.setsum)
+          defer
+          apply (rule setsum_cong2)
+          unfolding o_def split_paired_all split_conv
+          apply (drule p(4))
+          apply safe
+          unfolding assms(7)[rule_format]
+          using p
+          apply auto
+          done
+      also have "\<dots> = r *\<^sub>R ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i)" (is "_ = ?r")
+        unfolding scaleR_diff_right scaleR_scaleR
+        using assms(1)
+        by auto
+      finally have *: "?l = ?r" .
+      show "norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f (g x)) - (1 / r) *\<^sub>R i) < e"
+        using **
+        unfolding *
+        unfolding norm_scaleR
+        using assms(1)
+        by (auto simp add:field_simps)
+    qed
+  qed
+qed
+
 
 subsection {* Special case of a basic affine transformation. *}
 
-lemma interval_image_affinity_interval: shows "\<exists>u v. (\<lambda>x. m *\<^sub>R (x::'a::ordered_euclidean_space) + c) ` {a..b} = {u..v}"
-  unfolding image_affinity_interval by auto
-
-lemma setprod_cong2: assumes "\<And>x. x \<in> A \<Longrightarrow> f x = g x" shows "setprod f A = setprod g A"
-  apply(rule setprod_cong) using assms by auto
+lemma interval_image_affinity_interval:
+  "\<exists>u v. (\<lambda>x. m *\<^sub>R (x::'a::ordered_euclidean_space) + c) ` {a..b} = {u..v}"
+  unfolding image_affinity_interval
+  by auto
+
+lemma setprod_cong2:
+  assumes "\<And>x. x \<in> A \<Longrightarrow> f x = g x"
+  shows "setprod f A = setprod g A"
+  apply (rule setprod_cong)
+  using assms
+  apply auto
+  done
 
 lemma content_image_affinity_interval:
- "content((\<lambda>x::'a::ordered_euclidean_space. m *\<^sub>R x + c) ` {a..b}) = (abs m) ^ DIM('a) * content {a..b}" (is "?l = ?r")
-proof- { presume *:"{a..b}\<noteq>{} \<Longrightarrow> ?thesis" show ?thesis apply(cases,rule *,assumption)
-      unfolding not_not using content_empty by auto }
-  assume as: "{a..b}\<noteq>{}"
+  "content((\<lambda>x::'a::ordered_euclidean_space. m *\<^sub>R x + c) ` {a..b}) =
+    abs m ^ DIM('a) * content {a..b}" (is "?l = ?r")
+proof -
+  {
+    presume *: "{a..b} \<noteq> {} \<Longrightarrow> ?thesis"
+    show ?thesis
+      apply cases
+      apply (rule *)
+      apply assumption
+      unfolding not_not
+      using content_empty
+      apply auto
+      done
+  }
+  assume as: "{a..b} \<noteq> {}"
   show ?thesis
   proof (cases "m \<ge> 0")
     case True
@@ -6791,7 +7462,7 @@
       by (simp add: inner_simps field_simps)
     ultimately show ?thesis
       by (simp add: image_affinity_interval True content_closed_interval'
-                    setprod_timesf setprod_constant inner_diff_left)
+        setprod_timesf setprod_constant inner_diff_left)
   next
     case False
     with as have "{m *\<^sub>R b + c..m *\<^sub>R a + c} \<noteq> {}"
@@ -6804,20 +7475,43 @@
       by (simp add: inner_simps field_simps)
     ultimately show ?thesis using False
       by (simp add: image_affinity_interval content_closed_interval'
-                    setprod_timesf[symmetric] setprod_constant[symmetric] inner_diff_left)
+        setprod_timesf[symmetric] setprod_constant[symmetric] inner_diff_left)
   qed
 qed
 
-lemma has_integral_affinity: fixes a::"'a::ordered_euclidean_space" assumes "(f has_integral i) {a..b}" "m \<noteq> 0"
+lemma has_integral_affinity:
+  fixes a :: "'a::ordered_euclidean_space"
+  assumes "(f has_integral i) {a..b}"
+    and "m \<noteq> 0"
   shows "((\<lambda>x. f(m *\<^sub>R x + c)) has_integral ((1 / (abs(m) ^ DIM('a))) *\<^sub>R i)) ((\<lambda>x. (1 / m) *\<^sub>R x + -((1 / m) *\<^sub>R c)) ` {a..b})"
-  apply(rule has_integral_twiddle,safe) apply(rule zero_less_power) unfolding euclidean_eq_iff[where 'a='a]
+  apply (rule has_integral_twiddle)
+  apply safe
+  apply (rule zero_less_power)
+  unfolding euclidean_eq_iff[where 'a='a]
   unfolding scaleR_right_distrib inner_simps scaleR_scaleR
-  defer apply(insert assms(2), simp add:field_simps) apply(insert assms(2), simp add:field_simps)
-  apply(rule continuous_intros)+ apply(rule interval_image_affinity_interval)+ apply(rule content_image_affinity_interval) using assms by auto
-
-lemma integrable_affinity: assumes "f integrable_on {a..b}" "m \<noteq> 0"
+  defer
+  apply (insert assms(2))
+  apply (simp add: field_simps)
+  apply (insert assms(2))
+  apply (simp add: field_simps)
+  apply (rule continuous_intros)+
+  apply (rule interval_image_affinity_interval)+
+  apply (rule content_image_affinity_interval)
+  using assms
+  apply auto
+  done
+
+lemma integrable_affinity:
+  assumes "f integrable_on {a..b}"
+    and "m \<noteq> 0"
   shows "(\<lambda>x. f(m *\<^sub>R x + c)) integrable_on ((\<lambda>x. (1 / m) *\<^sub>R x + -((1/m) *\<^sub>R c)) ` {a..b})"
-  using assms unfolding integrable_on_def apply safe apply(drule has_integral_affinity) by auto
+  using assms
+  unfolding integrable_on_def
+  apply safe
+  apply (drule has_integral_affinity)
+  apply auto
+  done
+
 
 subsection {* Special case of stretching coordinate axes separately. *}
 
@@ -6856,310 +7550,744 @@
 qed simp
 
 lemma interval_image_stretch_interval:
-    "\<exists>u v. (\<lambda>x. \<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k) ` {a..b::'a::ordered_euclidean_space} = {u..v::'a}"
+  "\<exists>u v. (\<lambda>x. \<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k) ` {a..b::'a::ordered_euclidean_space} = {u..v::'a}"
   unfolding image_stretch_interval by auto
 
 lemma content_image_stretch_interval:
-  "content((\<lambda>x::'a::ordered_euclidean_space. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)::'a) ` {a..b}) = abs(setprod m Basis) * content({a..b})"
-proof(cases "{a..b} = {}") case True thus ?thesis
+  "content ((\<lambda>x::'a::ordered_euclidean_space. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)::'a) ` {a..b}) =
+    abs (setprod m Basis) * content {a..b}"
+proof (cases "{a..b} = {}")
+  case True
+  then show ?thesis
     unfolding content_def image_is_empty image_stretch_interval if_P[OF True] by auto
-next case False hence "(\<lambda>x. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)::'a) ` {a..b} \<noteq> {}" by auto
-  thus ?thesis using False unfolding content_def image_stretch_interval apply- unfolding interval_bounds' if_not_P
-    unfolding abs_setprod setprod_timesf[symmetric] apply(rule setprod_cong2) unfolding lessThan_iff
-  proof (simp only: inner_setsum_left_Basis)
-    fix i :: 'a assume i:"i\<in>Basis" have "(m i < 0 \<or> m i > 0) \<or> m i = 0" by auto
-    thus "max (m i * (a \<bullet> i)) (m i * (b \<bullet> i)) - min (m i * (a \<bullet> i)) (m i * (b \<bullet> i)) =
-        \<bar>m i\<bar> * (b \<bullet> i - a \<bullet> i)"
-      apply-apply(erule disjE)+ unfolding min_def max_def using False[unfolded interval_ne_empty,rule_format,of i] i
-      by(auto simp add:field_simps not_le mult_le_cancel_left_neg mult_le_cancel_left_pos) qed qed
-
-lemma has_integral_stretch: fixes f::"'a::ordered_euclidean_space => 'b::real_normed_vector"
-  assumes "(f has_integral i) {a..b}" "\<forall>k\<in>Basis. ~(m k = 0)"
+next
+  case False
+  then have "(\<lambda>x. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)) ` {a..b} \<noteq> {}"
+    by auto
+  then show ?thesis
+    using False
+    unfolding content_def image_stretch_interval
+    apply -
+    unfolding interval_bounds' if_not_P
+    unfolding abs_setprod setprod_timesf[symmetric]
+    apply (rule setprod_cong2)
+    unfolding lessThan_iff
+    apply (simp only: inner_setsum_left_Basis)
+  proof -
+    fix i :: 'a
+    assume i: "i \<in> Basis"
+    have "(m i < 0 \<or> m i > 0) \<or> m i = 0"
+      by auto
+    then show "max (m i * (a \<bullet> i)) (m i * (b \<bullet> i)) - min (m i * (a \<bullet> i)) (m i * (b \<bullet> i)) =
+      \<bar>m i\<bar> * (b \<bullet> i - a \<bullet> i)"
+      apply -
+      apply (erule disjE)+
+      unfolding min_def max_def
+      using False[unfolded interval_ne_empty,rule_format,of i] i
+      apply (auto simp add:field_simps not_le mult_le_cancel_left_neg mult_le_cancel_left_pos)
+      done
+  qed
+qed
+
+lemma has_integral_stretch:
+  fixes f :: "'a::ordered_euclidean_space \<Rightarrow> 'b::real_normed_vector"
+  assumes "(f has_integral i) {a..b}"
+    and "\<forall>k\<in>Basis. m k \<noteq> 0"
   shows "((\<lambda>x. f (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)) has_integral
-             ((1/(abs(setprod m Basis))) *\<^sub>R i)) ((\<lambda>x. (\<Sum>k\<in>Basis. (1 / m k * (x\<bullet>k))*\<^sub>R k)) ` {a..b})"
-  apply(rule has_integral_twiddle[where f=f]) unfolding zero_less_abs_iff content_image_stretch_interval
-  unfolding image_stretch_interval empty_as_interval euclidean_eq_iff[where 'a='a] using assms
-proof- show "\<forall>y::'a. continuous (at y) (\<lambda>x. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k))"
-   apply(rule,rule linear_continuous_at) unfolding linear_linear
-   unfolding linear_def inner_simps euclidean_eq_iff[where 'a='a] by(auto simp add:field_simps)
+    ((1/(abs(setprod m Basis))) *\<^sub>R i)) ((\<lambda>x. (\<Sum>k\<in>Basis. (1 / m k * (x\<bullet>k))*\<^sub>R k)) ` {a..b})"
+  apply (rule has_integral_twiddle[where f=f])
+  unfolding zero_less_abs_iff content_image_stretch_interval
+  unfolding image_stretch_interval empty_as_interval euclidean_eq_iff[where 'a='a]
+  using assms
+proof -
+  show "\<forall>y::'a. continuous (at y) (\<lambda>x. (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k))"
+    apply rule
+    apply (rule linear_continuous_at)
+    unfolding linear_linear
+    unfolding linear_iff inner_simps euclidean_eq_iff[where 'a='a]
+    apply (auto simp add: field_simps)
+    done
 qed auto
 
-lemma integrable_stretch:  fixes f::"'a::ordered_euclidean_space => 'b::real_normed_vector"
-  assumes "f integrable_on {a..b}" "\<forall>k\<in>Basis. ~(m k = 0)"
-  shows "(\<lambda>x::'a. f (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)) integrable_on ((\<lambda>x. \<Sum>k\<in>Basis. (1 / m k * (x\<bullet>k))*\<^sub>R k) ` {a..b})"
-  using assms unfolding integrable_on_def apply-apply(erule exE)
-  apply(drule has_integral_stretch,assumption) by auto
+lemma integrable_stretch:
+  fixes f :: "'a::ordered_euclidean_space \<Rightarrow> 'b::real_normed_vector"
+  assumes "f integrable_on {a..b}"
+    and "\<forall>k\<in>Basis. m k \<noteq> 0"
+  shows "(\<lambda>x::'a. f (\<Sum>k\<in>Basis. (m k * (x\<bullet>k))*\<^sub>R k)) integrable_on
+    ((\<lambda>x. \<Sum>k\<in>Basis. (1 / m k * (x\<bullet>k))*\<^sub>R k) ` {a..b})"
+  using assms
+  unfolding integrable_on_def
+  apply -
+  apply (erule exE)
+  apply (drule has_integral_stretch)
+  apply assumption
+  apply auto
+  done
+
 
 subsection {* even more special cases. *}
 
-lemma uminus_interval_vector[simp]:"uminus ` {a..b} = {-b .. -a::'a::ordered_euclidean_space}"
-  apply(rule set_eqI,rule) defer unfolding image_iff
-  apply(rule_tac x="-x" in bexI) by(auto simp add:minus_le_iff le_minus_iff eucl_le[where 'a='a])
-
-lemma has_integral_reflect_lemma[intro]: assumes "(f has_integral i) {a..b}"
-  shows "((\<lambda>x. f(-x)) has_integral i) {-b .. -a}"
-  using has_integral_affinity[OF assms, of "-1" 0] by auto
-
-lemma has_integral_reflect[simp]: "((\<lambda>x. f(-x)) has_integral i) {-b..-a} \<longleftrightarrow> (f has_integral i) ({a..b})"
-  apply rule apply(drule_tac[!] has_integral_reflect_lemma) by auto
+lemma uminus_interval_vector[simp]:
+  fixes a b :: "'a::ordered_euclidean_space"
+  shows "uminus ` {a..b} = {-b..-a}"
+  apply (rule set_eqI)
+  apply rule
+  defer
+  unfolding image_iff
+  apply (rule_tac x="-x" in bexI)
+  apply (auto simp add:minus_le_iff le_minus_iff eucl_le[where 'a='a])
+  done
+
+lemma has_integral_reflect_lemma[intro]:
+  assumes "(f has_integral i) {a..b}"
+  shows "((\<lambda>x. f(-x)) has_integral i) {-b..-a}"
+  using has_integral_affinity[OF assms, of "-1" 0]
+  by auto
+
+lemma has_integral_reflect[simp]:
+  "((\<lambda>x. f (-x)) has_integral i) {-b..-a} \<longleftrightarrow> (f has_integral i) {a..b}"
+  apply rule
+  apply (drule_tac[!] has_integral_reflect_lemma)
+  apply auto
+  done
 
 lemma integrable_reflect[simp]: "(\<lambda>x. f(-x)) integrable_on {-b..-a} \<longleftrightarrow> f integrable_on {a..b}"
   unfolding integrable_on_def by auto
 
-lemma integral_reflect[simp]: "integral {-b..-a} (\<lambda>x. f(-x)) = integral ({a..b}) f"
+lemma integral_reflect[simp]: "integral {-b..-a} (\<lambda>x. f (-x)) = integral {a..b} f"
   unfolding integral_def by auto
 
+
 subsection {* Stronger form of FCT; quite a tedious proof. *}
 
-lemma bgauge_existence_lemma: "(\<forall>x\<in>s. \<exists>d::real. 0 < d \<and> q d x) \<longleftrightarrow> (\<forall>x. \<exists>d>0. x\<in>s \<longrightarrow> q d x)" by(meson zero_less_one)
-
-lemma additive_tagged_division_1': fixes f::"real \<Rightarrow> 'a::real_normed_vector"
-  assumes "a \<le> b" "p tagged_division_of {a..b}"
+lemma bgauge_existence_lemma: "(\<forall>x\<in>s. \<exists>d::real. 0 < d \<and> q d x) \<longleftrightarrow> (\<forall>x. \<exists>d>0. x\<in>s \<longrightarrow> q d x)"
+  by (meson zero_less_one)
+
+lemma additive_tagged_division_1':
+  fixes f :: "real \<Rightarrow> 'a::real_normed_vector"
+  assumes "a \<le> b"
+    and "p tagged_division_of {a..b}"
   shows "setsum (\<lambda>(x,k). f (interval_upperbound k) - f(interval_lowerbound k)) p = f b - f a"
-  using additive_tagged_division_1[OF _ assms(2), of f] using assms(1) by auto
-
-lemma split_minus[simp]:"(\<lambda>(x, k). f x k) x - (\<lambda>(x, k). g x k) x = (\<lambda>(x, k). f x k - g x k) x"
-  unfolding split_def by(rule refl)
+  using additive_tagged_division_1[OF _ assms(2), of f]
+  using assms(1)
+  by auto
+
+lemma split_minus[simp]: "(\<lambda>(x, k). f x k) x - (\<lambda>(x, k). g x k) x = (\<lambda>(x, k). f x k - g x k) x"
+  by (simp add: split_def)
 
 lemma norm_triangle_le_sub: "norm x + norm y \<le> e \<Longrightarrow> norm (x - y) \<le> e"
-  apply(subst(asm)(2) norm_minus_cancel[symmetric])
-  apply(drule norm_triangle_le) by(auto simp add:algebra_simps)
-
-lemma fundamental_theorem_of_calculus_interior: fixes f::"real => 'a::real_normed_vector"
-  assumes"a \<le> b" "continuous_on {a..b} f" "\<forall>x\<in>{a<..<b}. (f has_vector_derivative f'(x)) (at x)"
+  apply (subst(asm)(2) norm_minus_cancel[symmetric])
+  apply (drule norm_triangle_le)
+  apply (auto simp add: algebra_simps)
+  done
+
+lemma fundamental_theorem_of_calculus_interior:
+  fixes f :: "real \<Rightarrow> 'a::real_normed_vector"
+  assumes "a \<le> b"
+    and "continuous_on {a..b} f"
+    and "\<forall>x\<in>{a<..<b}. (f has_vector_derivative f'(x)) (at x)"
   shows "(f' has_integral (f b - f a)) {a..b}"
-proof- { presume *:"a < b \<Longrightarrow> ?thesis"
-    show ?thesis proof(cases,rule *,assumption)
-      assume "\<not> a < b" hence "a = b" using assms(1) by auto
-      hence *:"{a .. b} = {b}" "f b - f a = 0" by(auto simp add:  order_antisym)
-      show ?thesis unfolding *(2) apply(rule has_integral_null) unfolding content_eq_0 using * `a=b`
+proof -
+  {
+    presume *: "a < b \<Longrightarrow> ?thesis"
+    show ?thesis
+    proof (cases "a < b")
+      case True
+      then show ?thesis by (rule *)
+    next
+      case False
+      then have "a = b"
+        using assms(1) by auto
+      then have *: "{a .. b} = {b}" "f b - f a = 0"
+        by (auto simp add:  order_antisym)
+      show ?thesis
+        unfolding *(2)
+        apply (rule has_integral_null)
+        unfolding content_eq_0
+        using * `a = b`
         by (auto simp: ex_in_conv)
-    qed } assume ab:"a < b"
+    qed
+  }
+  assume ab: "a < b"
   let ?P = "\<lambda>e. \<exists>d. gauge d \<and> (\<forall>p. p tagged_division_of {a..b} \<and> d fine p \<longrightarrow>
-                   norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f' x) - (f b - f a)) \<le> e * content {a..b})"
-  { presume "\<And>e. e>0 \<Longrightarrow> ?P e" thus ?thesis unfolding has_integral_factor_content by auto }
-  fix e::real assume e:"e>0"
+    norm ((\<Sum>(x, k)\<in>p. content k *\<^sub>R f' x) - (f b - f a)) \<le> e * content {a..b})"
+  { presume "\<And>e. e > 0 \<Longrightarrow> ?P e" then show ?thesis unfolding has_integral_factor_content by auto }
+  fix e :: real
+  assume e: "e > 0"
   note assms(3)[unfolded has_vector_derivative_def has_derivative_at_alt ball_conj_distrib]
-  note conjunctD2[OF this] note bounded=this(1) and this(2)
-  from this(2) have "\<forall>x\<in>{a<..<b}. \<exists>d>0. \<forall>y. norm (y - x) < d \<longrightarrow> norm (f y - f x - (y - x) *\<^sub>R f' x) \<le> e/2 * norm (y - x)"
-    apply-apply safe apply(erule_tac x=x in ballE,erule_tac x="e/2" in allE) using e by auto note this[unfolded bgauge_existence_lemma]
-  from choice[OF this] guess d .. note conjunctD2[OF this[rule_format]] note d = this[rule_format]
-  have "bounded (f ` {a..b})" apply(rule compact_imp_bounded compact_continuous_image)+ using compact_interval assms by auto
+  note conjunctD2[OF this]
+  note bounded=this(1) and this(2)
+  from this(2) have "\<forall>x\<in>{a<..<b}. \<exists>d>0. \<forall>y. norm (y - x) < d \<longrightarrow>
+    norm (f y - f x - (y - x) *\<^sub>R f' x) \<le> e/2 * norm (y - x)"
+    apply -
+    apply safe
+    apply (erule_tac x=x in ballE)
+    apply (erule_tac x="e/2" in allE)
+    using e
+    apply auto
+    done
+  note this[unfolded bgauge_existence_lemma]
+  from choice[OF this] guess d ..
+  note conjunctD2[OF this[rule_format]]
+  note d = this[rule_format]
+  have "bounded (f ` {a..b})"
+    apply (rule compact_imp_bounded compact_continuous_image)+
+    using compact_interval assms
+    apply auto
+    done
   from this[unfolded bounded_pos] guess B .. note B = this[rule_format]
 
-  have "\<exists>da. 0 < da \<and> (\<forall>c. a \<le> c \<and> {a..c} \<subseteq> {a..b} \<and> {a..c} \<subseteq> ball a da
-    \<longrightarrow> norm(content {a..c} *\<^sub>R f' a - (f c - f a)) \<le> (e * (b - a)) / 4)"
-  proof- have "a\<in>{a..b}" using ab by auto
+  have "\<exists>da. 0 < da \<and> (\<forall>c. a \<le> c \<and> {a..c} \<subseteq> {a..b} \<and> {a..c} \<subseteq> ball a da \<longrightarrow>
+    norm (content {a..c} *\<^sub>R f' a - (f c - f a)) \<le> (e * (b - a)) / 4)"
+  proof -
+    have "a \<in> {a..b}"
+      using ab by auto
     note assms(2)[unfolded continuous_on_eq_continuous_within,rule_format,OF this]
-    note * = this[unfolded continuous_within Lim_within,rule_format] have "(e * (b - a)) / 8 > 0" using e ab by(auto simp add:field_simps)
+    note * = this[unfolded continuous_within Lim_within,rule_format]
+    have "(e * (b - a)) / 8 > 0"
+      using e ab by (auto simp add: field_simps)
     from *[OF this] guess k .. note k = conjunctD2[OF this,rule_format]
     have "\<exists>l. 0 < l \<and> norm(l *\<^sub>R f' a) \<le> (e * (b - a)) / 8"
-    proof(cases "f' a = 0") case True
-      thus ?thesis apply(rule_tac x=1 in exI) using ab e by(auto intro!:mult_nonneg_nonneg)
-    next case False thus ?thesis
-        apply(rule_tac x="(e * (b - a)) / 8 / norm (f' a)" in exI) using ab e by(auto simp add:field_simps)
-    qed then guess l .. note l = conjunctD2[OF this]
-    show ?thesis apply(rule_tac x="min k l" in exI) apply safe unfolding min_less_iff_conj apply(rule,(rule l k)+)
-    proof- fix c assume as:"a \<le> c" "{a..c} \<subseteq> {a..b}" "{a..c} \<subseteq> ball a (min k l)"
+    proof (cases "f' a = 0")
+      case True
+      then show ?thesis
+        apply (rule_tac x=1 in exI)
+        using ab e
+        apply (auto intro!:mult_nonneg_nonneg)
+        done
+    next
+      case False
+      then show ?thesis
+        apply (rule_tac x="(e * (b - a)) / 8 / norm (f' a)" in exI)
+        using ab e
+        apply (auto simp add: field_simps)
+        done
+    qed
+    then guess l .. note l = conjunctD2[OF this]
+    show ?thesis
+      apply (rule_tac x="min k l" in exI)
+      apply safe
+      unfolding min_less_iff_conj
+      apply rule
+      apply (rule l k)+
+    proof -
+      fix c
+      assume as: "a \<le> c" "{a..c} \<subseteq> {a..b}" "{a..c} \<subseteq> ball a (min k l)"
       note as' = this[unfolded subset_eq Ball_def mem_ball dist_real_def mem_interval]
-      have "norm ((c - a) *\<^sub>R f' a - (f c - f a)) \<le> norm ((c - a) *\<^sub>R f' a) + norm (f c - f a)" by(rule norm_triangle_ineq4)
-      also have "... \<le> e * (b - a) / 8 + e * (b - a) / 8"
-      proof(rule add_mono) case goal1 have "\<bar>c - a\<bar> \<le> \<bar>l\<bar>" using as' by auto
-        thus ?case apply-apply(rule order_trans[OF _ l(2)]) unfolding norm_scaleR apply(rule mult_right_mono) by auto
-      next case goal2 show ?case apply(rule less_imp_le) apply(cases "a = c") defer
-          apply(rule k(2)[unfolded dist_norm]) using as' e ab by(auto simp add:field_simps)
-      qed finally show "norm (content {a..c} *\<^sub>R f' a - (f c - f a)) \<le> e * (b - a) / 4"
+      have "norm ((c - a) *\<^sub>R f' a - (f c - f a)) \<le> norm ((c - a) *\<^sub>R f' a) + norm (f c - f a)"
+        by (rule norm_triangle_ineq4)
+      also have "\<dots> \<le> e * (b - a) / 8 + e * (b - a) / 8"
+      proof (rule add_mono)
+        case goal1
+        have "\<bar>c - a\<bar> \<le> \<bar>l\<bar>"
+          using as' by auto
+        then show ?case
+          apply -
+          apply (rule order_trans[OF _ l(2)])
+          unfolding norm_scaleR
+          apply (rule mult_right_mono)
+          apply auto
+          done
+      next
+        case goal2
+        show ?case
+          apply (rule less_imp_le)
+          apply (cases "a = c")
+          defer
+          apply (rule k(2)[unfolded dist_norm])
+          using as' e ab
+          apply (auto simp add: field_simps)
+          done
+      qed
+      finally show "norm (content {a..c} *\<^sub>R f' a - (f c - f a)) \<le> e * (b - a) / 4"
         unfolding content_real[OF as(1)] by auto
-    qed qed then guess da .. note da=conjunctD2[OF this,rule_format]
+    qed
+  qed
+  then guess da .. note da=conjunctD2[OF this,rule_format]
 
   have "\<exists>db>0. \<forall>c\<le>b. {c..b} \<subseteq> {a..b} \<and> {c..b} \<subseteq> ball b db \<longrightarrow>
-    norm(content {c..b} *\<^sub>R f' b - (f b - f c)) \<le> (e * (b - a)) / 4"
-  proof- have "b\<in>{a..b}" using ab by auto
+    norm (content {c..b} *\<^sub>R f' b - (f b - f c)) \<le> (e * (b - a)) / 4"
+  proof -
+    have "b \<in> {a..b}"
+      using ab by auto
     note assms(2)[unfolded continuous_on_eq_continuous_within,rule_format,OF this]
     note * = this[unfolded continuous_within Lim_within,rule_format] have "(e * (b - a)) / 8 > 0"
-      using e ab by(auto simp add:field_simps)
+      using e ab by (auto simp add: field_simps)
     from *[OF this] guess k .. note k = conjunctD2[OF this,rule_format]
-    have "\<exists>l. 0 < l \<and> norm(l *\<^sub>R f' b) \<le> (e * (b - a)) / 8"
-    proof(cases "f' b = 0") case True
-      thus ?thesis apply(rule_tac x=1 in exI) using ab e by(auto intro!:mult_nonneg_nonneg)
-    next case False thus ?thesis
-        apply(rule_tac x="(e * (b - a)) / 8 / norm (f' b)" in exI)
-        using ab e by(auto simp add:field_simps)
-    qed then guess l .. note l = conjunctD2[OF this]
-    show ?thesis apply(rule_tac x="min k l" in exI) apply safe unfolding min_less_iff_conj apply(rule,(rule l k)+)
-    proof- fix c assume as:"c \<le> b" "{c..b} \<subseteq> {a..b}" "{c..b} \<subseteq> ball b (min k l)"
+    have "\<exists>l. 0 < l \<and> norm (l *\<^sub>R f' b) \<le> (e * (b - a)) / 8"
+    proof (cases "f' b = 0")
+      case True
+      then show ?thesis
+        apply (rule_tac x=1 in exI)
+        using ab e
+        apply (auto intro!: mult_nonneg_nonneg)
+        done
+    next
+      case False
+      then show ?thesis
+        apply (rule_tac x="(e * (b - a)) / 8 / norm (f' b)" in exI)
+        using ab e
+        apply (auto simp add: field_simps)
+        done
+    qed
+    then guess l .. note l = conjunctD2[OF this]
+    show ?thesis
+      apply (rule_tac x="min k l" in exI)
+      apply safe
+      unfolding min_less_iff_conj
+      apply rule
+      apply (rule l k)+
+    proof -
+      fix c
+      assume as: "c \<le> b" "{c..b} \<subseteq> {a..b}" "{c..b} \<subseteq> ball b (min k l)"
       note as' = this[unfolded subset_eq Ball_def mem_ball dist_real_def mem_interval]
-      have "norm ((b - c) *\<^sub>R f' b - (f b - f c)) \<le> norm ((b - c) *\<^sub>R f' b) + norm (f b - f c)" by(rule norm_triangle_ineq4)
-      also have "... \<le> e * (b - a) / 8 + e * (b - a) / 8"
-      proof(rule add_mono) case goal1 have "\<bar>c - b\<bar> \<le> \<bar>l\<bar>" using as' by auto
-        thus ?case apply-apply(rule order_trans[OF _ l(2)]) unfolding norm_scaleR apply(rule mult_right_mono) by auto
-      next case goal2 show ?case apply(rule less_imp_le) apply(cases "b = c") defer apply(subst norm_minus_commute)
-          apply(rule k(2)[unfolded dist_norm]) using as' e ab by(auto simp add:field_simps)
-      qed finally show "norm (content {c..b} *\<^sub>R f' b - (f b - f c)) \<le> e * (b - a) / 4"
+      have "norm ((b - c) *\<^sub>R f' b - (f b - f c)) \<le> norm ((b - c) *\<^sub>R f' b) + norm (f b - f c)"
+        by (rule norm_triangle_ineq4)
+      also have "\<dots> \<le> e * (b - a) / 8 + e * (b - a) / 8"
+      proof (rule add_mono)
+        case goal1
+        have "\<bar>c - b\<bar> \<le> \<bar>l\<bar>"
+          using as' by auto
+        then show ?case
+          apply -
+          apply (rule order_trans[OF _ l(2)])
+          unfolding norm_scaleR
+          apply (rule mult_right_mono)
+          apply auto
+          done
+      next
+        case goal2
+        show ?case
+          apply (rule less_imp_le)
+          apply (cases "b = c")
+          defer
+          apply (subst norm_minus_commute)
+          apply (rule k(2)[unfolded dist_norm])
+          using as' e ab
+          apply (auto simp add: field_simps)
+          done
+      qed
+      finally show "norm (content {c..b} *\<^sub>R f' b - (f b - f c)) \<le> e * (b - a) / 4"
         unfolding content_real[OF as(1)] by auto
-    qed qed then guess db .. note db=conjunctD2[OF this,rule_format]
+    qed
+  qed
+  then guess db .. note db=conjunctD2[OF this,rule_format]
 
   let ?d = "(\<lambda>x. ball x (if x=a then da else if x=b then db else d x))"
-  show "?P e" apply(rule_tac x="?d" in exI)
-  proof safe case goal1 show ?case apply(rule gauge_ball_dependent) using ab db(1) da(1) d(1) by auto
-  next case goal2 note as=this let ?A = "{t. fst t \<in> {a, b}}" note p = tagged_division_ofD[OF goal2(1)]
-    have pA:"p = (p \<inter> ?A) \<union> (p - ?A)" "finite (p \<inter> ?A)" "finite (p - ?A)" "(p \<inter> ?A) \<inter> (p - ?A) = {}"  using goal2 by auto
+  show "?P e"
+    apply (rule_tac x="?d" in exI)
+  proof safe
+    case goal1
+    show ?case
+      apply (rule gauge_ball_dependent)
+      using ab db(1) da(1) d(1)
+      apply auto
+      done
+  next
+    case goal2
+    note as=this
+    let ?A = "{t. fst t \<in> {a, b}}"
+    note p = tagged_division_ofD[OF goal2(1)]
+    have pA: "p = (p \<inter> ?A) \<union> (p - ?A)" "finite (p \<inter> ?A)" "finite (p - ?A)" "(p \<inter> ?A) \<inter> (p - ?A) = {}"
+      using goal2 by auto
     note * = additive_tagged_division_1'[OF assms(1) goal2(1), symmetric]
-    have **:"\<And>n1 s1 n2 s2::real. n2 \<le> s2 / 2 \<Longrightarrow> n1 - s1 \<le> s2 / 2 \<Longrightarrow> n1 + n2 \<le> s1 + s2" by arith
-    show ?case unfolding content_real[OF assms(1)] and *[of "\<lambda>x. x"] *[of f] setsum_subtractf[symmetric] split_minus
-      unfolding setsum_right_distrib apply(subst(2) pA,subst pA) unfolding setsum_Un_disjoint[OF pA(2-)]
-    proof(rule norm_triangle_le,rule **)
-      case goal1 show ?case apply(rule order_trans,rule setsum_norm_le) defer apply(subst setsum_divide_distrib)
-      proof(rule order_refl,safe,unfold not_le o_def split_conv fst_conv,rule ccontr) fix x k assume as:"(x,k) \<in> p"
-          "e * (interval_upperbound k -  interval_lowerbound k) / 2
-          < norm (content k *\<^sub>R f' x - (f (interval_upperbound k) - f (interval_lowerbound k)))"
-        from p(4)[OF this(1)] guess u v apply-by(erule exE)+ note k=this
-        hence "u \<le> v" and uv:"{u,v}\<subseteq>{u..v}" using p(2)[OF as(1)] by auto
+    have **: "\<And>n1 s1 n2 s2::real. n2 \<le> s2 / 2 \<Longrightarrow> n1 - s1 \<le> s2 / 2 \<Longrightarrow> n1 + n2 \<le> s1 + s2"
+      by arith
+    show ?case
+      unfolding content_real[OF assms(1)] and *[of "\<lambda>x. x"] *[of f] setsum_subtractf[symmetric] split_minus
+      unfolding setsum_right_distrib
+      apply (subst(2) pA)
+      apply (subst pA)
+      unfolding setsum_Un_disjoint[OF pA(2-)]
+    proof (rule norm_triangle_le, rule **)
+      case goal1
+      show ?case
+        apply (rule order_trans)
+        apply (rule setsum_norm_le)
+        defer
+        apply (subst setsum_divide_distrib)
+        apply (rule order_refl)
+        apply safe
+        apply (unfold not_le o_def split_conv fst_conv)
+      proof (rule ccontr)
+        fix x k
+        assume as: "(x, k) \<in> p"
+          "e * (interval_upperbound k -  interval_lowerbound k) / 2 <
+            norm (content k *\<^sub>R f' x - (f (interval_upperbound k) - f (interval_lowerbound k)))"
+        from p(4)[OF this(1)] guess u v by (elim exE) note k=this
+        then have "u \<le> v" and uv: "{u, v} \<subseteq> {u..v}"
+          using p(2)[OF as(1)] by auto
         note result = as(2)[unfolded k interval_bounds_real[OF this(1)] content_real[OF this(1)]]
 
-        assume as':"x \<noteq> a" "x \<noteq> b" hence "x \<in> {a<..<b}" using p(2-3)[OF as(1)] by auto
+        assume as': "x \<noteq> a" "x \<noteq> b"
+        then have "x \<in> {a<..<b}"
+          using p(2-3)[OF as(1)] by auto
         note  * = d(2)[OF this]
         have "norm ((v - u) *\<^sub>R f' (x) - (f (v) - f (u))) =
           norm ((f (u) - f (x) - (u - x) *\<^sub>R f' (x)) - (f (v) - f (x) - (v - x) *\<^sub>R f' (x)))"
-          apply(rule arg_cong[of _ _ norm]) unfolding scaleR_left.diff by auto
-        also have "... \<le> e / 2 * norm (u - x) + e / 2 * norm (v - x)" apply(rule norm_triangle_le_sub)
-          apply(rule add_mono) apply(rule_tac[!] *) using fineD[OF goal2(2) as(1)] as' unfolding k subset_eq
-          apply- apply(erule_tac x=u in ballE,erule_tac[3] x=v in ballE) using uv by(auto simp:dist_real_def)
-        also have "... \<le> e / 2 * norm (v - u)" using p(2)[OF as(1)] unfolding k by(auto simp add:field_simps)
+          apply (rule arg_cong[of _ _ norm])
+          unfolding scaleR_left.diff
+          apply auto
+          done
+        also have "\<dots> \<le> e / 2 * norm (u - x) + e / 2 * norm (v - x)"
+          apply (rule norm_triangle_le_sub)
+          apply (rule add_mono)
+          apply (rule_tac[!] *)
+          using fineD[OF goal2(2) as(1)] as'
+          unfolding k subset_eq
+          apply -
+          apply (erule_tac x=u in ballE)
+          apply (erule_tac[3] x=v in ballE)
+          using uv
+          apply (auto simp:dist_real_def)
+          done
+        also have "\<dots> \<le> e / 2 * norm (v - u)"
+          using p(2)[OF as(1)]
+          unfolding k
+          by (auto simp add: field_simps)
         finally have "e * (v - u) / 2 < e * (v - u) / 2"
-          apply- apply(rule less_le_trans[OF result]) using uv by auto thus False by auto qed
-
-    next have *:"\<And>x s1 s2::real. 0 \<le> s1 \<Longrightarrow> x \<le> (s1 + s2) / 2 \<Longrightarrow> x - s1 \<le> s2 / 2" by auto
-      case goal2 show ?case apply(rule *) apply(rule setsum_nonneg) apply(rule,unfold split_paired_all split_conv)
-        defer unfolding setsum_Un_disjoint[OF pA(2-),symmetric] pA(1)[symmetric] unfolding setsum_right_distrib[symmetric]
-        apply(subst additive_tagged_division_1[OF _ as(1)]) apply(rule assms)
-      proof- fix x k assume "(x,k) \<in> p \<inter> {t. fst t \<in> {a, b}}" note xk=IntD1[OF this]
-        from p(4)[OF this] guess u v apply-by(erule exE)+ note uv=this
-        with p(2)[OF xk] have "{u..v} \<noteq> {}" by auto
-        thus "0 \<le> e * ((interval_upperbound k) - (interval_lowerbound k))"
-          unfolding uv using e by(auto simp add:field_simps)
-      next have *:"\<And>s f t e. setsum f s = setsum f t \<Longrightarrow> norm(setsum f t) \<le> e \<Longrightarrow> norm(setsum f s) \<le> e" by auto
+          apply -
+          apply (rule less_le_trans[OF result])
+          using uv
+          apply auto
+          done
+        then show False by auto
+      qed
+    next
+      have *: "\<And>x s1 s2::real. 0 \<le> s1 \<Longrightarrow> x \<le> (s1 + s2) / 2 \<Longrightarrow> x - s1 \<le> s2 / 2"
+        by auto
+      case goal2
+      show ?case
+        apply (rule *)
+        apply (rule setsum_nonneg)
+        apply rule
+        apply (unfold split_paired_all split_conv)
+        defer
+        unfolding setsum_Un_disjoint[OF pA(2-),symmetric] pA(1)[symmetric]
+        unfolding setsum_right_distrib[symmetric]
+        apply (subst additive_tagged_division_1[OF _ as(1)])
+        apply (rule assms)
+      proof -
+        fix x k
+        assume "(x, k) \<in> p \<inter> {t. fst t \<in> {a, b}}"
+        note xk=IntD1[OF this]
+        from p(4)[OF this] guess u v by (elim exE) note uv=this
+        with p(2)[OF xk] have "{u..v} \<noteq> {}"
+          by auto
+        then show "0 \<le> e * ((interval_upperbound k) - (interval_lowerbound k))"
+          unfolding uv using e by (auto simp add: field_simps)
+      next
+        have *: "\<And>s f t e. setsum f s = setsum f t \<Longrightarrow> norm (setsum f t) \<le> e \<Longrightarrow> norm (setsum f s) \<le> e"
+          by auto
         show "norm (\<Sum>(x, k)\<in>p \<inter> ?A. content k *\<^sub>R f' x -
           (f ((interval_upperbound k)) - f ((interval_lowerbound k)))) \<le> e * (b - a) / 2"
-          apply(rule *[where t="p \<inter> {t. fst t \<in> {a, b} \<and> content(snd t) \<noteq> 0}"])
-          apply(rule setsum_mono_zero_right[OF pA(2)]) defer apply(rule) unfolding split_paired_all split_conv o_def
-        proof- fix x k assume "(x,k) \<in> p \<inter> {t. fst t \<in> {a, b}} - p \<inter> {t. fst t \<in> {a, b} \<and> content (snd t) \<noteq> 0}"
-          hence xk:"(x,k)\<in>p" "content k = 0" by auto from p(4)[OF xk(1)] guess u v apply-by(erule exE)+ note uv=this
-          have "k\<noteq>{}" using p(2)[OF xk(1)] by auto hence *:"u = v" using xk
-            unfolding uv content_eq_0 interval_eq_empty by auto
-          thus "content k *\<^sub>R (f' (x)) - (f ((interval_upperbound k)) - f ((interval_lowerbound k))) = 0" using xk unfolding uv by auto
-        next have *:"p \<inter> {t. fst t \<in> {a, b} \<and> content(snd t) \<noteq> 0} =
-            {t. t\<in>p \<and> fst t = a \<and> content(snd t) \<noteq> 0} \<union> {t. t\<in>p \<and> fst t = b \<and> content(snd t) \<noteq> 0}" by blast
-          have **:"\<And>s f. \<And>e::real. (\<forall>x y. x \<in> s \<and> y \<in> s \<longrightarrow> x = y) \<Longrightarrow> (\<forall>x. x \<in> s \<longrightarrow> norm(f x) \<le> e)
-            \<Longrightarrow> e>0 \<Longrightarrow> norm(setsum f s) \<le> e"
-          proof(case_tac "s={}") case goal2 then obtain x where "x\<in>s" by auto hence *:"s = {x}" using goal2(1) by auto
-            thus ?case using `x\<in>s` goal2(2) by auto
+          apply (rule *[where t="p \<inter> {t. fst t \<in> {a, b} \<and> content(snd t) \<noteq> 0}"])
+          apply (rule setsum_mono_zero_right[OF pA(2)])
+          defer
+          apply rule
+          unfolding split_paired_all split_conv o_def
+        proof -
+          fix x k
+          assume "(x, k) \<in> p \<inter> {t. fst t \<in> {a, b}} - p \<inter> {t. fst t \<in> {a, b} \<and> content (snd t) \<noteq> 0}"
+          then have xk: "(x, k) \<in> p" "content k = 0"
+            by auto
+          from p(4)[OF xk(1)] guess u v by (elim exE) note uv=this
+          have "k \<noteq> {}"
+            using p(2)[OF xk(1)] by auto
+          then have *: "u = v"
+            using xk
+            unfolding uv content_eq_0 interval_eq_empty
+            by auto
+          then show "content k *\<^sub>R (f' (x)) - (f ((interval_upperbound k)) - f ((interval_lowerbound k))) = 0"
+            using xk unfolding uv by auto
+        next
+          have *: "p \<inter> {t. fst t \<in> {a, b} \<and> content(snd t) \<noteq> 0} =
+            {t. t\<in>p \<and> fst t = a \<and> content(snd t) \<noteq> 0} \<union> {t. t\<in>p \<and> fst t = b \<and> content(snd t) \<noteq> 0}"
+            by blast
+          have **: "\<And>s f. \<And>e::real. (\<forall>x y. x \<in> s \<and> y \<in> s \<longrightarrow> x = y) \<Longrightarrow>
+            (\<forall>x. x \<in> s \<longrightarrow> norm (f x) \<le> e) \<Longrightarrow> e > 0 \<Longrightarrow> norm (setsum f s) \<le> e"
+          proof (case_tac "s = {}")
+            case goal2
+            then obtain x where "x \<in> s"
+              by auto
+            then have *: "s = {x}"
+              using goal2(1) by auto
+            then show ?case
+              using `x \<in> s` goal2(2) by auto
           qed auto
-          case goal2 show ?case apply(subst *, subst setsum_Un_disjoint) prefer 4
-            apply(rule order_trans[of _ "e * (b - a)/4 + e * (b - a)/4"])
-            apply(rule norm_triangle_le,rule add_mono) apply(rule_tac[1-2] **)
-          proof- let ?B = "\<lambda>x. {t \<in> p. fst t = x \<and> content (snd t) \<noteq> 0}"
-            have pa:"\<And>k. (a, k) \<in> p \<Longrightarrow> \<exists>v. k = {a .. v} \<and> a \<le> v"
-            proof- case goal1 guess u v using p(4)[OF goal1] apply-by(erule exE)+ note uv=this
-              have *:"u \<le> v" using p(2)[OF goal1] unfolding uv by auto
-              have u:"u = a" proof(rule ccontr)  have "u \<in> {u..v}" using p(2-3)[OF goal1(1)] unfolding uv by auto
-                have "u \<ge> a" using p(2-3)[OF goal1(1)] unfolding uv subset_eq by auto moreover assume "u\<noteq>a" ultimately
-                have "u > a" by auto
-                thus False using p(2)[OF goal1(1)] unfolding uv by(auto simp add:)
-              qed thus ?case apply(rule_tac x=v in exI) unfolding uv using * by auto
+          case goal2
+          show ?case
+            apply (subst *)
+            apply (subst setsum_Un_disjoint)
+            prefer 4
+            apply (rule order_trans[of _ "e * (b - a)/4 + e * (b - a)/4"])
+            apply (rule norm_triangle_le,rule add_mono)
+            apply (rule_tac[1-2] **)
+          proof -
+            let ?B = "\<lambda>x. {t \<in> p. fst t = x \<and> content (snd t) \<noteq> 0}"
+            have pa: "\<And>k. (a, k) \<in> p \<Longrightarrow> \<exists>v. k = {a .. v} \<and> a \<le> v"
+            proof -
+              case goal1
+              guess u v using p(4)[OF goal1] by (elim exE) note uv=this
+              have *: "u \<le> v"
+                using p(2)[OF goal1] unfolding uv by auto
+              have u: "u = a"
+              proof (rule ccontr)
+                have "u \<in> {u..v}"
+                  using p(2-3)[OF goal1(1)] unfolding uv by auto
+                have "u \<ge> a"
+                  using p(2-3)[OF goal1(1)] unfolding uv subset_eq by auto
+                moreover assume "u \<noteq> a"
+                ultimately have "u > a" by auto
+                then show False
+                  using p(2)[OF goal1(1)] unfolding uv by (auto simp add:)
+              qed
+              then show ?case
+                apply (rule_tac x=v in exI)
+                unfolding uv
+                using *
+                apply auto
+                done
             qed
-            have pb:"\<And>k. (b, k) \<in> p \<Longrightarrow> \<exists>v. k = {v .. b} \<and> b \<ge> v"
-            proof- case goal1 guess u v using p(4)[OF goal1] apply-by(erule exE)+ note uv=this
-              have *:"u \<le> v" using p(2)[OF goal1] unfolding uv by auto
-              have u:"v =  b" proof(rule ccontr)  have "u \<in> {u..v}" using p(2-3)[OF goal1(1)] unfolding uv by auto
-                have "v \<le>  b" using p(2-3)[OF goal1(1)] unfolding uv subset_eq by auto moreover assume "v\<noteq> b" ultimately
-                have "v <  b" by auto
-                thus False using p(2)[OF goal1(1)] unfolding uv by(auto simp add:)
-              qed thus ?case apply(rule_tac x=u in exI) unfolding uv using * by auto
+            have pb: "\<And>k. (b, k) \<in> p \<Longrightarrow> \<exists>v. k = {v .. b} \<and> b \<ge> v"
+            proof -
+              case goal1
+              guess u v using p(4)[OF goal1] by (elim exE) note uv=this
+              have *: "u \<le> v"
+                using p(2)[OF goal1] unfolding uv by auto
+              have u: "v =  b"
+              proof (rule ccontr)
+                have "u \<in> {u..v}"
+                  using p(2-3)[OF goal1(1)] unfolding uv by auto
+                have "v \<le> b"
+                  using p(2-3)[OF goal1(1)] unfolding uv subset_eq by auto
+                moreover assume "v \<noteq> b"
+                ultimately have "v < b" by auto
+                then show False
+                  using p(2)[OF goal1(1)] unfolding uv by (auto simp add:)
+              qed
+              then show ?case
+                apply (rule_tac x=u in exI)
+                unfolding uv
+                using *
+                apply auto
+                done
             qed
-
-            show "\<forall>x y. x \<in> ?B a \<and> y \<in> ?B a \<longrightarrow> x = y" apply(rule,rule,rule,unfold split_paired_all)
-              unfolding mem_Collect_eq fst_conv snd_conv apply safe
-            proof- fix x k k' assume k:"( a, k) \<in> p" "( a, k') \<in> p" "content k \<noteq> 0" "content k' \<noteq> 0"
+            show "\<forall>x y. x \<in> ?B a \<and> y \<in> ?B a \<longrightarrow> x = y"
+              apply (rule,rule,rule,unfold split_paired_all)
+              unfolding mem_Collect_eq fst_conv snd_conv
+              apply safe
+            proof -
+              fix x k k'
+              assume k: "(a, k) \<in> p" "(a, k') \<in> p" "content k \<noteq> 0" "content k' \<noteq> 0"
               guess v using pa[OF k(1)] .. note v = conjunctD2[OF this]
-              guess v' using pa[OF k(2)] .. note v' = conjunctD2[OF this] let ?v = " (min (v) (v'))"
-              have "{ a <..< ?v} \<subseteq> k \<inter> k'" unfolding v v' by(auto simp add:) note interior_mono[OF this,unfolded interior_inter]
-              moreover have " ((a + ?v)/2) \<in> { a <..< ?v}" using k(3-)
-                unfolding v v' content_eq_0 not_le by(auto simp add:not_le)
-              ultimately have " ((a + ?v)/2) \<in> interior k \<inter> interior k'" unfolding interior_open[OF open_interval] by auto
-              hence *:"k = k'" apply- apply(rule ccontr) using p(5)[OF k(1-2)] by auto
-              { assume "x\<in>k" thus "x\<in>k'" unfolding * . } { assume "x\<in>k'" thus "x\<in>k" unfolding * . }
+              guess v' using pa[OF k(2)] .. note v' = conjunctD2[OF this] let ?v = "min v v'"
+              have "{a <..< ?v} \<subseteq> k \<inter> k'"
+                unfolding v v' by (auto simp add:)
+              note interior_mono[OF this,unfolded interior_inter]
+              moreover have "(a + ?v)/2 \<in> { a <..< ?v}"
+                using k(3-)
+                unfolding v v' content_eq_0 not_le
+                by (auto simp add: not_le)
+              ultimately have "(a + ?v)/2 \<in> interior k \<inter> interior k'"
+                unfolding interior_open[OF open_interval] by auto
+              then have *: "k = k'"
+                apply -
+                apply (rule ccontr)
+                using p(5)[OF k(1-2)]
+                apply auto
+                done
+              { assume "x \<in> k" then show "x \<in> k'" unfolding * . }
+              { assume "x \<in> k'" then show "x\<in>k" unfolding * . }
             qed
-            show "\<forall>x y. x \<in> ?B b \<and> y \<in> ?B b \<longrightarrow> x = y" apply(rule,rule,rule,unfold split_paired_all)
-              unfolding mem_Collect_eq fst_conv snd_conv apply safe
-            proof- fix x k k' assume k:"( b, k) \<in> p" "( b, k') \<in> p" "content k \<noteq> 0" "content k' \<noteq> 0"
+            show "\<forall>x y. x \<in> ?B b \<and> y \<in> ?B b \<longrightarrow> x = y"
+              apply rule
+              apply rule
+              apply rule
+              apply (unfold split_paired_all)
+              unfolding mem_Collect_eq fst_conv snd_conv
+              apply safe
+            proof -
+              fix x k k'
+              assume k: "(b, k) \<in> p" "(b, k') \<in> p" "content k \<noteq> 0" "content k' \<noteq> 0"
               guess v using pb[OF k(1)] .. note v = conjunctD2[OF this]
-              guess v' using pb[OF k(2)] .. note v' = conjunctD2[OF this] let ?v = " (max (v) (v'))"
-              have "{?v <..<  b} \<subseteq> k \<inter> k'" unfolding v v' by(auto simp add:) note interior_mono[OF this,unfolded interior_inter]
-              moreover have " ((b + ?v)/2) \<in> {?v <..<  b}" using k(3-) unfolding v v' content_eq_0 not_le by auto
-              ultimately have " ((b + ?v)/2) \<in> interior k \<inter> interior k'" unfolding interior_open[OF open_interval] by auto
-              hence *:"k = k'" apply- apply(rule ccontr) using p(5)[OF k(1-2)] by auto
-              { assume "x\<in>k" thus "x\<in>k'" unfolding * . } { assume "x\<in>k'" thus "x\<in>k" unfolding * . }
+              guess v' using pb[OF k(2)] .. note v' = conjunctD2[OF this]
+              let ?v = "max v v'"
+              have "{?v <..< b} \<subseteq> k \<inter> k'"
+                unfolding v v' by auto
+                note interior_mono[OF this,unfolded interior_inter]
+              moreover have " ((b + ?v)/2) \<in> {?v <..<  b}"
+                using k(3-) unfolding v v' content_eq_0 not_le by auto
+              ultimately have " ((b + ?v)/2) \<in> interior k \<inter> interior k'"
+                unfolding interior_open[OF open_interval] by auto
+              then have *: "k = k'"
+                apply -
+                apply (rule ccontr)
+                using p(5)[OF k(1-2)]
+                apply auto
+                done
+              { assume "x \<in> k" then show "x \<in> k'" unfolding * . }
+              { assume "x \<in> k'" then show "x\<in>k" unfolding * . }
             qed
 
             let ?a = a and ?b = b (* a is something else while proofing the next theorem. *)
-            show "\<forall>x. x \<in> ?B a \<longrightarrow> norm ((\<lambda>(x, k). content k *\<^sub>R f' (x) - (f ((interval_upperbound k)) -
-              f ((interval_lowerbound k)))) x) \<le> e * (b - a) / 4" apply(rule,rule) unfolding mem_Collect_eq
+            show "\<forall>x. x \<in> ?B a \<longrightarrow> norm ((\<lambda>(x, k). content k *\<^sub>R f' x - (f (interval_upperbound k) -
+              f (interval_lowerbound k))) x) \<le> e * (b - a) / 4"
+              apply rule
+              apply rule
+              unfolding mem_Collect_eq
               unfolding split_paired_all fst_conv snd_conv
-            proof safe case goal1 guess v using pa[OF goal1(1)] .. note v = conjunctD2[OF this]
-              have " ?a\<in>{ ?a..v}" using v(2) by auto hence "v \<le> ?b" using p(3)[OF goal1(1)] unfolding subset_eq v by auto
-              moreover have "{?a..v} \<subseteq> ball ?a da" using fineD[OF as(2) goal1(1)]
-                apply-apply(subst(asm) if_P,rule refl) unfolding subset_eq apply safe apply(erule_tac x=" x" in ballE)
-                by(auto simp add:subset_eq dist_real_def v) ultimately
-              show ?case unfolding v interval_bounds_real[OF v(2)] apply- apply(rule da(2)[of "v"])
-                using goal1 fineD[OF as(2) goal1(1)] unfolding v content_eq_0 by auto
+            proof safe
+              case goal1
+              guess v using pa[OF goal1(1)] .. note v = conjunctD2[OF this]
+              have "?a \<in> {?a..v}"
+                using v(2) by auto
+              then have "v \<le> ?b"
+                using p(3)[OF goal1(1)] unfolding subset_eq v by auto
+              moreover have "{?a..v} \<subseteq> ball ?a da"
+                using fineD[OF as(2) goal1(1)]
+                apply -
+                apply (subst(asm) if_P)
+                apply (rule refl)
+                unfolding subset_eq
+                apply safe
+                apply (erule_tac x=" x" in ballE)
+                apply (auto simp add:subset_eq dist_real_def v)
+                done
+              ultimately show ?case
+                unfolding v interval_bounds_real[OF v(2)]
+                apply -
+                apply(rule da(2)[of "v"])
+                using goal1 fineD[OF as(2) goal1(1)]
+                unfolding v content_eq_0
+                apply auto
+                done
             qed
-            show "\<forall>x. x \<in> ?B b \<longrightarrow> norm ((\<lambda>(x, k). content k *\<^sub>R f' (x) -
-              (f ((interval_upperbound k)) - f ((interval_lowerbound k)))) x) \<le> e * (b - a) / 4"
-              apply(rule,rule) unfolding mem_Collect_eq unfolding split_paired_all fst_conv snd_conv
-            proof safe case goal1 guess v using pb[OF goal1(1)] .. note v = conjunctD2[OF this]
-              have " ?b\<in>{v.. ?b}" using v(2) by auto hence "v \<ge> ?a" using p(3)[OF goal1(1)]
+            show "\<forall>x. x \<in> ?B b \<longrightarrow> norm ((\<lambda>(x, k). content k *\<^sub>R f' x -
+              (f (interval_upperbound k) - f (interval_lowerbound k))) x) \<le> e * (b - a) / 4"
+              apply rule
+              apply rule
+              unfolding mem_Collect_eq
+              unfolding split_paired_all fst_conv snd_conv
+            proof safe
+              case goal1 guess v using pb[OF goal1(1)] .. note v = conjunctD2[OF this]
+              have "?b \<in> {v.. ?b}"
+                using v(2) by auto
+              then have "v \<ge> ?a" using p(3)[OF goal1(1)]
                 unfolding subset_eq v by auto
-              moreover have "{v..?b} \<subseteq> ball ?b db" using fineD[OF as(2) goal1(1)]
-                apply-apply(subst(asm) if_P,rule refl) unfolding subset_eq apply safe
-                apply(erule_tac x=" x" in ballE) using ab
-                by(auto simp add:subset_eq v dist_real_def) ultimately
-              show ?case unfolding v unfolding interval_bounds_real[OF v(2)] apply- apply(rule db(2)[of "v"])
-                using goal1 fineD[OF as(2) goal1(1)] unfolding v content_eq_0 by auto
+              moreover have "{v..?b} \<subseteq> ball ?b db"
+                using fineD[OF as(2) goal1(1)]
+                apply -
+                apply (subst(asm) if_P, rule refl)
+                unfolding subset_eq
+                apply safe
+                apply (erule_tac x=" x" in ballE)
+                using ab
+                apply (auto simp add:subset_eq v dist_real_def)
+                done
+              ultimately show ?case
+                unfolding v
+                unfolding interval_bounds_real[OF v(2)]
+                apply -
+                apply(rule db(2)[of "v"])
+                using goal1 fineD[OF as(2) goal1(1)]
+                unfolding v content_eq_0
+                apply auto
+                done
             qed
-          qed(insert p(1) ab e, auto simp add:field_simps) qed auto qed qed qed qed
+          qed (insert p(1) ab e, auto simp add: field_simps)
+        qed auto
+      qed
+    qed
+  qed
+qed
+
 
 subsection {* Stronger form with finite number of exceptional points. *}
 
-lemma fundamental_theorem_of_calculus_interior_strong: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes"finite s" "a \<le> b" "continuous_on {a..b} f"
-  "\<forall>x\<in>{a<..<b} - s. (f has_vector_derivative f'(x)) (at x)"
-  shows "(f' has_integral (f b - f a)) {a..b}" using assms apply-
-proof(induct "card s" arbitrary:s a b)
-  case 0 show ?case apply(rule fundamental_theorem_of_calculus_interior) using 0 by auto
-next case (Suc n) from this(2) guess c s' apply-apply(subst(asm) eq_commute) unfolding card_Suc_eq
-    apply(subst(asm)(2) eq_commute) by(erule exE conjE)+ note cs = this[rule_format]
-  show ?case proof(cases "c\<in>{a<..<b}")
-    case False thus ?thesis apply- apply(rule Suc(1)[OF cs(3) _ Suc(4,5)]) apply safe defer
-      apply(rule Suc(6)[rule_format]) using Suc(3) unfolding cs by auto
-  next have *:"f b - f a = (f c - f a) + (f b - f c)" by auto
-    case True hence "a \<le> c" "c \<le> b" by auto
-    thus ?thesis apply(subst *) apply(rule has_integral_combine) apply assumption+
-      apply(rule_tac[!] Suc(1)[OF cs(3)]) using Suc(3) unfolding cs
-    proof- show "continuous_on {a..c} f" "continuous_on {c..b} f"
-        apply(rule_tac[!] continuous_on_subset[OF Suc(5)]) using True by auto
+lemma fundamental_theorem_of_calculus_interior_strong:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "finite s"
+    and "a \<le> b"
+    and "continuous_on {a..b} f"
+    and "\<forall>x\<in>{a<..<b} - s. (f has_vector_derivative f'(x)) (at x)"
+  shows "(f' has_integral (f b - f a)) {a..b}"
+  using assms
+proof (induct "card s" arbitrary: s a b)
+  case 0
+  show ?case
+    apply (rule fundamental_theorem_of_calculus_interior)
+    using 0
+    apply auto
+    done
+next
+  case (Suc n)
+  from this(2) guess c s'
+    apply -
+    apply (subst(asm) eq_commute)
+    unfolding card_Suc_eq
+    apply (subst(asm)(2) eq_commute)
+    apply (elim exE conjE)
+    done
+  note cs = this[rule_format]
+  show ?case
+  proof (cases "c \<in> {a<..<b}")
+    case False
+    then show ?thesis
+      apply -
+      apply (rule Suc(1)[OF cs(3) _ Suc(4,5)])
+      apply safe
+      defer
+      apply (rule Suc(6)[rule_format])
+      using Suc(3)
+      unfolding cs
+      apply auto
+      done
+  next
+    have *: "f b - f a = (f c - f a) + (f b - f c)"
+      by auto
+    case True
+    then have "a \<le> c" "c \<le> b"
+      by auto
+    then show ?thesis
+      apply (subst *)
+      apply (rule has_integral_combine)
+      apply assumption+
+      apply (rule_tac[!] Suc(1)[OF cs(3)])
+      using Suc(3)
+      unfolding cs
+    proof -
+      show "continuous_on {a..c} f" "continuous_on {c..b} f"
+        apply (rule_tac[!] continuous_on_subset[OF Suc(5)])
+        using True
+        apply auto
+        done
       let ?P = "\<lambda>i j. \<forall>x\<in>{i<..<j} - s'. (f has_vector_derivative f' x) (at x)"
-      show "?P a c" "?P c b" apply safe apply(rule_tac[!] Suc(6)[rule_format]) using True unfolding cs by auto
-    qed auto qed qed
-
-lemma fundamental_theorem_of_calculus_strong: fixes f::"real \<Rightarrow> 'a::banach"
-  assumes "finite s" "a \<le> b" "continuous_on {a..b} f"
-  "\<forall>x\<in>{a..b} - s. (f has_vector_derivative f'(x)) (at x)"
+      show "?P a c" "?P c b"
+        apply safe
+        apply (rule_tac[!] Suc(6)[rule_format])
+        using True
+        unfolding cs
+        apply auto
+        done
+    qed auto
+  qed
+qed
+
+lemma fundamental_theorem_of_calculus_strong:
+  fixes f :: "real \<Rightarrow> 'a::banach"
+  assumes "finite s"
+    and "a \<le> b"
+    and "continuous_on {a..b} f"
+    and "\<forall>x\<in>{a..b} - s. (f has_vector_derivative f'(x)) (at x)"
   shows "(f' has_integral (f(b) - f(a))) {a..b}"
-  apply(rule fundamental_theorem_of_calculus_interior_strong[OF assms(1-3), of f'])
-  using assms(4) by auto
-
-lemma indefinite_integral_continuous_left: fixes f::"real \<Rightarrow> 'a::banach"
+  apply (rule fundamental_theorem_of_calculus_interior_strong[OF assms(1-3), of f'])
+  using assms(4)
+  apply auto
+  done
+
+lemma indefinite_integral_continuous_left:
+  fixes f::"real \<Rightarrow> 'a::banach"
   assumes "f integrable_on {a..b}" "a < c" "c \<le> b" "0 < e"
   obtains d where "0 < d" "\<forall>t. c - d < t \<and> t \<le> c \<longrightarrow> norm(integral {a..c} f - integral {a..t} f) < e"
 proof- have "\<exists>w>0. \<forall>t. c - w < t \<and> t < c \<longrightarrow> norm(f c) * norm(c - t) < e / 3"
@@ -7377,7 +8505,7 @@
     thus ?thesis using integrable_integral unfolding g_def by auto }
 
   note iterate_eq_neutral[OF mon,unfolded neutral_lifted[OF monoidal_monoid]]
-  note * = this[unfolded neutral_monoid]
+  note * = this[unfolded neutral_add]
   have iterate:"iterate (lifted op +) (p - {{c..d}})
       (\<lambda>i. if g integrable_on i then Some (integral i g) else None) = Some 0"
   proof(rule *,rule) case goal1 hence "x\<in>p" by auto note div = division_ofD(2-5)[OF p(1) this]
@@ -8286,7 +9414,7 @@
   next case goal2 thus ?case apply(rule integrable_sub) using assms(1) by auto
   next case goal3 thus ?case using *[of x "Suc k" "Suc (Suc k)"] by auto
   next case goal4 thus ?case apply-apply(rule tendsto_diff)
-      using seq_offset[OF assms(3)[rule_format],of x 1] by auto
+      using LIMSEQ_ignore_initial_segment[OF assms(3)[rule_format],of x 1] by auto
   next case goal5 thus ?case using assms(4) unfolding bounded_iff
       apply safe apply(rule_tac x="a + norm (integral s (\<lambda>x. f 0 x))" in exI)
       apply safe apply(erule_tac x="integral s (\<lambda>x. f (Suc k) x)" in ballE) unfolding sub
@@ -8294,7 +9422,7 @@
   note conjunctD2[OF this] note tendsto_add[OF this(2) tendsto_const[of "integral s (f 0)"]]
     integrable_add[OF this(1) assms(1)[rule_format,of 0]]
   thus ?thesis unfolding sub apply-apply rule defer apply(subst(asm) integral_sub)
-    using assms(1) apply auto apply(rule seq_offset_rev[where k=1]) by auto qed
+    using assms(1) apply auto by(rule LIMSEQ_imp_Suc) qed
 
 lemma monotone_convergence_decreasing: fixes f::"nat \<Rightarrow> 'n::ordered_euclidean_space \<Rightarrow> real"
   assumes "\<forall>k. (f k) integrable_on s"  "\<forall>k. \<forall>x\<in>s. (f (Suc k) x) \<le> (f k x)"
@@ -9087,7 +10215,8 @@
         apply (rule_tac x=N in exI)
       proof safe
         case goal1
-        have *:"\<And>y ix. y < i + r \<longrightarrow> i \<le> ix \<longrightarrow> ix \<le> y \<longrightarrow> abs(ix - i) < r" by arith
+        have *: "\<And>y ix. y < i + r \<longrightarrow> i \<le> ix \<longrightarrow> ix \<le> y \<longrightarrow> abs(ix - i) < r"
+          by arith
         show ?case
           unfolding real_norm_def
             apply (rule *[rule_format,OF y(2)])
--- a/src/HOL/Multivariate_Analysis/Linear_Algebra.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Linear_Algebra.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -248,35 +248,36 @@
 
 subsection {* Linear functions. *}
 
-definition linear :: "('a::real_vector \<Rightarrow> 'b::real_vector) \<Rightarrow> bool"
-  where "linear f \<longleftrightarrow> (\<forall>x y. f(x + y) = f x + f y) \<and> (\<forall>c x. f(c *\<^sub>R x) = c *\<^sub>R f x)"
-
-lemma linearI:
-  assumes "\<And>x y. f (x + y) = f x + f y"
-    and "\<And>c x. f (c *\<^sub>R x) = c *\<^sub>R f x"
-  shows "linear f"
-  using assms unfolding linear_def by auto
+lemma linear_iff:
+  "linear f \<longleftrightarrow> (\<forall>x y. f(x + y) = f x + f y) \<and> (\<forall>c x. f(c *\<^sub>R x) = c *\<^sub>R f x)"
+  (is "linear f \<longleftrightarrow> ?rhs")
+proof
+  assume "linear f" then interpret f: linear f .
+  show "?rhs" by (simp add: f.add f.scaleR)
+next
+  assume "?rhs" then show "linear f" by unfold_locales simp_all
+qed
 
 lemma linear_compose_cmul: "linear f \<Longrightarrow> linear (\<lambda>x. c *\<^sub>R f x)"
-  by (simp add: linear_def algebra_simps)
+  by (simp add: linear_iff algebra_simps)
 
 lemma linear_compose_neg: "linear f \<Longrightarrow> linear (\<lambda>x. - f x)"
-  by (simp add: linear_def)
+  by (simp add: linear_iff)
 
 lemma linear_compose_add: "linear f \<Longrightarrow> linear g \<Longrightarrow> linear (\<lambda>x. f x + g x)"
-  by (simp add: linear_def algebra_simps)
+  by (simp add: linear_iff algebra_simps)
 
 lemma linear_compose_sub: "linear f \<Longrightarrow> linear g \<Longrightarrow> linear (\<lambda>x. f x - g x)"
-  by (simp add: linear_def algebra_simps)
+  by (simp add: linear_iff algebra_simps)
 
 lemma linear_compose: "linear f \<Longrightarrow> linear g \<Longrightarrow> linear (g \<circ> f)"
-  by (simp add: linear_def)
+  by (simp add: linear_iff)
 
 lemma linear_id: "linear id"
-  by (simp add: linear_def id_def)
+  by (simp add: linear_iff id_def)
 
 lemma linear_zero: "linear (\<lambda>x. 0)"
-  by (simp add: linear_def)
+  by (simp add: linear_iff)
 
 lemma linear_compose_setsum:
   assumes fS: "finite S"
@@ -288,20 +289,20 @@
   done
 
 lemma linear_0: "linear f \<Longrightarrow> f 0 = 0"
-  unfolding linear_def
+  unfolding linear_iff
   apply clarsimp
   apply (erule allE[where x="0::'a"])
   apply simp
   done
 
 lemma linear_cmul: "linear f \<Longrightarrow> f (c *\<^sub>R x) = c *\<^sub>R f x"
-  by (simp add: linear_def)
+  by (simp add: linear_iff)
 
 lemma linear_neg: "linear f \<Longrightarrow> f (- x) = - f x"
   using linear_cmul [where c="-1"] by simp
 
 lemma linear_add: "linear f \<Longrightarrow> f(x + y) = f x + f y"
-  by (metis linear_def)
+  by (metis linear_iff)
 
 lemma linear_sub: "linear f \<Longrightarrow> f(x - y) = f x - f y"
   by (simp add: diff_minus linear_add linear_neg)
@@ -354,16 +355,16 @@
 definition "bilinear f \<longleftrightarrow> (\<forall>x. linear (\<lambda>y. f x y)) \<and> (\<forall>y. linear (\<lambda>x. f x y))"
 
 lemma bilinear_ladd: "bilinear h \<Longrightarrow> h (x + y) z = h x z + h y z"
-  by (simp add: bilinear_def linear_def)
+  by (simp add: bilinear_def linear_iff)
 
 lemma bilinear_radd: "bilinear h \<Longrightarrow> h x (y + z) = h x y + h x z"
-  by (simp add: bilinear_def linear_def)
+  by (simp add: bilinear_def linear_iff)
 
 lemma bilinear_lmul: "bilinear h \<Longrightarrow> h (c *\<^sub>R x) y = c *\<^sub>R h x y"
-  by (simp add: bilinear_def linear_def)
+  by (simp add: bilinear_def linear_iff)
 
 lemma bilinear_rmul: "bilinear h \<Longrightarrow> h x (c *\<^sub>R y) = c *\<^sub>R h x y"
-  by (simp add: bilinear_def linear_def)
+  by (simp add: bilinear_def linear_iff)
 
 lemma bilinear_lneg: "bilinear h \<Longrightarrow> h (- x) y = - h x y"
   by (simp only: scaleR_minus1_left [symmetric] bilinear_lmul)
@@ -475,7 +476,7 @@
   fixes f:: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space"
   assumes lf: "linear f"
   shows "linear (adjoint f)"
-  by (simp add: lf linear_def euclidean_eq_iff[where 'a='n] euclidean_eq_iff[where 'a='m]
+  by (simp add: lf linear_iff euclidean_eq_iff[where 'a='n] euclidean_eq_iff[where 'a='m]
     adjoint_clauses[OF lf] inner_simps)
 
 lemma adjoint_adjoint:
@@ -560,6 +561,9 @@
 lemma subset_hull: "S t \<Longrightarrow> S hull s \<subseteq> t \<longleftrightarrow> s \<subseteq> t"
   unfolding hull_def by blast
 
+lemma hull_UNIV: "S hull UNIV = UNIV"
+  unfolding hull_def by auto
+
 lemma hull_unique: "s \<subseteq> t \<Longrightarrow> S t \<Longrightarrow> (\<And>t'. s \<subseteq> t' \<Longrightarrow> S t' \<Longrightarrow> t \<subseteq> t') \<Longrightarrow> (S hull s = t)"
   unfolding hull_def by auto
 
@@ -744,7 +748,7 @@
     and sS: "subspace S"
   shows "subspace (f ` S)"
   using lf sS linear_0[OF lf]
-  unfolding linear_def subspace_def
+  unfolding linear_iff subspace_def
   apply (auto simp add: image_iff)
   apply (rule_tac x="x + y" in bexI)
   apply auto
@@ -753,10 +757,10 @@
   done
 
 lemma subspace_linear_vimage: "linear f \<Longrightarrow> subspace S \<Longrightarrow> subspace (f -` S)"
-  by (auto simp add: subspace_def linear_def linear_0[of f])
+  by (auto simp add: subspace_def linear_iff linear_0[of f])
 
 lemma subspace_linear_preimage: "linear f \<Longrightarrow> subspace S \<Longrightarrow> subspace {x. f x \<in> S}"
-  by (auto simp add: subspace_def linear_def linear_0[of f])
+  by (auto simp add: subspace_def linear_iff linear_0[of f])
 
 lemma subspace_trivial: "subspace {0}"
   by (simp add: subspace_def)
@@ -984,7 +988,7 @@
     by safe (force intro: span_clauses)+
 next
   have "linear (\<lambda>(a, b). a + b)"
-    by (simp add: linear_def scaleR_add_right)
+    by (simp add: linear_iff scaleR_add_right)
   moreover have "subspace (span A \<times> span B)"
     by (intro subspace_Times subspace_span)
   ultimately show "subspace ((\<lambda>(a, b). a + b) ` (span A \<times> span B))"
@@ -1521,7 +1525,7 @@
   by (metis Basis_le_norm order_trans)
 
 lemma norm_bound_Basis_lt: "b \<in> Basis \<Longrightarrow> norm x < e \<Longrightarrow> \<bar>x \<bullet> b\<bar> < e"
-  by (metis Basis_le_norm basic_trans_rules(21))
+  by (metis Basis_le_norm le_less_trans)
 
 lemma norm_le_l1: "norm x \<le> (\<Sum>b\<in>Basis. \<bar>x \<bullet> b\<bar>)"
   apply (subst euclidean_representation[of x, symmetric])
@@ -1639,11 +1643,11 @@
   proof
     fix x y
     show "f (x + y) = f x + f y"
-      using `linear f` unfolding linear_def by simp
+      using `linear f` unfolding linear_iff by simp
   next
     fix r x
     show "f (scaleR r x) = scaleR r (f x)"
-      using `linear f` unfolding linear_def by simp
+      using `linear f` unfolding linear_iff by simp
   next
     have "\<exists>B. \<forall>x. norm (f x) \<le> B * norm x"
       using `linear f` by (rule linear_bounded)
@@ -1653,7 +1657,7 @@
 next
   assume "bounded_linear f"
   then interpret f: bounded_linear f .
-  show "linear f" by (simp add: f.add f.scaleR linear_def)
+  show "linear f" by (simp add: f.add f.scaleR linear_iff)
 qed
 
 lemma bounded_linearI':
@@ -1725,20 +1729,20 @@
   proof
     fix x y z
     show "h (x + y) z = h x z + h y z"
-      using `bilinear h` unfolding bilinear_def linear_def by simp
+      using `bilinear h` unfolding bilinear_def linear_iff by simp
   next
     fix x y z
     show "h x (y + z) = h x y + h x z"
-      using `bilinear h` unfolding bilinear_def linear_def by simp
+      using `bilinear h` unfolding bilinear_def linear_iff by simp
   next
     fix r x y
     show "h (scaleR r x) y = scaleR r (h x y)"
-      using `bilinear h` unfolding bilinear_def linear_def
+      using `bilinear h` unfolding bilinear_def linear_iff
       by simp
   next
     fix r x y
     show "h x (scaleR r y) = scaleR r (h x y)"
-      using `bilinear h` unfolding bilinear_def linear_def
+      using `bilinear h` unfolding bilinear_def linear_iff
       by simp
   next
     have "\<exists>B. \<forall>x y. norm (h x y) \<le> B * norm x * norm y"
@@ -2444,7 +2448,7 @@
      (\<forall>x\<in> span C. \<forall>c. g (c*\<^sub>R x) = c *\<^sub>R g x) \<and>
      (\<forall>x\<in> C. g x = f x)" by blast
   from g show ?thesis
-    unfolding linear_def
+    unfolding linear_iff
     using C
     apply clarsimp
     apply blast
@@ -2613,7 +2617,7 @@
 proof -
   let ?P = "{x. \<forall>y\<in> span C. f x y = g x y}"
   from bf bg have sp: "subspace ?P"
-    unfolding bilinear_def linear_def subspace_def bf bg
+    unfolding bilinear_def linear_iff subspace_def bf bg
     by (auto simp add: span_0 bilinear_lzero[OF bf] bilinear_lzero[OF bg] span_add Ball_def
       intro: bilinear_ladd[OF bf])
 
@@ -2623,7 +2627,7 @@
     apply (rule span_induct')
     apply (simp add: fg)
     apply (auto simp add: subspace_def)
-    using bf bg unfolding bilinear_def linear_def
+    using bf bg unfolding bilinear_def linear_iff
     apply (auto simp add: span_0 bilinear_rzero[OF bf] bilinear_rzero[OF bg] span_add Ball_def
       intro: bilinear_ladd[OF bf])
     done
--- a/src/HOL/Multivariate_Analysis/Path_Connected.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Path_Connected.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -587,7 +587,7 @@
 qed
 
 lemma open_path_component:
-  fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*)
+  fixes s :: "'a::real_normed_vector set"
   assumes "open s"
   shows "open {y. path_component s x y}"
   unfolding open_contains_ball
@@ -620,7 +620,7 @@
 qed
 
 lemma open_non_path_component:
-  fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*)
+  fixes s :: "'a::real_normed_vector set"
   assumes "open s"
   shows "open(s - {y. path_component s x y})"
   unfolding open_contains_ball
@@ -648,7 +648,7 @@
 qed
 
 lemma connected_open_path_connected:
-  fixes s :: "'a::real_normed_vector set" (*TODO: generalize to metric_space*)
+  fixes s :: "'a::real_normed_vector set"
   assumes "open s" "connected s"
   shows "path_connected s"
   unfolding path_connected_component_set
--- a/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -978,9 +978,6 @@
     unfolding th0 th1 by simp
 qed
 
-lemma connected_empty[simp, intro]: "connected {}"  (* FIXME duplicate? *)
-  by simp
-
 
 subsection{* Limit points *}
 
@@ -2125,32 +2122,20 @@
 
 text{* Some other lemmas about sequences. *}
 
-lemma sequentially_offset:
+lemma sequentially_offset: (* TODO: move to Topological_Spaces.thy *)
   assumes "eventually (\<lambda>i. P i) sequentially"
   shows "eventually (\<lambda>i. P (i + k)) sequentially"
-  using assms unfolding eventually_sequentially by (metis trans_le_add1)
-
-lemma seq_offset:
-  assumes "(f ---> l) sequentially"
-  shows "((\<lambda>i. f (i + k)) ---> l) sequentially"
-  using assms by (rule LIMSEQ_ignore_initial_segment) (* FIXME: redundant *)
-
-lemma seq_offset_neg:
+  using assms by (rule eventually_sequentially_seg [THEN iffD2])
+
+lemma seq_offset_neg: (* TODO: move to Topological_Spaces.thy *)
   "(f ---> l) sequentially \<Longrightarrow> ((\<lambda>i. f(i - k)) ---> l) sequentially"
-  apply (rule topological_tendstoI)
-  apply (drule (2) topological_tendstoD)
-  apply (simp only: eventually_sequentially)
-  apply (subgoal_tac "\<And>N k (n::nat). N + k <= n \<Longrightarrow> N <= n - k")
-  apply metis
+  apply (erule filterlim_compose)
+  apply (simp add: filterlim_def le_sequentially eventually_filtermap eventually_sequentially)
   apply arith
   done
 
-lemma seq_offset_rev:
-  "((\<lambda>i. f(i + k)) ---> l) sequentially \<Longrightarrow> (f ---> l) sequentially"
-  by (rule LIMSEQ_offset) (* FIXME: redundant *)
-
 lemma seq_harmonic: "((\<lambda>n. inverse (real n)) ---> 0) sequentially"
-  using LIMSEQ_inverse_real_of_nat by (rule LIMSEQ_imp_Suc)
+  using LIMSEQ_inverse_real_of_nat by (rule LIMSEQ_imp_Suc) (* TODO: move to Limits.thy *)
 
 subsection {* More properties of closed balls *}
 
--- a/src/HOL/NthRoot.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/NthRoot.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -410,17 +410,17 @@
 lemma real_sqrt_eq_iff [simp]: "(sqrt x = sqrt y) = (x = y)"
 unfolding sqrt_def by (rule real_root_eq_iff [OF pos2])
 
-lemmas real_sqrt_gt_0_iff [simp] = real_sqrt_less_iff [where x=0, simplified]
-lemmas real_sqrt_lt_0_iff [simp] = real_sqrt_less_iff [where y=0, simplified]
-lemmas real_sqrt_ge_0_iff [simp] = real_sqrt_le_iff [where x=0, simplified]
-lemmas real_sqrt_le_0_iff [simp] = real_sqrt_le_iff [where y=0, simplified]
-lemmas real_sqrt_eq_0_iff [simp] = real_sqrt_eq_iff [where y=0, simplified]
+lemmas real_sqrt_gt_0_iff [simp] = real_sqrt_less_iff [where x=0, unfolded real_sqrt_zero]
+lemmas real_sqrt_lt_0_iff [simp] = real_sqrt_less_iff [where y=0, unfolded real_sqrt_zero]
+lemmas real_sqrt_ge_0_iff [simp] = real_sqrt_le_iff [where x=0, unfolded real_sqrt_zero]
+lemmas real_sqrt_le_0_iff [simp] = real_sqrt_le_iff [where y=0, unfolded real_sqrt_zero]
+lemmas real_sqrt_eq_0_iff [simp] = real_sqrt_eq_iff [where y=0, unfolded real_sqrt_zero]
 
-lemmas real_sqrt_gt_1_iff [simp] = real_sqrt_less_iff [where x=1, simplified]
-lemmas real_sqrt_lt_1_iff [simp] = real_sqrt_less_iff [where y=1, simplified]
-lemmas real_sqrt_ge_1_iff [simp] = real_sqrt_le_iff [where x=1, simplified]
-lemmas real_sqrt_le_1_iff [simp] = real_sqrt_le_iff [where y=1, simplified]
-lemmas real_sqrt_eq_1_iff [simp] = real_sqrt_eq_iff [where y=1, simplified]
+lemmas real_sqrt_gt_1_iff [simp] = real_sqrt_less_iff [where x=1, unfolded real_sqrt_one]
+lemmas real_sqrt_lt_1_iff [simp] = real_sqrt_less_iff [where y=1, unfolded real_sqrt_one]
+lemmas real_sqrt_ge_1_iff [simp] = real_sqrt_le_iff [where x=1, unfolded real_sqrt_one]
+lemmas real_sqrt_le_1_iff [simp] = real_sqrt_le_iff [where y=1, unfolded real_sqrt_one]
+lemmas real_sqrt_eq_1_iff [simp] = real_sqrt_eq_iff [where y=1, unfolded real_sqrt_one]
 
 lemma isCont_real_sqrt: "isCont sqrt x"
 unfolding sqrt_def by (rule isCont_real_root)
--- a/src/HOL/Number_Theory/Primes.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Number_Theory/Primes.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -167,18 +167,24 @@
   by (metis div_mult_self1_is_id div_mult_self2_is_id
       int_div_less_self int_one_le_iff_zero_less zero_less_mult_pos less_le)
 
-lemma prime_dvd_power_nat [rule_format]: "prime (p::nat) -->
-    n > 0 --> (p dvd x^n --> p dvd x)"
-  by (induct n rule: nat_induct) auto
+lemma prime_dvd_power_nat: "prime (p::nat) \<Longrightarrow> p dvd x^n \<Longrightarrow> p dvd x"
+  by (induct n) auto
 
-lemma prime_dvd_power_int [rule_format]: "prime (p::int) -->
-    n > 0 --> (p dvd x^n --> p dvd x)"
-  apply (induct n rule: nat_induct)
-  apply auto
+lemma prime_dvd_power_int: "prime (p::int) \<Longrightarrow> p dvd x^n \<Longrightarrow> p dvd x"
+  apply (induct n)
   apply (frule prime_ge_0_int)
   apply auto
   done
 
+lemma prime_dvd_power_nat_iff: "prime (p::nat) \<Longrightarrow> n > 0 \<Longrightarrow>
+    p dvd x^n \<longleftrightarrow> p dvd x"
+  by (cases n) (auto elim: prime_dvd_power_nat)
+
+lemma prime_dvd_power_int_iff: "prime (p::int) \<Longrightarrow> n > 0 \<Longrightarrow>
+    p dvd x^n \<longleftrightarrow> p dvd x"
+  by (cases n) (auto elim: prime_dvd_power_int)
+
+
 subsubsection {* Make prime naively executable *}
 
 lemma zero_not_prime_nat [simp]: "~prime (0::nat)"
--- a/src/HOL/Real_Vector_Spaces.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Real_Vector_Spaces.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -934,8 +934,16 @@
 
 subsection {* Bounded Linear and Bilinear Operators *}
 
-locale bounded_linear = additive f for f :: "'a::real_normed_vector \<Rightarrow> 'b::real_normed_vector" +
+locale linear = additive f for f :: "'a::real_vector \<Rightarrow> 'b::real_vector" +
   assumes scaleR: "f (scaleR r x) = scaleR r (f x)"
+
+lemma linearI:
+  assumes "\<And>x y. f (x + y) = f x + f y"
+  assumes "\<And>c x. f (c *\<^sub>R x) = c *\<^sub>R f x"
+  shows "linear f"
+  by default (rule assms)+
+
+locale bounded_linear = linear f for f :: "'a::real_normed_vector \<Rightarrow> 'b::real_normed_vector" +
   assumes bounded: "\<exists>K. \<forall>x. norm (f x) \<le> norm x * K"
 begin
 
@@ -1547,4 +1555,3 @@
 qed
 
 end
-
--- a/src/HOL/Series.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Series.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -446,7 +446,7 @@
 lemma sumr_pos_lt_pair:
   fixes f :: "nat \<Rightarrow> real"
   shows "\<lbrakk>summable f;
-        \<forall>d. 0 < f (k + (Suc(Suc 0) * d)) + f (k + ((Suc(Suc 0) * d) + 1))\<rbrakk>
+        \<And>d. 0 < f (k + (Suc(Suc 0) * d)) + f (k + ((Suc(Suc 0) * d) + 1))\<rbrakk>
       \<Longrightarrow> setsum f {0..<k} < suminf f"
 unfolding One_nat_def
 apply (subst suminf_split_initial_segment [where k="k"])
--- a/src/HOL/TPTP/atp_theory_export.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/TPTP/atp_theory_export.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -71,7 +71,7 @@
       tracing ("Ran ATP: " ^
                (case outcome of
                   NONE => "Success"
-                | SOME failure => string_of_failure failure))
+                | SOME failure => string_of_atp_failure failure))
   in outcome end
 
 fun is_problem_line_reprovable ctxt format prelude axioms deps
--- a/src/HOL/Tools/ATP/atp_problem.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/ATP/atp_problem.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -7,21 +7,23 @@
 
 signature ATP_PROBLEM =
 sig
-  datatype ('a, 'b) ho_term =
-    ATerm of ('a * 'b list) * ('a, 'b) ho_term list |
-    AAbs of (('a * 'b) * ('a, 'b) ho_term) * ('a, 'b) ho_term list
-  datatype quantifier = AForall | AExists
-  datatype connective = ANot | AAnd | AOr | AImplies | AIff
-  datatype ('a, 'b, 'c, 'd) formula =
-    ATyQuant of quantifier * ('b * 'd list) list * ('a, 'b, 'c, 'd) formula |
-    AQuant of quantifier * ('a * 'b option) list * ('a, 'b, 'c, 'd) formula |
-    AConn of connective * ('a, 'b, 'c, 'd) formula list |
+  datatype ('a, 'b) atp_term =
+    ATerm of ('a * 'b list) * ('a, 'b) atp_term list |
+    AAbs of (('a * 'b) * ('a, 'b) atp_term) * ('a, 'b) atp_term list
+  datatype atp_quantifier = AForall | AExists
+  datatype atp_connective = ANot | AAnd | AOr | AImplies | AIff
+  datatype ('a, 'b, 'c, 'd) atp_formula =
+    ATyQuant of atp_quantifier * ('b * 'd list) list
+        * ('a, 'b, 'c, 'd) atp_formula |
+    AQuant of atp_quantifier * ('a * 'b option) list
+        * ('a, 'b, 'c, 'd) atp_formula |
+    AConn of atp_connective * ('a, 'b, 'c, 'd) atp_formula list |
     AAtom of 'c
 
-  datatype 'a ho_type =
-    AType of 'a * 'a ho_type list |
-    AFun of 'a ho_type * 'a ho_type |
-    APi of 'a list * 'a ho_type
+  datatype 'a atp_type =
+    AType of 'a * 'a atp_type list |
+    AFun of 'a atp_type * 'a atp_type |
+    APi of 'a list * 'a atp_type
 
   type term_order =
     {is_lpo : bool,
@@ -41,22 +43,22 @@
     THF of polymorphism * thf_choice * thf_defs |
     DFG of polymorphism
 
-  datatype formula_role =
+  datatype atp_formula_role =
     Axiom | Definition | Lemma | Hypothesis | Conjecture | Negated_Conjecture |
     Plain | Unknown
 
-  datatype 'a problem_line =
+  datatype 'a atp_problem_line =
     Class_Decl of string * 'a * 'a list |
     Type_Decl of string * 'a * int |
-    Sym_Decl of string * 'a * 'a ho_type |
-    Datatype_Decl of string * ('a * 'a list) list * 'a ho_type
-                     * ('a, 'a ho_type) ho_term list * bool |
-    Class_Memb of string * ('a * 'a list) list * 'a ho_type * 'a |
-    Formula of (string * string) * formula_role
-               * ('a, 'a ho_type, ('a, 'a ho_type) ho_term, 'a) formula
-               * (string, string ho_type) ho_term option
-               * (string, string ho_type) ho_term list
-  type 'a problem = (string * 'a problem_line list) list
+    Sym_Decl of string * 'a * 'a atp_type |
+    Datatype_Decl of string * ('a * 'a list) list * 'a atp_type
+                     * ('a, 'a atp_type) atp_term list * bool |
+    Class_Memb of string * ('a * 'a list) list * 'a atp_type * 'a |
+    Formula of (string * string) * atp_formula_role
+               * ('a, 'a atp_type, ('a, 'a atp_type) atp_term, 'a) atp_formula
+               * (string, string atp_type) atp_term option
+               * (string, string atp_type) atp_term list
+  type 'a atp_problem = (string * 'a atp_problem_line list) list
 
   val tptp_cnf : string
   val tptp_fof : string
@@ -89,9 +91,9 @@
   val tptp_true : string
   val tptp_empty_list : string
   val isabelle_info_prefix : string
-  val isabelle_info : string -> int -> (string, 'a) ho_term list
-  val extract_isabelle_status : (string, 'a) ho_term list -> string option
-  val extract_isabelle_rank : (string, 'a) ho_term list -> int
+  val isabelle_info : string -> int -> (string, 'a) atp_term list
+  val extract_isabelle_status : (string, 'a) atp_term list -> string option
+  val extract_isabelle_rank : (string, 'a) atp_term list -> int
   val inductionN : string
   val introN : string
   val inductiveN : string
@@ -107,37 +109,37 @@
   val is_built_in_tptp_symbol : string -> bool
   val is_tptp_variable : string -> bool
   val is_tptp_user_symbol : string -> bool
-  val bool_atype : (string * string) ho_type
-  val individual_atype : (string * string) ho_type
-  val mk_anot : ('a, 'b, 'c, 'd) formula -> ('a, 'b, 'c, 'd) formula
+  val bool_atype : (string * string) atp_type
+  val individual_atype : (string * string) atp_type
+  val mk_anot : ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'c, 'd) atp_formula
   val mk_aconn :
-    connective -> ('a, 'b, 'c, 'd) formula -> ('a, 'b, 'c, 'd) formula
-    -> ('a, 'b, 'c, 'd) formula
+    atp_connective -> ('a, 'b, 'c, 'd) atp_formula
+    -> ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'c, 'd) atp_formula
   val aconn_fold :
-    bool option -> (bool option -> 'a -> 'b -> 'b) -> connective * 'a list
+    bool option -> (bool option -> 'a -> 'b -> 'b) -> atp_connective * 'a list
     -> 'b -> 'b
   val aconn_map :
-    bool option -> (bool option -> 'a -> ('b, 'c, 'd, 'e) formula)
-    -> connective * 'a list -> ('b, 'c, 'd, 'e) formula
+    bool option -> (bool option -> 'a -> ('b, 'c, 'd, 'e) atp_formula)
+    -> atp_connective * 'a list -> ('b, 'c, 'd, 'e) atp_formula
   val formula_fold :
-    bool option -> (bool option -> 'c -> 'e -> 'e) -> ('a, 'b, 'c, 'd) formula
-    -> 'e -> 'e
+    bool option -> (bool option -> 'c -> 'e -> 'e)
+    -> ('a, 'b, 'c, 'd) atp_formula -> 'e -> 'e
   val formula_map :
-    ('c -> 'e) -> ('a, 'b, 'c, 'd) formula -> ('a, 'b, 'e, 'd) formula
-  val strip_atype : 'a ho_type -> 'a list * ('a ho_type list * 'a ho_type)
+    ('c -> 'e) -> ('a, 'b, 'c, 'd) atp_formula -> ('a, 'b, 'e, 'd) atp_formula
+  val strip_atype : 'a atp_type -> 'a list * ('a atp_type list * 'a atp_type)
   val is_format_higher_order : atp_format -> bool
-  val tptp_string_of_line : atp_format -> string problem_line -> string
+  val tptp_string_of_line : atp_format -> string atp_problem_line -> string
   val lines_of_atp_problem :
-    atp_format -> term_order -> (unit -> (string * int) list) -> string problem
-    -> string list
+    atp_format -> term_order -> (unit -> (string * int) list)
+    -> string atp_problem -> string list
   val ensure_cnf_problem :
-    (string * string) problem -> (string * string) problem
+    (string * string) atp_problem -> (string * string) atp_problem
   val filter_cnf_ueq_problem :
-    (string * string) problem -> (string * string) problem
-  val declared_in_atp_problem : 'a problem -> ('a list * 'a list) * 'a list
+    (string * string) atp_problem -> (string * string) atp_problem
+  val declared_in_atp_problem : 'a atp_problem -> ('a list * 'a list) * 'a list
   val nice_atp_problem :
-    bool -> atp_format -> ('a * (string * string) problem_line list) list
-    -> ('a * string problem_line list) list
+    bool -> atp_format -> ('a * (string * string) atp_problem_line list) list
+    -> ('a * string atp_problem_line list) list
        * (string Symtab.table * string Symtab.table) option
 end;
 
@@ -151,21 +153,23 @@
 
 (** ATP problem **)
 
-datatype ('a, 'b) ho_term =
-  ATerm of ('a * 'b list) * ('a, 'b) ho_term list |
-  AAbs of (('a * 'b) * ('a, 'b) ho_term) * ('a, 'b) ho_term list
-datatype quantifier = AForall | AExists
-datatype connective = ANot | AAnd | AOr | AImplies | AIff
-datatype ('a, 'b, 'c, 'd) formula =
-  ATyQuant of quantifier * ('b * 'd list) list * ('a, 'b, 'c, 'd) formula |
-  AQuant of quantifier * ('a * 'b option) list * ('a, 'b, 'c, 'd) formula |
-  AConn of connective * ('a, 'b, 'c, 'd) formula list |
+datatype ('a, 'b) atp_term =
+  ATerm of ('a * 'b list) * ('a, 'b) atp_term list |
+  AAbs of (('a * 'b) * ('a, 'b) atp_term) * ('a, 'b) atp_term list
+datatype atp_quantifier = AForall | AExists
+datatype atp_connective = ANot | AAnd | AOr | AImplies | AIff
+datatype ('a, 'b, 'c, 'd) atp_formula =
+  ATyQuant of atp_quantifier * ('b * 'd list) list
+      * ('a, 'b, 'c, 'd) atp_formula |
+  AQuant of atp_quantifier * ('a * 'b option) list
+      * ('a, 'b, 'c, 'd) atp_formula |
+  AConn of atp_connective * ('a, 'b, 'c, 'd) atp_formula list |
   AAtom of 'c
 
-datatype 'a ho_type =
-  AType of 'a * 'a ho_type list |
-  AFun of 'a ho_type * 'a ho_type |
-  APi of 'a list * 'a ho_type
+datatype 'a atp_type =
+  AType of 'a * 'a atp_type list |
+  AFun of 'a atp_type * 'a atp_type |
+  APi of 'a list * 'a atp_type
 
 type term_order =
   {is_lpo : bool,
@@ -185,22 +189,22 @@
   THF of polymorphism * thf_choice * thf_defs |
   DFG of polymorphism
 
-datatype formula_role =
+datatype atp_formula_role =
   Axiom | Definition | Lemma | Hypothesis | Conjecture | Negated_Conjecture |
   Plain | Unknown
 
-datatype 'a problem_line =
+datatype 'a atp_problem_line =
   Class_Decl of string * 'a * 'a list |
   Type_Decl of string * 'a * int |
-  Sym_Decl of string * 'a * 'a ho_type |
-  Datatype_Decl of string * ('a * 'a list) list * 'a ho_type
-                   * ('a, 'a ho_type) ho_term list * bool |
-  Class_Memb of string * ('a * 'a list) list * 'a ho_type * 'a |
-  Formula of (string * string) * formula_role
-             * ('a, 'a ho_type, ('a, 'a ho_type) ho_term, 'a) formula
-             * (string, string ho_type) ho_term option
-             * (string, string ho_type) ho_term list
-type 'a problem = (string * 'a problem_line list) list
+  Sym_Decl of string * 'a * 'a atp_type |
+  Datatype_Decl of string * ('a * 'a list) list * 'a atp_type
+                   * ('a, 'a atp_type) atp_term list * bool |
+  Class_Memb of string * ('a * 'a list) list * 'a atp_type * 'a |
+  Formula of (string * string) * atp_formula_role
+             * ('a, 'a atp_type, ('a, 'a atp_type) atp_term, 'a) atp_formula
+             * (string, string atp_type) atp_term option
+             * (string, string atp_type) atp_term list
+type 'a atp_problem = (string * 'a atp_problem_line list) list
 
 (* official TPTP syntax *)
 val tptp_cnf = "cnf"
--- a/src/HOL/Tools/ATP/atp_problem_generate.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/ATP/atp_problem_generate.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -8,12 +8,12 @@
 
 signature ATP_PROBLEM_GENERATE =
 sig
-  type ('a, 'b) ho_term = ('a, 'b) ATP_Problem.ho_term
-  type connective = ATP_Problem.connective
-  type ('a, 'b, 'c, 'd) formula = ('a, 'b, 'c, 'd) ATP_Problem.formula
+  type ('a, 'b) atp_term = ('a, 'b) ATP_Problem.atp_term
+  type atp_connective = ATP_Problem.atp_connective
+  type ('a, 'b, 'c, 'd) atp_formula = ('a, 'b, 'c, 'd) ATP_Problem.atp_formula
   type atp_format = ATP_Problem.atp_format
-  type formula_role = ATP_Problem.formula_role
-  type 'a problem = 'a ATP_Problem.problem
+  type atp_formula_role = ATP_Problem.atp_formula_role
+  type 'a atp_problem = 'a ATP_Problem.atp_problem
 
   datatype mode = Metis | Sledgehammer | Sledgehammer_Completish | Exporter
 
@@ -100,8 +100,9 @@
   val adjust_type_enc : atp_format -> type_enc -> type_enc
   val is_lambda_free : term -> bool
   val mk_aconns :
-    connective -> ('a, 'b, 'c, 'd) formula list -> ('a, 'b, 'c, 'd) formula
-  val unmangled_const : string -> string * (string, 'b) ho_term list
+    atp_connective -> ('a, 'b, 'c, 'd) atp_formula list
+    -> ('a, 'b, 'c, 'd) atp_formula
+  val unmangled_const : string -> string * (string, 'b) atp_term list
   val unmangled_const_name : string -> string list
   val helper_table : ((string * bool) * (status * thm) list) list
   val trans_lams_of_string :
@@ -109,13 +110,13 @@
   val string_of_status : status -> string
   val factsN : string
   val prepare_atp_problem :
-    Proof.context -> atp_format -> formula_role -> type_enc -> mode -> string
-    -> bool -> bool -> bool -> term list -> term
+    Proof.context -> atp_format -> atp_formula_role -> type_enc -> mode
+    -> string -> bool -> bool -> bool -> term list -> term
     -> ((string * stature) * term) list
-    -> string problem * string Symtab.table * (string * stature) list vector
+    -> string atp_problem * string Symtab.table * (string * stature) list vector
        * (string * term) list * int Symtab.table
-  val atp_problem_selection_weights : string problem -> (string * real) list
-  val atp_problem_term_order_info : string problem -> (string * int) list
+  val atp_problem_selection_weights : string atp_problem -> (string * real) list
+  val atp_problem_term_order_info : string atp_problem -> (string * int) list
 end;
 
 structure ATP_Problem_Generate : ATP_PROBLEM_GENERATE =
@@ -826,8 +827,8 @@
 type ifact =
   {name : string,
    stature : stature,
-   role : formula_role,
-   iformula : (string * string, typ, iterm, string * string) formula,
+   role : atp_formula_role,
+   iformula : (string * string, typ, iterm, string * string) atp_formula,
    atomic_types : typ list}
 
 fun update_iformula f ({name, stature, role, iformula, atomic_types} : ifact) =
@@ -916,9 +917,9 @@
     | term (TVar z) = AType (tvar_name z, [])
   in term end
 
-fun ho_term_of_ho_type (AType (name, tys)) =
-    ATerm ((name, []), map ho_term_of_ho_type tys)
-  | ho_term_of_ho_type _ = raise Fail "unexpected type"
+fun atp_term_of_ho_type (AType (name, tys)) =
+    ATerm ((name, []), map atp_term_of_ho_type tys)
+  | atp_term_of_ho_type _ = raise Fail "unexpected type"
 
 fun ho_type_of_type_arg type_enc T =
   if T = dummyT then NONE else SOME (raw_ho_type_of_typ type_enc T)
@@ -983,7 +984,7 @@
   if is_type_enc_native type_enc then
     (map (native_ho_type_of_typ type_enc false 0) T_args, [])
   else
-    ([], map_filter (Option.map ho_term_of_ho_type
+    ([], map_filter (Option.map atp_term_of_ho_type
                      o ho_type_of_type_arg type_enc) T_args)
 
 fun class_atom type_enc (cl, T) =
@@ -2071,10 +2072,10 @@
 fun tag_with_type ctxt mono type_enc pos T tm =
   IConst (type_tag, T --> T, [T])
   |> mangle_type_args_in_iterm type_enc
-  |> ho_term_of_iterm ctxt mono type_enc pos
+  |> atp_term_of_iterm ctxt mono type_enc pos
   |> (fn ATerm ((s, tys), tms) => ATerm ((s, tys), tms @ [tm])
        | _ => raise Fail "unexpected lambda-abstraction")
-and ho_term_of_iterm ctxt mono type_enc pos =
+and atp_term_of_iterm ctxt mono type_enc pos =
   let
     fun term site u =
       let
@@ -2112,7 +2113,7 @@
   let
     val thy = Proof_Context.theory_of ctxt
     val level = level_of_type_enc type_enc
-    val do_term = ho_term_of_iterm ctxt mono type_enc
+    val do_term = atp_term_of_iterm ctxt mono type_enc
     fun do_out_of_bound_type pos phi universal (name, T) =
       if should_guard_type ctxt mono type_enc
              (fn () => should_guard_var thy level pos phi universal name) T then
@@ -2599,7 +2600,7 @@
         val base_ary = min_ary_of sym_tab0 base_s
         fun do_const name = IConst (name, T, T_args)
         val filter_ty_args = filter_type_args_in_iterm thy ctrss type_enc
-        val ho_term_of = ho_term_of_iterm ctxt mono type_enc (SOME true)
+        val atp_term_of = atp_term_of_iterm ctxt mono type_enc (SOME true)
         val name1 as (s1, _) =
           base_name |> ary - 1 > base_ary ? aliased_uncurried (ary - 1)
         val name2 as (s2, _) = base_name |> aliased_uncurried ary
@@ -2619,7 +2620,7 @@
         val eq =
           eq_formula type_enc (atomic_types_of T)
                      (map (apsnd do_bound_type) bounds) false
-                     (ho_term_of tm1) (ho_term_of tm2)
+                     (atp_term_of tm1) (atp_term_of tm2)
       in
         ([tm1, tm2],
          [Formula ((uncurried_alias_eq_prefix ^ s2, ""), role,
--- a/src/HOL/Tools/ATP/atp_proof.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/ATP/atp_proof.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -8,14 +8,14 @@
 
 signature ATP_PROOF =
 sig
-  type ('a, 'b) ho_term = ('a, 'b) ATP_Problem.ho_term
-  type formula_role = ATP_Problem.formula_role
-  type ('a, 'b, 'c, 'd) formula = ('a, 'b, 'c, 'd) ATP_Problem.formula
-  type 'a problem = 'a ATP_Problem.problem
+  type ('a, 'b) atp_term = ('a, 'b) ATP_Problem.atp_term
+  type atp_formula_role = ATP_Problem.atp_formula_role
+  type ('a, 'b, 'c, 'd) atp_formula = ('a, 'b, 'c, 'd) ATP_Problem.atp_formula
+  type 'a atp_problem = 'a ATP_Problem.atp_problem
 
   exception UNRECOGNIZED_ATP_PROOF of unit
 
-  datatype failure =
+  datatype atp_failure =
     Unprovable |
     GaveUp |
     ProofMissing |
@@ -34,32 +34,36 @@
     InternalError |
     UnknownError of string
 
-  type step_name = string * string list
-  type 'a step = step_name * formula_role * 'a * string * step_name list
+  type atp_step_name = string * string list
+  type ('a, 'b) atp_step =
+    atp_step_name * atp_formula_role * 'a * 'b * atp_step_name list
 
-  type 'a proof = ('a, 'a, ('a, 'a) ho_term, 'a) formula step list
+  type 'a atp_proof =
+    (('a, 'a, ('a, 'a) atp_term, 'a) atp_formula, string) atp_step list
 
   val short_output : bool -> string -> string
-  val string_of_failure : failure -> string
+  val string_of_atp_failure : atp_failure -> string
   val extract_important_message : string -> string
-  val extract_known_failure :
-    (failure * string) list -> string -> failure option
+  val extract_known_atp_failure :
+    (atp_failure * string) list -> string -> atp_failure option
   val extract_tstplike_proof_and_outcome :
-    bool -> (string * string) list -> (failure * string) list -> string
-    -> string * failure option
-  val is_same_atp_step : step_name -> step_name -> bool
+    bool -> (string * string) list -> (atp_failure * string) list -> string
+    -> string * atp_failure option
+  val is_same_atp_step : atp_step_name -> atp_step_name -> bool
   val scan_general_id : string list -> string * string list
   val agsyhol_coreN : string
   val satallax_coreN : string
   val z3_tptp_coreN : string
   val parse_formula :
     string list
-    -> (string, 'a, (string, 'a) ho_term, string) formula * string list
-  val atp_proof_of_tstplike_proof : string problem -> string -> string proof
-  val clean_up_atp_proof_dependencies : string proof -> string proof
+    -> (string, 'a, (string, 'a) atp_term, string) atp_formula * string list
+  val atp_proof_of_tstplike_proof :
+    string atp_problem -> string -> string atp_proof
+  val clean_up_atp_proof_dependencies : string atp_proof -> string atp_proof
   val map_term_names_in_atp_proof :
-    (string -> string) -> string proof -> string proof
-  val nasty_atp_proof : string Symtab.table -> string proof -> string proof
+    (string -> string) -> string atp_proof -> string atp_proof
+  val nasty_atp_proof :
+    string Symtab.table -> string atp_proof -> string atp_proof
 end;
 
 structure ATP_Proof : ATP_PROOF =
@@ -70,7 +74,7 @@
 
 exception UNRECOGNIZED_ATP_PROOF of unit
 
-datatype failure =
+datatype atp_failure =
   Unprovable |
   GaveUp |
   ProofMissing |
@@ -103,37 +107,37 @@
   | involving ss =
     " involving " ^ space_implode " " (Try.serial_commas "and" (map quote ss))
 
-fun string_of_failure Unprovable = "The generated problem is unprovable."
-  | string_of_failure GaveUp = "The prover gave up."
-  | string_of_failure ProofMissing =
+fun string_of_atp_failure Unprovable = "The generated problem is unprovable."
+  | string_of_atp_failure GaveUp = "The prover gave up."
+  | string_of_atp_failure ProofMissing =
     "The prover claims the conjecture is a theorem but did not provide a proof."
-  | string_of_failure ProofIncomplete =
+  | string_of_atp_failure ProofIncomplete =
     "The prover claims the conjecture is a theorem but provided an incomplete \
     \(or unparsable) proof."
-  | string_of_failure (UnsoundProof (false, ss)) =
+  | string_of_atp_failure (UnsoundProof (false, ss)) =
     "The prover derived \"False\" using" ^ involving ss ^
     ". Specify a sound type encoding or omit the \"type_enc\" option."
-  | string_of_failure (UnsoundProof (true, ss)) =
+  | string_of_atp_failure (UnsoundProof (true, ss)) =
     "The prover derived \"False\" using" ^ involving ss ^
     ". This could be due to inconsistent axioms (including \"sorry\"s) or to \
     \a bug in Sledgehammer. If the problem persists, please contact the \
     \Isabelle developers."
-  | string_of_failure CantConnect = "Cannot connect to remote server."
-  | string_of_failure TimedOut = "Timed out."
-  | string_of_failure Inappropriate =
+  | string_of_atp_failure CantConnect = "Cannot connect to remote server."
+  | string_of_atp_failure TimedOut = "Timed out."
+  | string_of_atp_failure Inappropriate =
     "The generated problem lies outside the prover's scope."
-  | string_of_failure OutOfResources = "The prover ran out of resources."
-  | string_of_failure NoPerl = "Perl" ^ missing_message_tail
-  | string_of_failure NoLibwwwPerl =
+  | string_of_atp_failure OutOfResources = "The prover ran out of resources."
+  | string_of_atp_failure NoPerl = "Perl" ^ missing_message_tail
+  | string_of_atp_failure NoLibwwwPerl =
     "The Perl module \"libwww-perl\"" ^ missing_message_tail
-  | string_of_failure MalformedInput =
+  | string_of_atp_failure MalformedInput =
     "The generated problem is malformed. Please report this to the Isabelle \
     \developers."
-  | string_of_failure MalformedOutput = "The prover output is malformed."
-  | string_of_failure Interrupted = "The prover was interrupted."
-  | string_of_failure Crashed = "The prover crashed."
-  | string_of_failure InternalError = "An internal prover error occurred."
-  | string_of_failure (UnknownError s) =
+  | string_of_atp_failure MalformedOutput = "The prover output is malformed."
+  | string_of_atp_failure Interrupted = "The prover was interrupted."
+  | string_of_atp_failure Crashed = "The prover crashed."
+  | string_of_atp_failure InternalError = "An internal prover error occurred."
+  | string_of_atp_failure (UnknownError s) =
     "A prover error occurred" ^
     (if s = "" then ". (Pass the \"verbose\" option for details.)"
      else ":\n" ^ s)
@@ -163,7 +167,7 @@
     extract_delimited (begin_delim, end_delim) output
   | _ => ""
 
-fun extract_known_failure known_failures output =
+fun extract_known_atp_failure known_failures output =
   known_failures
   |> find_first (fn (_, pattern) => String.isSubstring pattern output)
   |> Option.map fst
@@ -171,14 +175,14 @@
 fun extract_tstplike_proof_and_outcome verbose proof_delims known_failures
                                        output =
   case (extract_tstplike_proof proof_delims output,
-        extract_known_failure known_failures output) of
+        extract_known_atp_failure known_failures output) of
     (_, SOME ProofIncomplete) => ("", NONE)
   | ("", SOME ProofMissing) => ("", NONE)
   | ("", NONE) => ("", SOME (UnknownError (short_output verbose output)))
   | res as ("", _) => res
   | (tstplike_proof, _) => (tstplike_proof, NONE)
 
-type step_name = string * string list
+type atp_step_name = string * string list
 
 fun is_same_atp_step (s1, _) (s2, _) = s1 = s2
 
@@ -193,9 +197,11 @@
     | _ => raise Fail "not Vampire"
   end
 
-type 'a step = step_name * formula_role * 'a * string * step_name list
+type ('a, 'b) atp_step =
+  atp_step_name * atp_formula_role * 'a * 'b * atp_step_name list
 
-type 'a proof = ('a, 'a, ('a, 'a) ho_term, 'a) formula step list
+type 'a atp_proof =
+  (('a, 'a, ('a, 'a) atp_term, 'a) atp_formula, string) atp_step list
 
 (**** PARSING OF TSTP FORMAT ****)
 
@@ -205,8 +211,6 @@
   || Scan.repeat ($$ "$") -- Scan.many1 Symbol.is_letdig
      >> (fn (ss1, ss2) => implode ss1 ^ implode ss2)
 
-val scan_nat = Scan.repeat1 (Scan.one Symbol.is_ascii_digit) >> implode
-
 val skip_term =
   let
     fun skip _ accum [] = (accum, [])
--- a/src/HOL/Tools/ATP/atp_proof_reconstruct.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/ATP/atp_proof_reconstruct.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -8,8 +8,8 @@
 
 signature ATP_PROOF_RECONSTRUCT =
 sig
-  type ('a, 'b) ho_term = ('a, 'b) ATP_Problem.ho_term
-  type ('a, 'b, 'c, 'd) formula = ('a, 'b, 'c, 'd) ATP_Problem.formula
+  type ('a, 'b) atp_term = ('a, 'b) ATP_Problem.atp_term
+  type ('a, 'b, 'c, 'd) atp_formula = ('a, 'b, 'c, 'd) ATP_Problem.atp_formula
 
   val metisN : string
   val full_typesN : string
@@ -28,10 +28,10 @@
   val unalias_type_enc : string -> string list
   val term_of_atp :
     Proof.context -> bool -> int Symtab.table -> typ option ->
-    (string, string) ho_term -> term
+    (string, string) atp_term -> term
   val prop_of_atp :
     Proof.context -> bool -> int Symtab.table ->
-    (string, string, (string, string) ho_term, string) formula -> term
+    (string, string, (string, string) atp_term, string) atp_formula -> term
 end;
 
 structure ATP_Proof_Reconstruct : ATP_PROOF_RECONSTRUCT =
@@ -90,9 +90,9 @@
     TFree (ww, the_default HOLogic.typeS (Variable.def_sort ctxt (ww, ~1)))
   end
 
-exception HO_TERM of (string, string) ho_term list
-exception FORMULA of
-    (string, string, (string, string) ho_term, string) formula list
+exception ATP_TERM of (string, string) atp_term list
+exception ATP_FORMULA of
+    (string, string, (string, string) atp_term, string) atp_formula list
 exception SAME of unit
 
 (* Type variables are given the basic sort "HOL.type". Some will later be
@@ -103,7 +103,7 @@
       SOME b => Type (invert_const b, Ts)
     | NONE =>
       if not (null us) then
-        raise HO_TERM [u]  (* only "tconst"s have type arguments *)
+        raise ATP_TERM [u]  (* only "tconst"s have type arguments *)
       else case unprefix_and_unascii tfree_prefix a of
         SOME b => make_tfree ctxt b
       | NONE =>
@@ -120,7 +120,7 @@
 fun type_constraint_of_term ctxt (u as ATerm ((a, _), us)) =
   case (unprefix_and_unascii class_prefix a, map (typ_of_atp ctxt) us) of
     (SOME b, [T]) => (b, T)
-  | _ => raise HO_TERM [u]
+  | _ => raise ATP_TERM [u]
 
 (* Accumulate type constraints in a formula: negative type literals. *)
 fun add_var (key, z)  = Vartab.map_default (key, []) (cons z)
@@ -178,7 +178,8 @@
       case u of
         ATerm ((s, _), us) =>
         if s = ""
-          then error "Isar proof reconstruction failed because the ATP proof contained unparsable material."
+          then error "Isar proof reconstruction failed because the ATP proof \
+                     \contains unparsable material."
         else if String.isPrefix native_type_prefix s then
           @{const True} (* ignore TPTP type information *)
         else if s = tptp_equal then
@@ -199,7 +200,7 @@
               case mangled_us @ us of
                 [typ_u, term_u] =>
                 do_term extra_ts (SOME (typ_of_atp ctxt typ_u)) term_u
-              | _ => raise HO_TERM us
+              | _ => raise ATP_TERM us
             else if s' = predicator_name then
               do_term [] (SOME @{typ bool}) (hd us)
             else if s' = app_op_name then
@@ -335,7 +336,7 @@
              | AIff => s_iff
              | ANot => raise Fail "impossible connective")
       | AAtom tm => term_of_atom ctxt textual sym_tab pos tm
-      | _ => raise FORMULA [phi]
+      | _ => raise ATP_FORMULA [phi]
   in repair_tvar_sorts (do_formula true phi Vartab.empty) end
 
 end;
--- a/src/HOL/Tools/ATP/atp_systems.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/ATP/atp_systems.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -9,8 +9,8 @@
 sig
   type term_order = ATP_Problem.term_order
   type atp_format = ATP_Problem.atp_format
-  type formula_role = ATP_Problem.formula_role
-  type failure = ATP_Proof.failure
+  type atp_formula_role = ATP_Problem.atp_formula_role
+  type atp_failure = ATP_Proof.atp_failure
 
   type slice_spec = (int * string) * atp_format * string * string * bool
   type atp_config =
@@ -20,8 +20,8 @@
        -> term_order * (unit -> (string * int) list)
           * (unit -> (string * real) list) -> string,
      proof_delims : (string * string) list,
-     known_failures : (failure * string) list,
-     prem_role : formula_role,
+     known_failures : (atp_failure * string) list,
+     prem_role : atp_formula_role,
      best_slices : Proof.context -> (real * (slice_spec * string)) list,
      best_max_mono_iters : int,
      best_max_new_mono_instances : int}
@@ -69,7 +69,7 @@
   val remote_prefix : string
   val remote_atp :
     string -> string -> string list -> (string * string) list
-    -> (failure * string) list -> formula_role
+    -> (atp_failure * string) list -> atp_formula_role
     -> (Proof.context -> slice_spec * string) -> string * (unit -> atp_config)
   val add_atp : string * (unit -> atp_config) -> theory -> theory
   val get_atp : theory -> string -> (unit -> atp_config)
@@ -91,7 +91,7 @@
 (* ATP configuration *)
 
 val default_max_mono_iters = 3 (* FUDGE *)
-val default_max_new_mono_instances = 200 (* FUDGE *)
+val default_max_new_mono_instances = 100 (* FUDGE *)
 
 type slice_spec = (int * string) * atp_format * string * string * bool
 
@@ -102,8 +102,8 @@
      -> term_order * (unit -> (string * int) list)
         * (unit -> (string * real) list) -> string,
    proof_delims : (string * string) list,
-   known_failures : (failure * string) list,
-   prem_role : formula_role,
+   known_failures : (atp_failure * string) list,
+   prem_role : atp_formula_role,
    best_slices : Proof.context -> (real * (slice_spec * string)) list,
    best_max_mono_iters : int,
    best_max_new_mono_instances : int}
@@ -225,7 +225,7 @@
      (* FUDGE *)
      K [(1.0, (((60, ""), agsyhol_thf0, "mono_native_higher", keep_lamsN, false), ""))],
    best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *),
-   best_max_new_mono_instances = default_max_new_mono_instances div 2 (* FUDGE *)}
+   best_max_new_mono_instances = default_max_new_mono_instances}
 
 val agsyhol = (agsyholN, fn () => agsyhol_config)
 
@@ -480,7 +480,7 @@
      (* FUDGE *)
      K [(1.0, (((40, ""), leo2_thf0, "mono_native_higher", keep_lamsN, false), ""))],
    best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *),
-   best_max_new_mono_instances = default_max_new_mono_instances div 2 (* FUDGE *)}
+   best_max_new_mono_instances = default_max_new_mono_instances}
 
 val leo2 = (leo2N, fn () => leo2_config)
 
@@ -502,7 +502,7 @@
      (* FUDGE *)
      K [(1.0, (((60, ""), satallax_thf0, "mono_native_higher", keep_lamsN, false), ""))],
    best_max_mono_iters = default_max_mono_iters - 1 (* FUDGE *),
-   best_max_new_mono_instances = default_max_new_mono_instances div 2 (* FUDGE *)}
+   best_max_new_mono_instances = default_max_new_mono_instances}
 
 val satallax = (satallaxN, fn () => satallax_config)
 
@@ -609,7 +609,7 @@
      |> (if Config.get ctxt force_sos then hd #> apfst (K 1.0) #> single
          else I),
    best_max_mono_iters = default_max_mono_iters,
-   best_max_new_mono_instances = default_max_new_mono_instances}
+   best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)}
 
 val vampire = (vampireN, fn () => vampire_config)
 
@@ -633,7 +633,7 @@
         (0.125, (((62, mashN), z3_tff0, "mono_native", combsN, false), "")),
         (0.125, (((31, meshN), z3_tff0, "mono_native", combsN, false), ""))],
    best_max_mono_iters = default_max_mono_iters,
-   best_max_new_mono_instances = default_max_new_mono_instances}
+   best_max_new_mono_instances = 2 * default_max_new_mono_instances (* FUDGE *)}
 
 val z3_tptp = (z3_tptpN, fn () => z3_tptp_config)
 
@@ -675,8 +675,8 @@
             "\"$ISABELLE_ATP/scripts/remote_atp\" -w 2>&1" of
           (output, 0) => split_lines output
         | (output, _) =>
-          (warning (case extract_known_failure known_perl_failures output of
-                      SOME failure => string_of_failure failure
+          (warning (case extract_known_atp_failure known_perl_failures output of
+                      SOME failure => string_of_atp_failure failure
                     | NONE => trim_line output ^ "."); [])) ()
   handle TimeLimit.TimeOut => []
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/ExpandFeatures.py	Fri Sep 13 09:31:45 2013 +0200
@@ -0,0 +1,162 @@
+'''
+Created on Aug 21, 2013
+
+@author: daniel
+'''
+
+from math import log
+from gensim import corpora, models, similarities
+
+class ExpandFeatures(object):
+
+    def __init__(self,dicts):
+        self.dicts = dicts
+        self.featureMap = {}
+        self.alpha = 0.1
+        self.featureCounts = {}
+        self.counter = 0        
+        self.corpus = []
+        self.LSIModel = models.lsimodel.LsiModel(self.corpus,num_topics=500)
+
+    def initialize(self,dicts):
+        self.dicts = dicts
+        IS = open(dicts.accFile,'r')
+        for line in IS:
+            line = line.split(':')
+            name = line[0]
+            #print 'name',name
+            nameId = dicts.nameIdDict[name]    
+            features = dicts.featureDict[nameId]
+            dependencies = dicts.dependenciesDict[nameId]   
+            x = [self.dicts.idNameDict[d] for d in dependencies]
+            #print x  
+            self.update(features, dependencies)
+            self.corpus.append([(x,1) for x in features.keys()])
+        IS.close()
+        print 'x'
+        #self.LSIModel = models.lsimodel.LsiModel(self.corpus,num_topics=500)
+        print self.LSIModel
+        print 'y'
+        
+    def update(self,features,dependencies):
+        self.counter += 1
+        self.corpus.append([(x,1) for x in features.keys()])
+        self.LSIModel.add_documents([[(x,1) for x in features.keys()]])
+        """
+        for f in features.iterkeys():
+            try:
+                self.featureCounts[f] += 1
+            except:
+                self.featureCounts[f] = 1
+            if self.featureCounts[f] > 100:
+                continue
+            try:
+                self.featureMap[f] = self.featureMap[f].intersection(features.keys())
+            except:
+                self.featureMap[f] = set(features.keys())
+            #print 'fOld',len(fMap),self.featureCounts[f],len(dependencies)
+
+            for d in dependencies[1:]:
+                #print 'dep',self.dicts.idNameDict[d]
+                dFeatures = self.dicts.featureDict[d]
+                for df in dFeatures.iterkeys():
+                    if self.featureCounts.has_key(df):
+                        if self.featureCounts[df] > 20:
+                            continue
+                    else:
+                        print df
+                    try:
+                        fMap[df] += self.alpha * (1.0 - fMap[df])
+                    except:
+                        fMap[df] = self.alpha
+            """
+            #print 'fNew',len(fMap)
+            
+    def expand(self,features):
+        #print self.corpus[:50]        
+        #print corpus
+        #tfidfmodel = models.TfidfModel(self.corpus, normalize=True)        
+        #print features.keys()        
+        #tfidfcorpus = [tfidfmodel[x] for x in self.corpus]
+        #newFeatures = LSI[[(x,1) for x in features.keys()]]
+        newFeatures = self.LSIModel[[(x,1) for x in features.keys()]]
+        print features
+        print newFeatures
+        #print newFeatures
+        
+        """
+        newFeatures = dict(features)
+        for f in features.keys():
+            try:
+                fC = self.featureCounts[f]
+            except:
+                fC = 0.5
+            newFeatures[f] = log(float(8+self.counter) / fC)
+        #nrOfFeatures = float(len(features))
+        addedCount = 0
+        alpha = 0.2
+        #"""
+        
+        """
+        consideredFeatures = []
+        while len(newFeatures) < 30:
+            #alpha = alpha * 0.5
+            minF = None
+            minFrequence = 1000000
+            for f in newFeatures.iterkeys():
+                if f in consideredFeatures:
+                    continue
+                try:
+                    if self.featureCounts[f] < minFrequence:
+                        minF = f
+                except:
+                    pass
+            if minF == None:
+                break
+            # Expand minimal feature
+            consideredFeatures.append(minF)
+            for expF in self.featureMap[minF]:
+                if not newFeatures.has_key(expF):
+                    fC = self.featureCounts[minF]
+                    newFeatures[expF] = alpha*log(float(8+self.counter) / fC)
+        #print features, newFeatures
+        #"""
+        """
+        for f in features.iterkeys():
+            try:
+                self.featureCounts[f] += 1
+            except:
+                self.featureCounts[f] = 0            
+            if self.featureCounts[f] > 10:
+                continue            
+            addedCount += 1
+            try:
+                fmap = self.featureMap[f]
+            except:
+                self.featureMap[f] = {}
+                fmap = {}
+            for nf,nv in fmap.iteritems():
+                try:
+                    newFeatures[nf] += nv
+                except:
+                    newFeatures[nf] = nv
+        if addedCount > 0: 
+            for f,w in newFeatures.iteritems():
+                newFeatures[f] = float(w)/addedCount
+        #"""                    
+        """
+        deleteF = []
+        for f,w in newFeatures.iteritems():
+            if w < 0.1:
+                deleteF.append(f)
+        for f in deleteF:
+            del newFeatures[f]
+        """
+        #print 'fold',len(features)
+        #print 'fnew',len(newFeatures)
+        return dict(newFeatures)
+
+if __name__ == "__main__":
+    pass
+    
+        
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/KNN.py	Fri Sep 13 09:31:45 2013 +0200
@@ -0,0 +1,99 @@
+'''
+Created on Aug 21, 2013
+
+@author: daniel
+'''
+
+from cPickle import dump,load
+from numpy import array
+from math import sqrt,log
+
+def cosine(f1,f2):
+    f1Norm = 0.0
+    for f in f1.keys():
+        f1Norm += f1[f] * f1[f]
+    #assert f1Norm = sum(map(lambda x,y: x*y,f1.itervalues(),f1.itervalues()))
+    f1Norm = sqrt(f1Norm) 
+    
+    f2Norm = 0.0
+    for f in f2.keys():
+        f2Norm += f2[f] * f2[f]
+    f2Norm = sqrt(f2Norm)         
+   
+    dotProduct = 0.0
+    featureIntersection = set(f1.keys()) & set(f2.keys())
+    for f in featureIntersection:
+            dotProduct += f1[f] * f2[f]
+    cosine = dotProduct / (f1Norm * f2Norm)        
+    return 1.0 - cosine
+
+def euclidean(f1,f2):
+    diffSum = 0.0        
+    featureUnion = set(f1.keys()) | set(f2.keys())
+    for f in featureUnion:
+        try:
+            f1Val = f1[f]
+        except:
+            f1Val = 0.0
+        try:
+            f2Val = f2[f]
+        except:
+            f2Val = 0.0
+        diff = f1Val - f2Val
+        diffSum += diff * diff
+        #if f in f1.keys():
+        #    diffSum += log(2+self.pointCount/self.featureCounts[f]) * diff * diff
+        #else:
+        #    diffSum += diff * diff            
+    #print diffSum,f1,f2
+    return diffSum
+
+class KNN(object):
+    '''
+    A basic KNN ranker.
+    '''
+
+    def __init__(self,dicts,metric=cosine):
+        '''
+        Constructor
+        '''
+        self.points = dicts.featureDict
+        self.metric = metric
+
+    def initializeModel(self,_trainData,_dicts):  
+        """
+        Build basic model from training data.
+        """
+        pass
+    
+    def update(self,dataPoint,features,dependencies):
+        assert self.points[dataPoint] == features
+        
+    def overwrite(self,problemId,newDependencies,dicts):
+        # Taken care of by dicts
+        pass
+    
+    def delete(self,dataPoint,features,dependencies):
+        # Taken care of by dicts
+        pass      
+    
+    def predict(self,features,accessibles,dicts):
+        predictions = map(lambda x: self.metric(features,self.points[x]),accessibles)
+        predictions = array(predictions)
+        perm = predictions.argsort()
+        return array(accessibles)[perm],predictions[perm]
+    
+    def save(self,fileName):
+        OStream = open(fileName, 'wb')
+        dump((self.points,self.metric),OStream)
+        OStream.close()
+
+    def load(self,fileName):
+        OStream = open(fileName, 'rb')
+        self.points,self.metric = load(OStream)
+        OStream.close()
+
+if __name__ == '__main__':
+    pass    
+        
+        
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/KNNs.py	Fri Sep 13 09:31:45 2013 +0200
@@ -0,0 +1,105 @@
+'''
+Created on Aug 21, 2013
+
+@author: daniel
+'''
+
+from math import log
+from KNN import KNN,cosine
+from numpy import array
+
+class KNNAdaptPointFeatures(KNN):
+    
+    def __init__(self,dicts,metric=cosine,alpha = 0.05):
+        self.points = dicts.featureDict
+        self.metric = self.euclidean    
+        self.alpha = alpha
+        self.count = 0
+        self.featureCount = {}
+
+    def initializeModel(self,trainData,dicts):  
+        """
+        Build basic model from training data.
+        """
+        IS = open(dicts.accFile,'r')
+        for line in IS:
+            line = line.split(':')
+            name = line[0]
+            nameId = dicts.nameIdDict[name]
+            features = dicts.featureDict[nameId]
+            dependencies = dicts.dependenciesDict[nameId] 
+            self.update(nameId, features, dependencies)
+        IS.close()
+        
+    def update(self,dataPoint,features,dependencies):
+        self.count += 1
+        for f in features.iterkeys():
+            try:
+                self.featureCount[f] += 1
+            except:
+                self.featureCount[f] = 1
+        for d in dependencies:
+            dFeatures = self.points[d]
+            featureUnion = set(dFeatures.keys()) | set(features.keys())
+            for f in featureUnion:
+                try:
+                    pVal = features[f]
+                except:
+                    pVal = 0.0
+                try:
+                    dVal = dFeatures[f]
+                except:
+                    dVal = 0.0
+                newDVal = dVal + self.alpha * (pVal - dVal)                
+                dFeatures[f] = newDVal           
+        
+    def euclidean(self,f1,f2):
+        diffSum = 0.0        
+        f1Set = set(f1.keys())
+        featureUnion = f1Set | set(f2.keys())
+        for f in featureUnion:
+            if not self.featureCount.has_key(f):
+                continue
+            if self.featureCount[f] == 1:
+                continue
+            try:
+                f1Val = f1[f]
+            except:
+                f1Val = 0.0
+            try:
+                f2Val = f2[f]
+            except:
+                f2Val = 0.0
+            diff = f1Val - f2Val
+            diffSum += diff * diff
+            if f in f1Set:
+                diffSum += log(2+self.count/self.featureCount[f]) * diff * diff
+            else:
+                diffSum += diff * diff            
+        #print diffSum,f1,f2
+        return diffSum 
+
+class KNNUrban(KNN):
+    def __init__(self,dicts,metric=cosine,nrOfNeighbours = 40):
+        self.points = dicts.featureDict
+        self.metric = metric    
+        self.nrOfNeighbours = nrOfNeighbours # Ignored at the moment
+    
+    def predict(self,features,accessibles,dicts):
+        predictions = map(lambda x: self.metric(features,self.points[x]),accessibles)
+        pDict = dict(zip(accessibles,predictions))
+        for a,p in zip(accessibles,predictions):
+            aDeps = dicts.dependenciesDict[a]
+            for d in aDeps:
+                pDict[d] -= p 
+        predictions = []
+        names = []
+        for n,p in pDict.items():
+            predictions.append(p)
+            names.append(n)        
+        predictions = array(predictions)
+        perm = predictions.argsort()
+        return array(names)[perm],predictions[perm]
+    
+    
+         
\ No newline at end of file
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/dictionaries.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/dictionaries.py	Fri Sep 13 09:31:45 2013 +0200
@@ -1,18 +1,13 @@
 #     Title:      HOL/Tools/Sledgehammer/MaSh/src/dictionaries.py
 #     Author:     Daniel Kuehlwein, ICIS, Radboud University Nijmegen
-#     Copyright   2012
+#     Copyright   2012-2013
 #
 # Persistent dictionaries: accessibility, dependencies, and features.
 
-'''
-Created on Jul 12, 2012
-
-@author: daniel
-'''
-
+import logging,sys
 from os.path import join
 from Queue import Queue
-from readData import create_accessible_dict,create_dependencies_dict,create_feature_dict
+from readData import create_accessible_dict,create_dependencies_dict
 from cPickle import load,dump
 
 class Dictionaries(object):
@@ -32,20 +27,17 @@
         self.dependenciesDict = {}
         self.accessibleDict = {}
         self.expandedAccessibles = {}
-        # For SInE features
-        self.useSine = False
-        self.featureCountDict = {} 
-        self.triggerFeaturesDict = {} 
-        self.featureTriggeredFormulasDict = {}
+        self.accFile =  ''
         self.changed = True
 
     """
     Init functions. nameIdDict, idNameDict, featureIdDict, articleDict get filled!
     """
-    def init_featureDict(self,featureFile,sineFeatures):
-        self.featureDict,self.maxNameId,self.maxFeatureId,self.featureCountDict,self.triggerFeaturesDict,self.featureTriggeredFormulasDict =\
-         create_feature_dict(self.nameIdDict,self.idNameDict,self.maxNameId,self.featureIdDict,self.maxFeatureId,self.featureCountDict,\
-                             self.triggerFeaturesDict,self.featureTriggeredFormulasDict,sineFeatures,featureFile)
+    def init_featureDict(self,featureFile):
+        self.create_feature_dict(featureFile)
+        #self.featureDict,self.maxNameId,self.maxFeatureId,self.featureCountDict,self.triggerFeaturesDict,self.featureTriggeredFormulasDict =\
+        # create_feature_dict(self.nameIdDict,self.idNameDict,self.maxNameId,self.featureIdDict,self.maxFeatureId,self.featureCountDict,\
+        #                     self.triggerFeaturesDict,self.featureTriggeredFormulasDict,sineFeatures,featureFile)
     def init_dependenciesDict(self,depFile):
         self.dependenciesDict = create_dependencies_dict(self.nameIdDict,depFile)
     def init_accessibleDict(self,accFile):
@@ -54,16 +46,37 @@
     def init_all(self,args):
         self.featureFileName = 'mash_features'
         self.accFileName = 'mash_accessibility'
-        self.useSine = args.sineFeatures
         featureFile = join(args.inputDir,self.featureFileName)
         depFile = join(args.inputDir,args.depFile)
-        accFile = join(args.inputDir,self.accFileName)
-        self.init_featureDict(featureFile,self.useSine)
-        self.init_accessibleDict(accFile)
+        self.accFile = join(args.inputDir,self.accFileName)
+        self.init_featureDict(featureFile)
+        self.init_accessibleDict(self.accFile)
         self.init_dependenciesDict(depFile)
         self.expandedAccessibles = {}
         self.changed = True
 
+    def create_feature_dict(self,inputFile):
+        logger = logging.getLogger('create_feature_dict')
+        self.featureDict = {}
+        IS = open(inputFile,'r')
+        for line in IS:
+            line = line.split(':')
+            name = line[0]
+            # Name Id
+            if self.nameIdDict.has_key(name):
+                logger.warning('%s appears twice in the feature file. Aborting.',name)
+                sys.exit(-1)
+            else:
+                self.nameIdDict[name] = self.maxNameId
+                self.idNameDict[self.maxNameId] = name
+                nameId = self.maxNameId
+                self.maxNameId += 1
+            features = self.get_features(line)
+            # Store results
+            self.featureDict[nameId] = features
+        IS.close()
+        return
+
     def get_name_id(self,name):
         """
         Return the Id for a name.
@@ -82,27 +95,23 @@
     def add_feature(self,featureName):
         if not self.featureIdDict.has_key(featureName):
             self.featureIdDict[featureName] = self.maxFeatureId
-            if self.useSine:
-                self.featureCountDict[self.maxFeatureId] = 0
             self.maxFeatureId += 1
             self.changed = True
         fId = self.featureIdDict[featureName]
-        if self.useSine:
-            self.featureCountDict[fId] += 1
         return fId
 
     def get_features(self,line):
-        # Feature Ids
         featureNames = [f.strip() for f in line[1].split()]
-        features = []
+        features = {}
         for fn in featureNames:
             tmp = fn.split('=')
-            weight = 1.0
+            weight = 1.0 
             if len(tmp) == 2:
                 fn = tmp[0]
                 weight = float(tmp[1])
             fId = self.add_feature(tmp[0])
-            features.append((fId,weight))
+            features[fId] = weight
+            #features[fId] = 1.0 ###
         return features
 
     def expand_accessibles(self,acc):
@@ -142,16 +151,6 @@
         self.accessibleDict[nameId] = unExpAcc
         features = self.get_features(line)
         self.featureDict[nameId] = features
-        if self.useSine:
-            # SInE Features
-            minFeatureCount = min([self.featureCountDict[f] for f,_w in features])
-            triggerFeatures = [f for f,_w in features if self.featureCountDict[f] == minFeatureCount]
-            self.triggerFeaturesDict[nameId] = triggerFeatures
-            for f in triggerFeatures:
-                if self.featureTriggeredFormulasDict.has_key(f): 
-                    self.featureTriggeredFormulasDict[f].append(nameId)
-                else:
-                    self.featureTriggeredFormulasDict[f] = [nameId]        
         self.dependenciesDict[nameId] = [self.nameIdDict[d.strip()] for d in line[2].split()]        
         self.changed = True
         return nameId
@@ -219,14 +218,12 @@
         if self.changed:
             dictsStream = open(fileName, 'wb')
             dump((self.accessibleDict,self.dependenciesDict,self.expandedAccessibles,self.featureDict,\
-                self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict,\
-                self.featureCountDict,self.triggerFeaturesDict,self.featureTriggeredFormulasDict,self.useSine),dictsStream)
+                self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict),dictsStream)
             self.changed = False
             dictsStream.close()
     def load(self,fileName):
         dictsStream = open(fileName, 'rb')
         self.accessibleDict,self.dependenciesDict,self.expandedAccessibles,self.featureDict,\
-              self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict,\
-              self.featureCountDict,self.triggerFeaturesDict,self.featureTriggeredFormulasDict,self.useSine = load(dictsStream)
+              self.featureIdDict,self.idNameDict,self.maxFeatureId,self.maxNameId,self.nameIdDict = load(dictsStream)
         self.changed = False
         dictsStream.close()
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/mash.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/mash.py	Fri Sep 13 09:31:45 2013 +0200
@@ -41,7 +41,7 @@
     path = dirname(realpath(__file__))
     spawnDaemon(os.path.join(path,'server.py'))
     serverIsUp=False
-    for _i in range(10):
+    for _i in range(20):
         # Test if server is up
         try:
             sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
@@ -118,11 +118,12 @@
                 OS.write('%s\n' % received)
         OS.close()
         IS.close()
+        
+        # Statistics
+        if args.statistics:
+            received = communicate('avgStats',args.host,args.port)
+            logger.info(received)
 
-    # Statistics
-    if args.statistics:
-        received = communicate('avgStats',args.host,args.port)
-        logger.info(received)
     if args.saveModels:
         communicate('save',args.host,args.port)
 
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/mashTest.py	Thu Sep 12 22:10:17 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-'''
-Created on Aug 20, 2013
-
-@author: daniel
-'''
-from mash import mash
-
-if __name__ == "__main__":
-    args = ['--statistics', '--init', '--inputDir', '../data/20130118/Jinja', '--log', '../tmp/auth.log', '--modelFile', '../tmp/m0', '--dictsFile', '../tmp/d0','--NBDefaultPriorWeight', '20.0', '--NBDefVal', '-15.0', '--NBPosWeight', '10.0']
-    mash(args)
-    args = ['-i', '../data/20130118/Jinja/mash_commands', '-p', '../tmp/auth.pred0', '--statistics', '--cutOff', '500', '--log', '../tmp/auth.log','--modelFile', '../tmp/m0', '--dictsFile', '../tmp/d0']
-    mash(args) 
\ No newline at end of file
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/parameters.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/parameters.py	Fri Sep 13 09:31:45 2013 +0200
@@ -22,14 +22,13 @@
     parser.add_argument('--depFile', default='mash_dependencies',
                         help='Name of the file with the premise dependencies. The file must be in inputDir. Default = mash_dependencies')
     
-    parser.add_argument('--algorithm',default='nb',help="Which learning algorithm is used. nb = Naive Bayes,predef=predefined. Default=nb.")
+    parser.add_argument('--algorithm',default='nb',help="Which learning algorithm is used. nb = Naive Bayes,KNN,predef=predefined. Default=nb.")
+    parser.add_argument('--predef',help="File containing the predefined suggestions. Only used when algorithm = predef.")
     # NB Parameters
     parser.add_argument('--NBDefaultPriorWeight',default=20.0,help="Initializes classifiers with value * p |- p. Default=20.0.",type=float)
     parser.add_argument('--NBDefVal',default=-15.0,help="Default value for unknown features. Default=-15.0.",type=float)
     parser.add_argument('--NBPosWeight',default=10.0,help="Weight value for positive features. Default=10.0.",type=float)
-    # TODO: Rename to sineFeatures
-    parser.add_argument('--sineFeatures',default=False,action='store_true',help="Uses a SInE like prior for premise lvl predictions. Default=False.")
-    parser.add_argument('--sineWeight',default=0.5,help="How much the SInE prior is weighted. Default=0.5.",type=float)
+    parser.add_argument('--expandFeatures',default=False,action='store_true',help="Learning-based feature expansion. Default=False.")
     
     parser.add_argument('--statistics',default=False,action='store_true',help="Create and show statistics for the top CUTOFF predictions.\
                         WARNING: This will make the program a lot slower! Default=False.")
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/readData.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/readData.py	Fri Sep 13 09:31:45 2013 +0200
@@ -14,55 +14,6 @@
 
 import sys,logging
 
-def create_feature_dict(nameIdDict,idNameDict,maxNameId,featureIdDict,maxFeatureId,featureCountDict,\
-                        triggerFeaturesDict,featureTriggeredFormulasDict,sineFeatures,inputFile):
-    logger = logging.getLogger('create_feature_dict')
-    featureDict = {}
-    IS = open(inputFile,'r')
-    for line in IS:
-        line = line.split(':')
-        name = line[0]
-        # Name Id
-        if nameIdDict.has_key(name):
-            logger.warning('%s appears twice in the feature file. Aborting.',name)
-            sys.exit(-1)
-        else:
-            nameIdDict[name] = maxNameId
-            idNameDict[maxNameId] = name
-            nameId = maxNameId
-            maxNameId += 1
-        # Feature Ids
-        featureNames = [f.strip() for f in line[1].split()]
-        features = []     
-        minFeatureCount = 9999999   
-        for fn in featureNames:
-            weight = 1.0
-            tmp = fn.split('=')
-            if len(tmp) == 2:
-                fn = tmp[0]
-                weight = float(tmp[1])
-            if not featureIdDict.has_key(fn):
-                featureIdDict[fn] = maxFeatureId
-                featureCountDict[maxFeatureId] = 0
-                maxFeatureId += 1
-            fId = featureIdDict[fn]
-            features.append((fId,weight))
-            if sineFeatures:
-                featureCountDict[fId] += 1
-                minFeatureCount = min(minFeatureCount,featureCountDict[fId])
-        # Store results
-        featureDict[nameId] = features
-        if sineFeatures:
-            triggerFeatures = [f for f,_w in features if featureCountDict[f] == minFeatureCount]
-            triggerFeaturesDict[nameId] = triggerFeatures
-            for f in triggerFeatures:
-                if featureTriggeredFormulasDict.has_key(f): 
-                    featureTriggeredFormulasDict[f].append(nameId)
-                else:
-                    featureTriggeredFormulasDict[f] = [nameId]
-    IS.close()
-    return featureDict,maxNameId,maxFeatureId,featureCountDict,triggerFeaturesDict,featureTriggeredFormulasDict
-
 def create_dependencies_dict(nameIdDict,inputFile):
     logger = logging.getLogger('create_dependencies_dict')
     dependenciesDict = {}
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/server.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/server.py	Fri Sep 13 09:31:45 2013 +0200
@@ -7,10 +7,15 @@
 
 import SocketServer,os,string,logging
 from multiprocessing import Manager
+from threading import Timer
 from time import time
 from dictionaries import Dictionaries
 from parameters import init_parser
 from sparseNaiveBayes import sparseNBClassifier
+from KNN import KNN,euclidean
+from KNNs import KNNAdaptPointFeatures,KNNUrban
+from predefined import Predefined
+#from ExpandFeatures import ExpandFeatures
 from stats import Statistics
 
 
@@ -19,6 +24,21 @@
         SocketServer.ThreadingTCPServer.__init__(self,*args, **kwargs)
         self.manager = Manager()
         self.lock = Manager().Lock()
+        self.idle_timeout = 28800.0 # 8 hours in seconds
+        self.idle_timer = Timer(self.idle_timeout, self.shutdown)
+        self.idle_timer.start()        
+        
+    def save(self):
+        # Save Models
+        self.model.save(self.args.modelFile)
+        self.dicts.save(self.args.dictsFile)
+        if not self.args.saveStats == None:
+            statsFile = os.path.join(self.args.outputDir,self.args.saveStats)
+            self.stats.save(statsFile)   
+               
+    def save_and_shutdown(self):     
+        self.save()          
+        self.shutdown()
 
 class MaShHandler(SocketServer.BaseRequestHandler):
 
@@ -28,25 +48,32 @@
         else:
             argv = argv.split(';')
             self.server.args = init_parser(argv)
-        # Pick model
-        if self.server.args.algorithm == 'nb':
-            self.server.model = sparseNBClassifier(self.server.args.NBDefaultPriorWeight,self.server.args.NBPosWeight,self.server.args.NBDefVal)
-        else: # Default case
-            self.server.model = sparseNBClassifier(self.server.args.NBDefaultPriorWeight,self.server.args.NBPosWeight,self.server.args.NBDefVal)
         # Load all data
-        # TODO: rewrite dicts for concurrency and without sine
         self.server.dicts = Dictionaries()
         if os.path.isfile(self.server.args.dictsFile):
             self.server.dicts.load(self.server.args.dictsFile)            
         elif self.server.args.init:
             self.server.dicts.init_all(self.server.args)
+        # Pick model
+        if self.server.args.algorithm == 'nb':
+            self.server.model = sparseNBClassifier(self.server.args.NBDefaultPriorWeight,self.server.args.NBPosWeight,self.server.args.NBDefVal)
+        elif self.server.args.algorithm == 'KNN':
+            #self.server.model = KNN(self.server.dicts)
+            self.server.model = KNNAdaptPointFeatures(self.server.dicts)
+        elif self.server.args.algorithm == 'predef':
+            self.server.model = Predefined(self.server.args.predef)
+        else: # Default case
+            self.server.model = sparseNBClassifier(self.server.args.NBDefaultPriorWeight,self.server.args.NBPosWeight,self.server.args.NBDefVal)
+#        if self.server.args.expandFeatures:
+#            self.server.expandFeatures = ExpandFeatures(self.server.dicts)
+#            self.server.expandFeatures.initialize(self.server.dicts)
         # Create Model
         if os.path.isfile(self.server.args.modelFile):
             self.server.model.load(self.server.args.modelFile)          
         elif self.server.args.init:
             trainData = self.server.dicts.featureDict.keys()
             self.server.model.initializeModel(trainData,self.server.dicts)
-            
+           
         if self.server.args.statistics:
             self.server.stats = Statistics(self.server.args.cutOff)
             self.server.statementCounter = 1
@@ -77,6 +104,8 @@
                 self.server.logger.debug('Poor predictions: %s',bp)
             self.server.statementCounter += 1
 
+#        if self.server.args.expandFeatures:
+#            self.server.expandFeatures.update(self.server.dicts.featureDict[problemId],self.server.dicts.dependenciesDict[problemId])
         # Update Dependencies, p proves p
         self.server.dicts.dependenciesDict[problemId] = [problemId]+self.server.dicts.dependenciesDict[problemId]
         self.server.model.update(problemId,self.server.dicts.featureDict[problemId],self.server.dicts.dependenciesDict[problemId])
@@ -92,22 +121,25 @@
         self.server.computeStats = True
         if self.server.args.algorithm == 'predef':
             return
-        name,features,accessibles,hints,numberOfPredictions = self.server.dicts.parse_problem(self.data)  
+        name,features,accessibles,hints,numberOfPredictions = self.server.dicts.parse_problem(self.data)
         if numberOfPredictions == None:
             numberOfPredictions = self.server.args.numberOfPredictions
         if not hints == []:
             self.server.model.update('hints',features,hints)
-        
+#        if self.server.args.expandFeatures:
+#            features = self.server.expandFeatures.expand(features)
         # Create predictions
         self.server.logger.debug('Starting computation for line %s',self.server.callCounter)
-        predictionsFeatures = features                    
-        self.server.predictions,predictionValues = self.server.model.predict(predictionsFeatures,accessibles,self.server.dicts)
+                
+        self.server.predictions,predictionValues = self.server.model.predict(features,accessibles,self.server.dicts)
         assert len(self.server.predictions) == len(predictionValues)
         self.server.logger.debug('Time needed: '+str(round(time()-self.startTime,2)))
 
         # Output        
         predictionNames = [str(self.server.dicts.idNameDict[p]) for p in self.server.predictions[:numberOfPredictions]]
-        predictionValues = [str(x) for x in predictionValues[:numberOfPredictions]]
+        #predictionValues = [str(x) for x in predictionValues[:numberOfPredictions]]
+        #predictionsStringList = ['%s=%s' % (predictionNames[i],predictionValues[i]) for i in range(len(predictionNames))]
+        #predictionsString = string.join(predictionsStringList,' ')
         predictionsString = string.join(predictionNames,' ')
         outString = '%s: %s' % (name,predictionsString)
         self.request.sendall(outString)
@@ -115,27 +147,18 @@
     def shutdown(self,saveModels=True):
         self.request.sendall('Shutting down server.')
         if saveModels:
-            self.save()
+            self.server.save()
         self.server.shutdown()
     
-    def save(self):
-        # Save Models
-        self.server.model.save(self.server.args.modelFile)
-        self.server.dicts.save(self.server.args.dictsFile)
-        if not self.server.args.saveStats == None:
-            statsFile = os.path.join(self.server.args.outputDir,self.server.args.saveStats)
-            self.server.stats.save(statsFile)
-    
     def handle(self):
         # self.request is the TCP socket connected to the client
         self.data = self.request.recv(4194304).strip()
         self.server.lock.acquire()
-        #print "{} wrote:".format(self.client_address[0])
         self.startTime = time()  
         if self.data == 'shutdown':
             self.shutdown()         
         elif self.data == 'save':
-            self.save()       
+            self.server.save()       
         elif self.data.startswith('i'):            
             self.init(self.data[2:])
         elif self.data.startswith('!'):
@@ -153,15 +176,16 @@
         else:
             self.request.sendall('Unspecified input format: \n%s',self.data)
         self.server.callCounter += 1
+        # Update idle shutdown timer
+        self.server.idle_timer.cancel()
+        self.server.idle_timer = Timer(self.server.idle_timeout, self.server.save_and_shutdown)
+        self.server.idle_timer.start()        
         self.server.lock.release()
 
 if __name__ == "__main__":
     HOST, PORT = "localhost", 9255
-    #print 'Started Server'
-    # Create the server, binding to localhost on port 9999
     SocketServer.TCPServer.allow_reuse_address = True
     server = ThreadingTCPServer((HOST, PORT), MaShHandler)
-    #server = SocketServer.TCPServer((HOST, PORT), MaShHandler)
 
     # Activate the server; this will keep running until you
     # interrupt the program with Ctrl-C
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/sparseNaiveBayes.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/sparseNaiveBayes.py	Fri Sep 13 09:31:45 2013 +0200
@@ -36,7 +36,7 @@
             dFeatureCounts = {}
             # Add p proves p with weight self.defaultPriorWeight
             if not self.defaultPriorWeight == 0:            
-                for f,_w in dicts.featureDict[d]:
+                for f in dicts.featureDict[d].iterkeys():
                     dFeatureCounts[f] = self.defaultPriorWeight
             self.counts[d] = [self.defaultPriorWeight,dFeatureCounts]
 
@@ -44,7 +44,7 @@
             for dep in keyDeps:
                 self.counts[dep][0] += 1
                 depFeatures = dicts.featureDict[key]
-                for f,_w in depFeatures:
+                for f in depFeatures.iterkeys():
                     if self.counts[dep][1].has_key(f):
                         self.counts[dep][1][f] += 1
                     else:
@@ -59,12 +59,12 @@
             dFeatureCounts = {}            
             # Give p |- p a higher weight
             if not self.defaultPriorWeight == 0:               
-                for f,_w in features:
+                for f in features.iterkeys():
                     dFeatureCounts[f] = self.defaultPriorWeight
             self.counts[dataPoint] = [self.defaultPriorWeight,dFeatureCounts]            
         for dep in dependencies:
             self.counts[dep][0] += 1
-            for f,_w in features:
+            for f in features.iterkeys():
                 if self.counts[dep][1].has_key(f):
                     self.counts[dep][1][f] += 1
                 else:
@@ -97,12 +97,14 @@
         """
         tau = 0.05 # Jasmin, change value here
         predictions = []
+        #observedFeatures = [f for f,_w in features]
+        observedFeatures = features.keys()
         for a in accessibles:
             posA = self.counts[a][0]
             fA = set(self.counts[a][1].keys())
             fWeightsA = self.counts[a][1]
             resultA = log(posA)
-            for f,w in features:
+            for f,w in features.iteritems():
                 # DEBUG
                 #w = 1.0
                 if f in fA:
@@ -114,9 +116,10 @@
                 else:
                     resultA += w*self.defVal
             if not tau == 0.0:
-                observedFeatures = [f for f,_w in features]
                 missingFeatures = list(fA.difference(observedFeatures))
-                sumOfWeights = sum([log(float(fWeightsA[x])/posA) for x in missingFeatures])
+                #sumOfWeights = sum([log(float(fWeightsA[x])/posA) for x in missingFeatures])  # slower
+                sumOfWeights = sum([log(float(fWeightsA[x])) for x in missingFeatures]) - log(posA) * len(missingFeatures) #DEFAULT
+                #sumOfWeights = sum([log(float(fWeightsA[x])/self.totalFeatureCounts[x]) for x in missingFeatures]) - log(posA) * len(missingFeatures)
                 resultA -= tau * sumOfWeights
             predictions.append(resultA)
         predictions = array(predictions)
--- a/src/HOL/Tools/Sledgehammer/MaSh/src/stats.py	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/MaSh/src/stats.py	Fri Sep 13 09:31:45 2013 +0200
@@ -97,7 +97,7 @@
                     badPreds.append(dep)
                     recall100 = len(predictions)+1
                     positives+=1
-            self.logger.debug('Dependencies missing for %s in accessibles! Estimating Statistics.',\
+            self.logger.debug('Dependencies missing for %s in cutoff predictions! Estimating Statistics.',\
                               string.join([str(dep) for dep in missing],','))
 
         if positives == 0 or negatives == 0:
@@ -113,7 +113,7 @@
         self.badPreds = badPreds
         self.avgAvailable += available
         self.avgDepNr += depNr
-        self.logger.info('Statement: %s: AUC: %s \t Needed: %s \t Recall100: %s \t Available: %s \t cutOff:%s',\
+        self.logger.info('Statement: %s: AUC: %s \t Needed: %s \t Recall100: %s \t Available: %s \t cutOff: %s',\
                           statementCounter,round(100*auc,2),depNr,recall100,available,self.cutOff)
 
     def printAvg(self):
@@ -135,7 +135,7 @@
         else:
             medianrecall100 = float(sorted(self.recall100Median)[nrDataPoints/2] + sorted(self.recall100Median)[nrDataPoints/2 + 1])/2
 
-        returnString = 'avgAUC: %s \t medianAUC: %s \t avgDepNr: %s \t avgRecall100: %s \t medianRecall100: %s \t cutOff:%s' %\
+        returnString = 'avgAUC: %s \t medianAUC: %s \t avgDepNr: %s \t avgRecall100: %s \t medianRecall100: %s \t cutOff: %s' %\
                          (round(100*self.avgAUC/self.problems,2),round(100*medianAUC,2),round(self.avgDepNr/self.problems,2),round(self.avgRecall100/self.problems,2),round(medianrecall100,2),self.cutOff)
         self.logger.info(returnString)
         return returnString
@@ -143,44 +143,6 @@
         """
         self.logger.info('avgAUC: %s \t medianAUC: %s \t avgDepNr: %s \t avgRecall100: %s \t medianRecall100: %s \t cutOff:%s', \
                          round(100*self.avgAUC/self.problems,2),round(100*medianAUC,2),round(self.avgDepNr/self.problems,2),round(self.avgRecall100/self.problems,2),round(medianrecall100,2),self.cutOff)
-
-        #try:
-        #if True:
-        if False:
-            from matplotlib.pyplot import plot,figure,show,xlabel,ylabel,axis,hist
-            avgRecall = [float(x)/self.problems for x in self.recallData]
-            figure('Recall')
-            plot(range(self.cutOff),avgRecall)
-            ylabel('Average Recall')
-            xlabel('Highest ranked premises')
-            axis([0,self.cutOff,0.0,1.0])
-            figure('100%Recall')
-            plot(range(self.cutOff),self.recall100Data)
-            ylabel('100%Recall')
-            xlabel('Highest ranked premises')
-            axis([0,self.cutOff,0,self.problems])
-            figure('AUC Histogram')
-            hist(self.aucData,bins=100)
-            ylabel('Problems')
-            xlabel('AUC')
-            maxCount = max(self.premiseOccurenceCounter.values())
-            minCount = min(self.premiseOccurenceCounter.values())
-            figure('Dependency Occurances')
-            hist(self.premiseOccurenceCounter.values(),bins=range(minCount,maxCount+2),align = 'left')
-            #ylabel('Occurences')
-            xlabel('Number of Times a Dependency Occurs')
-            figure('Dependency Appearance in Problems after Introduction.')
-            hist(self.depAppearances,bins=50)
-            figure('Dependency Appearance in Problems after Introduction in Percent.')
-            xAxis = range(max(self.depAppearances)+1)
-            yAxis = [0] * (max(self.depAppearances)+1)
-            for val in self.depAppearances:
-                yAxis[val] += 1
-            yAxis = [float(x)/len(self.firstDepAppearance.keys()) for x in yAxis]
-            plot(xAxis,yAxis)
-            show()
-        #except:
-        #    self.logger.warning('Matplotlib module missing. Skipping graphs.')
         """
 
     def save(self,fileName):
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_fact.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -187,10 +187,20 @@
    "ext_cases", "eq.simps", "eq.refl", "nchotomy", "case_cong",
    "weak_case_cong", "nat_of_char_simps", "nibble.simps",
    "nibble.distinct"]
-  |> not (ho_atp orelse (Config.get ctxt instantiate_inducts)) ?
+  |> not (ho_atp orelse Config.get ctxt instantiate_inducts) ?
         append ["induct", "inducts"]
   |> map (prefix Long_Name.separator)
 
+(* The maximum apply depth of any "metis" call in "Metis_Examples" (on
+   2007-10-31) was 11. *)
+val max_apply_depth = 18
+
+fun apply_depth (f $ t) = Int.max (apply_depth f, apply_depth t + 1)
+  | apply_depth (Abs (_, _, t)) = apply_depth t
+  | apply_depth _ = 0
+
+fun is_too_complex t = apply_depth t > max_apply_depth
+
 (* FIXME: Ad hoc list *)
 val technical_prefixes =
   ["ATP", "Code_Evaluation", "Datatype", "Enum", "Lazy_Sequence",
@@ -229,7 +239,8 @@
       | is_interesting_subterm (Free _) = true
       | is_interesting_subterm _ = false
     fun interest_of_bool t =
-      if exists_Const (is_technical_const orf is_low_level_class_const) t then
+      if exists_Const (is_technical_const orf is_low_level_class_const orf
+                       type_has_top_sort o snd) t then
         Deal_Breaker
       else if exists_type (exists_subtype (curry (op =) @{typ prop})) t orelse
               not (exists_subterm is_interesting_subterm t) then
@@ -437,10 +448,21 @@
 
 fun fact_of_raw_fact ((name, stature), th) = ((name (), stature), th)
 
+fun fact_count facts = Facts.fold_static (K (Integer.add 1)) facts 0
+
+(* gracefully handle huge background theories *)
+val max_facts_for_complex_check = 25000
+
 fun all_facts ctxt generous ho_atp reserved add_ths chained css =
   let
     val thy = Proof_Context.theory_of ctxt
     val global_facts = Global_Theory.facts_of thy
+    val is_too_complex =
+      if generous orelse
+         fact_count global_facts >= max_facts_for_complex_check then
+        K false
+      else
+        is_too_complex
     val local_facts = Proof_Context.facts_of ctxt
     val named_locals = local_facts |> Facts.dest_static []
     val assms = Assumption.all_assms_of ctxt
@@ -473,7 +495,8 @@
             #> fold_rev (fn th => fn (j, accum) =>
                    (j - 1,
                     if not (member Thm.eq_thm_prop add_ths th) andalso
-                       is_likely_tautology_too_meta_or_too_technical th then
+                       (is_likely_tautology_too_meta_or_too_technical th orelse
+                        is_too_complex (prop_of th)) then
                       accum
                     else
                       let
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mash.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -165,12 +165,12 @@
     val model_dir = File.shell_path (mash_model_dir ())
     val core = "--inputFile " ^ cmd_file ^ " --predictions " ^ sugg_file
     val command =
-      "cd \"$ISABELLE_SLEDGEHAMMER_MASH\"/src; " ^
-      "./mash.py --quiet" ^
-      " --outputDir " ^ model_dir ^
-      " --modelFile=" ^ model_dir ^ "/model.pickle" ^
-      " --dictsFile=" ^ model_dir ^ "/dict.pickle" ^
-      " --log " ^ log_file ^ " " ^ core ^
+      "cd \"$ISABELLE_SLEDGEHAMMER_MASH\"/src; \
+      \PYTHONDONTWRITEBYTECODE=y ./mash.py --quiet\
+      \ --outputDir " ^ model_dir ^
+      " --modelFile=" ^ model_dir ^ "/model.pickle\
+      \ --dictsFile=" ^ model_dir ^ "/dict.pickle\
+      \ --log " ^ log_file ^ " " ^ core ^
       (if extra_args = [] then "" else " " ^ space_implode " " extra_args) ^
       " >& " ^ err_file ^
       (if background then " &" else "")
@@ -218,9 +218,15 @@
   Date.fmt ".%Y%m%d_%H%M%S__" (Date.fromTimeLocal (Time.now ())) ^
   serial_string ()
 
+(* Avoid scientific notation *)
+fun safe_str_of_real r =
+  if r < 0.00001 then "0.00001"
+  else if r >= 1000000.0 then "1000000"
+  else Markup.print_real r
+
 fun encode_feature (name, weight) =
   encode_str name ^
-  (if Real.== (weight, 1.0) then "" else "=" ^ Markup.print_real weight)
+  (if Real.== (weight, 1.0) then "" else "=" ^ safe_str_of_real weight)
 
 val encode_features = map encode_feature #> space_implode " "
 
@@ -445,7 +451,8 @@
 
 end
 
-fun mash_unlearn ctxt ({overlord, ...} : params) = clear_state ctxt overlord
+fun mash_unlearn ctxt ({overlord, ...} : params) =
+  (clear_state ctxt overlord; Output.urgent_message "Reset MaSh.")
 
 
 (*** Isabelle helpers ***)
@@ -944,14 +951,17 @@
   fold (fn s => Symtab.map_default (s, 0) (Integer.add 1))
        (Term.add_const_names t [])
 
-fun mash_suggested_facts ctxt ({overlord, ...} : params) prover max_facts hyp_ts
-                         concl_t facts =
+fun mash_suggested_facts ctxt ({debug, overlord, ...} : params) prover max_facts
+                         hyp_ts concl_t facts =
   let
     val thy = Proof_Context.theory_of ctxt
+    val thy_name = Context.theory_name thy
     val facts = facts |> sort (crude_thm_ord o pairself snd o swap)
     val chained = facts |> filter (fn ((_, (scope, _)), _) => scope = Chained)
     val num_facts = length facts
     val const_tab = fold (add_const_counts o prop_of o snd) facts Symtab.empty
+    fun fact_has_right_theory (_, th) =
+      thy_name = Context.theory_name (theory_of_thm th)
     fun chained_or_extra_features_of factor (((_, stature), th), weight) =
       [prop_of th]
       |> features_of ctxt prover (theory_of_thm th) num_facts const_tab stature
@@ -964,8 +974,8 @@
             let
               val parents = maximal_wrt_access_graph access_G facts
               val goal_feats =
-                features_of ctxt prover thy num_facts const_tab
-                            (Local, General) (concl_t :: hyp_ts)
+                features_of ctxt prover thy num_facts const_tab (Local, General)
+                            (concl_t :: hyp_ts)
               val chained_feats =
                 chained
                 |> map (rpair 1.0)
@@ -974,12 +984,14 @@
               val extra_feats =
                 facts
                 |> take (Int.max (0, num_extra_feature_facts - length chained))
+                |> filter fact_has_right_theory
                 |> weight_facts_steeply
                 |> map (chained_or_extra_features_of extra_feature_factor)
                 |> rpair [] |-> fold (union (eq_fst (op =)))
               val feats =
                 fold (union (eq_fst (op =))) [chained_feats, extra_feats]
                      goal_feats
+                |> debug ? sort (Real.compare o swap o pairself snd)
               val hints =
                 chained |> filter (is_fact_in_graph access_G o snd)
                         |> map (nickname_of_thm o snd)
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_mepo.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -15,7 +15,6 @@
 
   val trace : bool Config.T
   val pseudo_abs_name : string
-  val pseudo_skolem_prefix : string
   val mepo_suggested_facts :
     Proof.context -> params -> string -> int -> relevance_fudge option
     -> term list -> term -> raw_fact list -> fact list
@@ -30,12 +29,11 @@
 open Sledgehammer_Provers
 
 val trace =
-  Attrib.setup_config_bool @{binding sledgehammer_filter_iter_trace} (K false)
+  Attrib.setup_config_bool @{binding sledgehammer_mepo_trace} (K false)
 fun trace_msg ctxt msg = if Config.get ctxt trace then tracing (msg ()) else ()
 
 val sledgehammer_prefix = "Sledgehammer" ^ Long_Name.separator
 val pseudo_abs_name = sledgehammer_prefix ^ "abs"
-val pseudo_skolem_prefix = sledgehammer_prefix ^ "sko"
 val theory_const_suffix = Long_Name.separator ^ " 1"
 
 fun order_of_type (Type (@{type_name fun}, [T1, T2])) =
@@ -88,9 +86,7 @@
 
 (* Add a pconstant to the table, but a [] entry means a standard
    connective, which we ignore.*)
-fun add_pconst_to_table also_skolem (s, p) =
-  if (not also_skolem andalso String.isPrefix pseudo_skolem_prefix s) then I
-  else Symtab.map_default (s, [p]) (insert (op =) p)
+fun add_pconst_to_table (s, p) = Symtab.map_default (s, [p]) (insert (op =) p)
 
 (* Set constants tend to pull in too many irrelevant facts. We limit the damage
    by treating them more or less as if they were built-in but add their
@@ -98,20 +94,15 @@
 val set_consts = [@{const_name Collect}, @{const_name Set.member}]
 val set_thms = @{thms Collect_mem_eq mem_Collect_eq Collect_cong}
 
-fun add_pconsts_in_term thy is_built_in_const also_skolems pos =
+fun add_pconsts_in_term thy is_built_in_const =
   let
-    val flip = Option.map not
-    (* We include free variables, as well as constants, to handle locales. For
-       each quantifiers that must necessarily be skolemized by the automatic
-       prover, we introduce a fresh constant to simulate the effect of
-       Skolemization. *)
     fun do_const const ext_arg (x as (s, _)) ts =
       let val (built_in, ts) = is_built_in_const x ts in
         if member (op =) set_consts s then
           fold (do_term ext_arg) ts
         else
           (not built_in
-           ? add_pconst_to_table also_skolems (rich_pconst thy const x))
+           ? add_pconst_to_table (rich_pconst thy const x))
           #> fold (do_term false) ts
       end
     and do_term ext_arg t =
@@ -123,61 +114,45 @@
          (* Since lambdas on the right-hand side of equalities are usually
             extensionalized later by "abs_extensionalize_term", we don't
             penalize them here. *)
-         ? add_pconst_to_table true (pseudo_abs_name,
-                                     PType (order_of_type T + 1, [])))
+         ? add_pconst_to_table (pseudo_abs_name,
+                                PType (order_of_type T + 1, [])))
         #> fold (do_term false) (t' :: ts)
       | (_, ts) => fold (do_term false) ts
-    fun do_quantifier will_surely_be_skolemized abs_T body_t =
-      do_formula pos body_t
-      #> (if also_skolems andalso will_surely_be_skolemized then
-            add_pconst_to_table true (pseudo_skolem_prefix ^ serial_string (),
-                                      PType (order_of_type abs_T, []))
-          else
-            I)
     and do_term_or_formula ext_arg T =
-      if T = HOLogic.boolT then do_formula NONE else do_term ext_arg
-    and do_formula pos t =
+      if T = HOLogic.boolT then do_formula else do_term ext_arg
+    and do_formula t =
       case t of
-        Const (@{const_name all}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T t'
-      | @{const "==>"} $ t1 $ t2 =>
-        do_formula (flip pos) t1 #> do_formula pos t2
+        Const (@{const_name all}, _) $ Abs (_, T, t') => do_formula t'
+      | @{const "==>"} $ t1 $ t2 => do_formula t1 #> do_formula t2
       | Const (@{const_name "=="}, Type (_, [T, _])) $ t1 $ t2 =>
         do_term_or_formula false T t1 #> do_term_or_formula true T t2
-      | @{const Trueprop} $ t1 => do_formula pos t1
+      | @{const Trueprop} $ t1 => do_formula t1
       | @{const False} => I
       | @{const True} => I
-      | @{const Not} $ t1 => do_formula (flip pos) t1
-      | Const (@{const_name All}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T t'
-      | Const (@{const_name Ex}, _) $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME true) T t'
-      | @{const HOL.conj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
-      | @{const HOL.disj} $ t1 $ t2 => fold (do_formula pos) [t1, t2]
-      | @{const HOL.implies} $ t1 $ t2 =>
-        do_formula (flip pos) t1 #> do_formula pos t2
+      | @{const Not} $ t1 => do_formula t1
+      | Const (@{const_name All}, _) $ Abs (_, T, t') => do_formula t'
+      | Const (@{const_name Ex}, _) $ Abs (_, T, t') => do_formula t'
+      | @{const HOL.conj} $ t1 $ t2 => do_formula t1 #> do_formula t2
+      | @{const HOL.disj} $ t1 $ t2 => do_formula t1 #> do_formula t2
+      | @{const HOL.implies} $ t1 $ t2 => do_formula t1 #> do_formula t2
       | Const (@{const_name HOL.eq}, Type (_, [T, _])) $ t1 $ t2 =>
         do_term_or_formula false T t1 #> do_term_or_formula true T t2
       | Const (@{const_name If}, Type (_, [_, Type (_, [T, _])]))
         $ t1 $ t2 $ t3 =>
-        do_formula NONE t1 #> fold (do_term_or_formula false T) [t2, t3]
-      | Const (@{const_name Ex1}, _) $ Abs (_, T, t') =>
-        do_quantifier (is_some pos) T t'
+        do_formula t1 #> fold (do_term_or_formula false T) [t2, t3]
+      | Const (@{const_name Ex1}, _) $ Abs (_, T, t') => do_formula t'
       | Const (@{const_name Ball}, _) $ t1 $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME false) T
-                      (HOLogic.mk_imp (incr_boundvars 1 t1 $ Bound 0, t'))
+        do_formula (t1 $ Bound ~1) #> do_formula t'
       | Const (@{const_name Bex}, _) $ t1 $ Abs (_, T, t') =>
-        do_quantifier (pos = SOME true) T
-                      (HOLogic.mk_conj (incr_boundvars 1 t1 $ Bound 0, t'))
+        do_formula (t1 $ Bound ~1) #> do_formula t'
       | (t0 as Const (_, @{typ bool})) $ t1 =>
-        do_term false t0 #> do_formula pos t1  (* theory constant *)
+        do_term false t0 #> do_formula t1  (* theory constant *)
       | _ => do_term false t
-  in do_formula pos end
+  in do_formula end
 
 fun pconsts_in_fact thy is_built_in_const t =
   Symtab.fold (fn (s, pss) => fold (cons o pair s) pss)
-              (Symtab.empty |> add_pconsts_in_term thy is_built_in_const true
-                                                   (SOME true) t) []
+              (Symtab.empty |> add_pconsts_in_term thy is_built_in_const t) []
 
 (* Inserts a dummy "constant" referring to the theory name, so that relevance
    takes the given theory into account. *)
@@ -263,13 +238,11 @@
   if String.isSubstring "." s then 1.0 else local_const_multiplier
 
 (* Computes a constant's weight, as determined by its frequency. *)
-fun generic_pconst_weight local_const_multiplier abs_weight skolem_weight
-                          theory_const_weight chained_const_weight weight_for f
-                          const_tab chained_const_tab (c as (s, PType (m, _))) =
+fun generic_pconst_weight local_const_multiplier abs_weight theory_const_weight
+        chained_const_weight weight_for f const_tab chained_const_tab
+        (c as (s, PType (m, _))) =
   if s = pseudo_abs_name then
     abs_weight
-  else if String.isPrefix pseudo_skolem_prefix s then
-    skolem_weight
   else if String.isSuffix theory_const_suffix s then
     theory_const_weight
   else
@@ -284,19 +257,18 @@
 fun rel_pconst_weight ({local_const_multiplier, abs_rel_weight,
                         theory_const_rel_weight, ...} : relevance_fudge)
                       const_tab =
-  generic_pconst_weight local_const_multiplier abs_rel_weight 0.0
+  generic_pconst_weight local_const_multiplier abs_rel_weight
                         theory_const_rel_weight 0.0 rel_weight_for I const_tab
                         Symtab.empty
 
 fun irrel_pconst_weight (fudge as {local_const_multiplier, abs_irrel_weight,
-                                   skolem_irrel_weight,
                                    theory_const_irrel_weight,
                                    chained_const_irrel_weight, ...})
                         const_tab chained_const_tab =
   generic_pconst_weight local_const_multiplier abs_irrel_weight
-                        skolem_irrel_weight theory_const_irrel_weight
-                        chained_const_irrel_weight (irrel_weight_for fudge) swap
-                        const_tab chained_const_tab
+                        theory_const_irrel_weight chained_const_irrel_weight
+                        (irrel_weight_for fudge) swap const_tab
+                        chained_const_tab
 
 fun stature_bonus ({intro_bonus, ...} : relevance_fudge) (_, Intro) =
     intro_bonus
@@ -308,11 +280,10 @@
   | stature_bonus _ _ = 0.0
 
 fun is_odd_const_name s =
-  s = pseudo_abs_name orelse String.isPrefix pseudo_skolem_prefix s orelse
-  String.isSuffix theory_const_suffix s
+  s = pseudo_abs_name orelse String.isSuffix theory_const_suffix s
 
-fun fact_weight fudge stature const_tab rel_const_tab rel_const_iter_tab
-                chained_const_tab fact_consts =
+fun fact_weight fudge stature const_tab rel_const_tab chained_const_tab
+                fact_consts =
   case fact_consts |> List.partition (pconst_hyper_mem I rel_const_tab)
                    ||> filter_out (pconst_hyper_mem swap rel_const_tab) of
     ([], _) => 0.0
@@ -358,7 +329,7 @@
 fun if_empty_replace_with_scope thy is_built_in_const facts sc tab =
   if Symtab.is_empty tab then
     Symtab.empty
-    |> fold (add_pconsts_in_term thy is_built_in_const false (SOME false))
+    |> fold (add_pconsts_in_term thy is_built_in_const)
             (map_filter (fn ((_, (sc', _)), th) =>
                             if sc' = sc then SOME (prop_of th) else NONE) facts)
   else
@@ -387,7 +358,9 @@
 (* High enough so that it isn't wrongly considered as very relevant (e.g., for E
    weights), but low enough so that it is unlikely to be truncated away if few
    facts are included. *)
-val special_fact_index = 45
+val special_fact_index = 45 (* FUDGE *)
+
+val really_hopeless_get_kicked_out_iter = 5 (* FUDGE *)
 
 fun relevance_filter ctxt thres0 decay max_facts is_built_in_const
         (fudge as {threshold_divisor, ridiculous_threshold, ...}) facts hyp_ts
@@ -395,40 +368,38 @@
   let
     val thy = Proof_Context.theory_of ctxt
     val const_tab = fold (count_fact_consts thy fudge) facts Symtab.empty
-    val add_pconsts = add_pconsts_in_term thy is_built_in_const false o SOME
+    val add_pconsts = add_pconsts_in_term thy is_built_in_const
     val chained_ts =
       facts |> map_filter (fn ((_, (Chained, _)), th) => SOME (prop_of th)
                             | _ => NONE)
-    val chained_const_tab = Symtab.empty |> fold (add_pconsts true) chained_ts
+    val chained_const_tab = Symtab.empty |> fold add_pconsts chained_ts
     val goal_const_tab =
       Symtab.empty
-      |> fold (add_pconsts true) hyp_ts
-      |> add_pconsts false concl_t
+      |> fold add_pconsts hyp_ts
+      |> add_pconsts concl_t
       |> (fn tab => if Symtab.is_empty tab then chained_const_tab else tab)
       |> fold (if_empty_replace_with_scope thy is_built_in_const facts)
               [Chained, Assum, Local]
-    val goal_const_iter_tab = goal_const_tab |> Symtab.map (K (K ~1))
-    fun iter j remaining_max thres rel_const_tab rel_const_iter_tab hopeless
-             hopeful =
+    fun iter j remaining_max thres rel_const_tab hopeless hopeful =
       let
+        val hopeless =
+          hopeless |> j = really_hopeless_get_kicked_out_iter
+                      ? filter_out (fn (_, w) => w < 0.001)
         fun relevant [] _ [] =
             (* Nothing has been added this iteration. *)
             if j = 0 andalso thres >= ridiculous_threshold then
               (* First iteration? Try again. *)
               iter 0 max_facts (thres / threshold_divisor) rel_const_tab
-                   rel_const_iter_tab hopeless hopeful
+                   hopeless hopeful
             else
               []
           | relevant candidates rejects [] =
             let
               val (accepts, more_rejects) =
                 take_most_relevant ctxt max_facts remaining_max fudge candidates
-              val sps = maps (snd o fst) accepts;
+              val sps = maps (snd o fst) accepts
               val rel_const_tab' =
-                rel_const_tab |> fold (add_pconst_to_table false) sps
-              val rel_const_iter_tab' =
-                rel_const_iter_tab
-                |> fold (fn (s, _) => Symtab.default (s, j)) sps
+                rel_const_tab |> fold add_pconst_to_table sps
               fun is_dirty (s, _) =
                 Symtab.lookup rel_const_tab' s <> Symtab.lookup rel_const_tab s
               val (hopeful_rejects, hopeless_rejects) =
@@ -457,7 +428,7 @@
                  []
                else
                  iter (j + 1) remaining_max thres rel_const_tab'
-                      rel_const_iter_tab' hopeless_rejects hopeful_rejects)
+                      hopeless_rejects hopeful_rejects)
             end
           | relevant candidates rejects
                      (((ax as (((_, stature), _), fact_consts)), cached_weight)
@@ -468,7 +439,7 @@
                   SOME w => w
                 | NONE =>
                   fact_weight fudge stature const_tab rel_const_tab
-                              rel_const_iter_tab chained_const_tab fact_consts
+                              chained_const_tab fact_consts
             in
               if weight >= thres then
                 relevant ((ax, weight) :: candidates) rejects hopeful
@@ -509,7 +480,7 @@
          |> insert_into_facts accepts
   in
     facts |> map_filter (pair_consts_fact thy is_built_in_const fudge)
-          |> iter 0 max_facts thres0 goal_const_tab goal_const_iter_tab []
+          |> iter 0 max_facts thres0 goal_const_tab []
           |> insert_special_facts
           |> tap (fn accepts => trace_msg ctxt (fn () =>
                       "Total relevant: " ^ string_of_int (length accepts)))
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_minimize.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_minimize.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -97,7 +97,7 @@
     print silent
           (fn () =>
               case outcome of
-                SOME failure => string_of_failure failure
+                SOME failure => string_of_atp_failure failure
               | NONE =>
                 "Found proof" ^
                  (if length used_facts = length facts then ""
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_provers.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_provers.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -8,7 +8,7 @@
 
 signature SLEDGEHAMMER_PROVERS =
 sig
-  type failure = ATP_Proof.failure
+  type atp_failure = ATP_Proof.atp_failure
   type stature = ATP_Problem_Generate.stature
   type type_enc = ATP_Problem_Generate.type_enc
   type fact = Sledgehammer_Fact.fact
@@ -53,7 +53,6 @@
      higher_order_irrel_weight : real,
      abs_rel_weight : real,
      abs_irrel_weight : real,
-     skolem_irrel_weight : real,
      theory_const_rel_weight : real,
      theory_const_irrel_weight : real,
      chained_const_irrel_weight : real,
@@ -76,7 +75,7 @@
      factss : (string * fact list) list}
 
   type prover_result =
-    {outcome : failure option,
+    {outcome : atp_failure option,
      used_facts : (string * stature) list,
      used_from : fact list,
      run_time : Time.time,
@@ -252,6 +251,9 @@
 fun is_appropriate_prop_of_prover ctxt name =
   if is_unit_equational_atp ctxt name then is_unit_equality else K true
 
+val atp_irrelevant_const_tab =
+  Symtab.make (map (rpair ()) atp_irrelevant_consts)
+
 fun is_built_in_const_of_prover ctxt name =
   if is_smt_prover ctxt name then
     let val ctxt = ctxt |> select_smt_solver name in
@@ -264,7 +266,7 @@
            (false, ts)
     end
   else
-    fn (s, _) => fn ts => (member (op =) atp_irrelevant_consts s, ts)
+    fn (s, _) => fn ts => (Symtab.defined atp_irrelevant_const_tab s, ts)
 
 (* FUDGE *)
 val atp_relevance_fudge =
@@ -273,7 +275,6 @@
    higher_order_irrel_weight = 1.05,
    abs_rel_weight = 0.5,
    abs_irrel_weight = 2.0,
-   skolem_irrel_weight = 0.05,
    theory_const_rel_weight = 0.5,
    theory_const_irrel_weight = 0.25,
    chained_const_irrel_weight = 0.25,
@@ -295,7 +296,6 @@
    higher_order_irrel_weight = #higher_order_irrel_weight atp_relevance_fudge,
    abs_rel_weight = #abs_rel_weight atp_relevance_fudge,
    abs_irrel_weight = #abs_irrel_weight atp_relevance_fudge,
-   skolem_irrel_weight = #skolem_irrel_weight atp_relevance_fudge,
    theory_const_rel_weight = #theory_const_rel_weight atp_relevance_fudge,
    theory_const_irrel_weight = #theory_const_irrel_weight atp_relevance_fudge,
    chained_const_irrel_weight = #chained_const_irrel_weight atp_relevance_fudge,
@@ -368,7 +368,6 @@
    higher_order_irrel_weight : real,
    abs_rel_weight : real,
    abs_irrel_weight : real,
-   skolem_irrel_weight : real,
    theory_const_rel_weight : real,
    theory_const_irrel_weight : real,
    chained_const_irrel_weight : real,
@@ -391,7 +390,7 @@
    factss : (string * fact list) list}
 
 type prover_result =
-  {outcome : failure option,
+  {outcome : atp_failure option,
    used_facts : (string * stature) list,
    used_from : fact list,
    run_time : Time.time,
@@ -879,8 +878,10 @@
                      val failure =
                        UnsoundProof (is_type_enc_sound type_enc, facts)
                    in
-                     if debug then (warning (string_of_failure failure); NONE)
-                     else SOME failure
+                     if debug then
+                       (warning (string_of_atp_failure failure); NONE)
+                     else
+                       SOME failure
                    end
                  | NONE => NONE)
               | _ => outcome
@@ -983,7 +984,7 @@
         end
       | SOME failure =>
         ([], Lazy.value (Failed_to_Play plain_metis),
-         fn _ => string_of_failure failure, "")
+         fn _ => string_of_atp_failure failure, "")
   in
     {outcome = outcome, used_facts = used_facts, used_from = used_from,
      run_time = run_time, preplay = preplay, message = message,
@@ -1125,7 +1126,7 @@
               if debug then
                 quote name ^ " invoked with " ^
                 num_of_facts fact_filter num_facts ^ ": " ^
-                string_of_failure (failure_of_smt_failure (the outcome)) ^
+                string_of_atp_failure (failure_of_smt_failure (the outcome)) ^
                 " Retrying with " ^ num_of_facts new_fact_filter new_num_facts ^
                 "..."
                 |> Output.urgent_message
@@ -1181,7 +1182,7 @@
            "")
       | SOME failure =>
         (Lazy.value (Failed_to_Play plain_metis),
-         fn _ => string_of_failure failure, "")
+         fn _ => string_of_atp_failure failure, "")
   in
     {outcome = outcome, used_facts = used_facts, used_from = used_from,
      run_time = run_time, preplay = preplay, message = message,
@@ -1229,7 +1230,7 @@
       in
         {outcome = SOME failure, used_facts = [], used_from = [],
          run_time = Time.zeroTime, preplay = Lazy.value play,
-         message = fn _ => string_of_failure failure, message_tail = ""}
+         message = fn _ => string_of_atp_failure failure, message_tail = ""}
       end
   end
 
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_reconstruct.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_reconstruct.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -7,22 +7,22 @@
 
 signature SLEDGEHAMMER_RECONSTRUCT =
 sig
-  type 'a proof = 'a ATP_Proof.proof
+  type 'a atp_proof = 'a ATP_Proof.atp_proof
   type stature = ATP_Problem_Generate.stature
   type one_line_params = Sledgehammer_Print.one_line_params
 
   type isar_params =
     bool * bool * Time.time option * bool * real * int * real * bool * bool
     * string Symtab.table * (string * stature) list vector
-    * (string * term) list * int Symtab.table * string proof * thm
+    * (string * term) list * int Symtab.table * string atp_proof * thm
 
-  val lam_trans_of_atp_proof : string proof -> string -> string
-  val is_typed_helper_used_in_atp_proof : string proof -> bool
+  val lam_trans_of_atp_proof : string atp_proof -> string -> string
+  val is_typed_helper_used_in_atp_proof : string atp_proof -> bool
   val used_facts_in_atp_proof :
-    Proof.context -> (string * stature) list vector -> string proof ->
+    Proof.context -> (string * stature) list vector -> string atp_proof ->
     (string * stature) list
   val used_facts_in_unsound_atp_proof :
-    Proof.context -> (string * stature) list vector -> 'a proof ->
+    Proof.context -> (string * stature) list vector -> 'a atp_proof ->
     string list option
   val isar_proof_text :
     Proof.context -> bool option -> isar_params -> one_line_params -> string
@@ -50,7 +50,7 @@
 open Sledgehammer_Minimize_Isar
 
 structure String_Redirect = ATP_Proof_Redirect(
-  type key = step_name
+  type key = atp_step_name
   val ord = fn ((s, _ : string list), (s', _)) => fast_string_ord (s, s')
   val string_of = fst)
 
@@ -410,7 +410,7 @@
 type isar_params =
   bool * bool * Time.time option * bool * real * int * real * bool * bool
   * string Symtab.table * (string * stature) list vector
-  * (string * term) list * int Symtab.table * string proof * thm
+  * (string * term) list * int Symtab.table * string atp_proof * thm
 
 fun isar_proof_text ctxt isar_proofs
     (debug, verbose, preplay_timeout, preplay_trace, isar_compress,
--- a/src/HOL/Tools/Sledgehammer/sledgehammer_run.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Tools/Sledgehammer/sledgehammer_run.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -246,7 +246,8 @@
              factss = factss}
           fun learn prover =
             mash_learn_proof ctxt params prover (prop_of goal) all_facts
-          val launch = launch_prover params mode output_result minimize_command only learn
+          val launch =
+            launch_prover params mode output_result minimize_command only learn
         in
           if mode = Auto_Try then
             (unknownN, state)
--- a/src/HOL/Transcendental.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/Transcendental.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -57,7 +57,7 @@
   x}, then it sums absolutely for @{term z} with @{term "\<bar>z\<bar> < \<bar>x\<bar>"}.*}
 
 lemma powser_insidea:
-  fixes x z :: "'a::real_normed_field"
+  fixes x z :: "'a::real_normed_div_algebra"
   assumes 1: "summable (\<lambda>n. f n * x ^ n)"
     and 2: "norm z < norm x"
   shows "summable (\<lambda>n. norm (f n * z ^ n))"
@@ -95,7 +95,7 @@
   proof -
     from 2 have "norm (norm (z * inverse x)) < 1"
       using x_neq_0
-      by (simp add: nonzero_norm_divide divide_inverse [symmetric])
+      by (simp add: norm_mult nonzero_norm_inverse divide_inverse [where 'a=real, symmetric])
     hence "summable (\<lambda>n. norm (z * inverse x) ^ n)"
       by (rule summable_geometric)
     hence "summable (\<lambda>n. K * norm (z * inverse x) ^ n)"
@@ -110,7 +110,7 @@
 qed
 
 lemma powser_inside:
-  fixes f :: "nat \<Rightarrow> 'a::{real_normed_field,banach}"
+  fixes f :: "nat \<Rightarrow> 'a::{real_normed_div_algebra,banach}"
   shows
     "summable (\<lambda>n. f n * (x ^ n)) \<Longrightarrow> norm z < norm x \<Longrightarrow>
       summable (\<lambda>n. f n * (z ^ n))"
@@ -2495,35 +2495,47 @@
      "[|(0::real) < x;0 < x1; x1 * y < x * u |] ==> y * inverse x < u * inverse x1"
   by (auto dest: real_mult_inverse_cancel simp add: mult_ac)
 
-lemma realpow_num_eq_if:
-  fixes m :: "'a::power"
-  shows "m ^ n = (if n=0 then 1 else m * m ^ (n - 1))"
-  by (cases n) auto
-
-lemma cos_two_less_zero [simp]: "cos (2) < 0"
-  apply (cut_tac x = 2 in cos_paired)
-  apply (drule sums_minus)
-  apply (rule neg_less_iff_less [THEN iffD1])
-  apply (frule sums_unique, auto)
-  apply (rule_tac y =
-   "\<Sum>n=0..< Suc(Suc(Suc 0)). - (-1 ^ n / (real(fact (2*n))) * 2 ^ (2*n))"
-         in order_less_trans)
-  apply (simp (no_asm) add: fact_num_eq_if_nat realpow_num_eq_if del: fact_Suc)
-  apply (simp (no_asm) add: mult_assoc del: setsum_op_ivl_Suc)
-  apply (rule sumr_pos_lt_pair)
-  apply (erule sums_summable, safe)
-  unfolding One_nat_def
-  apply (simp (no_asm) add: divide_inverse real_0_less_add_iff mult_assoc [symmetric]
-              del: fact_Suc)
-  apply (simp add: inverse_eq_divide less_divide_eq del: fact_Suc)
-  apply (subst fact_Suc [of "Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))"])
-  apply (simp only: real_of_nat_mult)
-  apply (rule mult_strict_mono, force)
-    apply (rule_tac [3] real_of_nat_ge_zero)
-   prefer 2 apply force
-  apply (rule real_of_nat_less_iff [THEN iffD2])
-  apply (rule fact_less_mono_nat, auto)
-  done
+lemmas realpow_num_eq_if = power_eq_if
+
+lemma cos_two_less_zero [simp]:
+  "cos 2 < 0"
+proof -
+  note fact_Suc [simp del]
+  from cos_paired
+  have "(\<lambda>n. - (-1 ^ n / real (fact (2 * n)) * 2 ^ (2 * n))) sums - cos 2"
+    by (rule sums_minus)
+  then have *: "(\<lambda>n. - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n)))) sums - cos 2"
+    by simp
+  then have **: "summable (\<lambda>n. - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n))))"
+    by (rule sums_summable)
+  have "0 < (\<Sum>n = 0..<Suc (Suc (Suc 0)). - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n))))"
+    by (simp add: fact_num_eq_if_nat realpow_num_eq_if)
+  moreover have "(\<Sum>n = 0..<Suc (Suc (Suc 0)). - (-1 ^ n  * 2 ^ (2 * n) / real (fact (2 * n))))
+    < (\<Sum>n. - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n))))"
+  proof -
+    { fix d
+      have "4 * real (fact (Suc (Suc (Suc (Suc (Suc (Suc (4 * d))))))))
+       < real (Suc (Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))) *
+           fact (Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))))"
+        by (simp only: real_of_nat_mult) (auto intro!: mult_strict_mono fact_less_mono_nat)
+      then have "4 * real (fact (Suc (Suc (Suc (Suc (Suc (Suc (4 * d))))))))
+        < real (fact (Suc (Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d))))))))))"
+        by (simp only: fact_Suc [of "Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))"])
+      then have "4 * inverse (real (fact (Suc (Suc (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))))))
+        < inverse (real (fact (Suc (Suc (Suc (Suc (Suc (Suc (4 * d)))))))))"
+        by (simp add: inverse_eq_divide less_divide_eq)
+    }
+    note *** = this
+    from ** show ?thesis by (rule sumr_pos_lt_pair)
+      (simp add: divide_inverse real_0_less_add_iff mult_assoc [symmetric] ***)
+  qed
+  ultimately have "0 < (\<Sum>n. - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n))))"
+    by (rule order_less_trans)
+  moreover from * have "- cos 2 = (\<Sum>n. - (-1 ^ n * 2 ^ (2 * n) / real (fact (2 * n))))"
+    by (rule sums_unique)
+  ultimately have "0 < - cos 2" by simp
+  then show ?thesis by simp
+qed
 
 lemmas cos_two_neq_zero [simp] = cos_two_less_zero [THEN less_imp_neq]
 lemmas cos_two_le_zero [simp] = cos_two_less_zero [THEN order_less_imp_le]
--- a/src/HOL/ex/Adhoc_Overloading_Examples.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/ex/Adhoc_Overloading_Examples.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -1,5 +1,5 @@
-(*  Title:  HOL/ex/Adhoc_Overloading_Examples.thy
-    Author: Christian Sternagel
+(*  Title:      HOL/ex/Adhoc_Overloading_Examples.thy
+    Author:     Christian Sternagel
 *)
 
 header {* Ad Hoc Overloading *}
--- a/src/HOL/ex/Sqrt.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/HOL/ex/Sqrt.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -31,12 +31,12 @@
   have "p dvd m \<and> p dvd n"
   proof
     from eq have "p dvd m\<^sup>2" ..
-    with `prime p` pos2 show "p dvd m" by (rule prime_dvd_power_nat)
+    with `prime p` show "p dvd m" by (rule prime_dvd_power_nat)
     then obtain k where "m = p * k" ..
     with eq have "p * n\<^sup>2 = p\<^sup>2 * k\<^sup>2" by (auto simp add: power2_eq_square mult_ac)
     with p have "n\<^sup>2 = p * k\<^sup>2" by (simp add: power2_eq_square)
     then have "p dvd n\<^sup>2" ..
-    with `prime p` pos2 show "p dvd n" by (rule prime_dvd_power_nat)
+    with `prime p` show "p dvd n" by (rule prime_dvd_power_nat)
   qed
   then have "p dvd gcd m n" ..
   with gcd have "p dvd 1" by simp
@@ -71,12 +71,12 @@
   also have "\<dots> * n\<^sup>2 = p * n\<^sup>2" by simp
   finally have eq: "m\<^sup>2 = p * n\<^sup>2" ..
   then have "p dvd m\<^sup>2" ..
-  with `prime p` pos2 have dvd_m: "p dvd m" by (rule prime_dvd_power_nat)
+  with `prime p` have dvd_m: "p dvd m" by (rule prime_dvd_power_nat)
   then obtain k where "m = p * k" ..
   with eq have "p * n\<^sup>2 = p\<^sup>2 * k\<^sup>2" by (auto simp add: power2_eq_square mult_ac)
   with p have "n\<^sup>2 = p * k\<^sup>2" by (simp add: power2_eq_square)
   then have "p dvd n\<^sup>2" ..
-  with `prime p` pos2 have "p dvd n" by (rule prime_dvd_power_nat)
+  with `prime p` have "p dvd n" by (rule prime_dvd_power_nat)
   with dvd_m have "p dvd gcd m n" by (rule gcd_greatest_nat)
   with gcd have "p dvd 1" by simp
   then have "p \<le> 1" by (simp add: dvd_imp_le)
@@ -103,4 +103,3 @@
 qed
 
 end
-
--- a/src/Provers/blast.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Provers/blast.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -78,8 +78,8 @@
 (* options *)
 
 val (depth_limit, setup_depth_limit) = Attrib.config_int @{binding blast_depth_limit} (K 20);
-val (trace, setup_trace) = Attrib.config_bool @{binding blast_trace} (K false);
-val (stats, setup_stats) = Attrib.config_bool @{binding blast_stats} (K false);
+val (trace, _) = Attrib.config_bool @{binding blast_trace} (K false);
+val (stats, _) = Attrib.config_bool @{binding blast_stats} (K false);
 
 
 datatype term =
@@ -1298,8 +1298,6 @@
 
 val setup =
   setup_depth_limit #>
-  setup_trace #>
-  setup_stats #>
   Method.setup @{binding blast}
     (Scan.lift (Scan.option Parse.nat) --| Method.sections Classical.cla_modifiers >>
       (fn NONE => SIMPLE_METHOD' o blast_tac
--- a/src/Pure/Concurrent/simple_thread.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Concurrent/simple_thread.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -25,7 +25,7 @@
 fun fork interrupts body =
   Thread.fork (fn () =>
     exception_trace (fn () =>
-      body () handle exn => if Exn.is_interrupt exn then () else reraise exn),
+      body () handle exn => if Exn.is_interrupt exn then () (*sic!*) else reraise exn),
     attributes interrupts);
 
 fun join thread =
--- a/src/Pure/General/name_space.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/General/name_space.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -30,6 +30,7 @@
   val extern: Proof.context -> T -> string -> xstring
   val extern_ord: Proof.context -> T -> string * string -> order
   val markup_extern: Proof.context -> T -> string -> Markup.T * xstring
+  val pretty: Proof.context -> T -> string -> Pretty.T
   val hide: bool -> string -> T -> T
   val merge: T * T -> T
   type naming
@@ -194,6 +195,8 @@
 
 fun markup_extern ctxt space name = (markup space name, extern ctxt space name);
 
+fun pretty ctxt space name = Pretty.mark_str (markup_extern ctxt space name);
+
 
 (* modify internals *)
 
--- a/src/Pure/Isar/class.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Isar/class.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -182,14 +182,14 @@
 
     fun prt_param (c, ty) =
       Pretty.block
-       [Pretty.mark_str (Name_Space.markup_extern ctxt const_space c), Pretty.str " ::",
+       [Name_Space.pretty ctxt const_space c, Pretty.str " ::",
         Pretty.brk 1, Syntax.pretty_typ ctxt (Type.strip_sorts_dummy ty)];
 
     fun prt_entry class =
       Pretty.block
         ([Pretty.command "class", Pretty.brk 1,
-          Pretty.mark_str (Name_Space.markup_extern ctxt class_space class), Pretty.str ":",
-          Pretty.fbrk, Pretty.block [Pretty.str "supersort: ", prt_supersort class]] @
+          Name_Space.pretty ctxt class_space class, Pretty.str ":", Pretty.fbrk,
+          Pretty.block [Pretty.str "supersort: ", prt_supersort class]] @
           (case try (Axclass.get_info thy) class of
             NONE => []
           | SOME {params, ...} =>
--- a/src/Pure/Isar/keyword.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Isar/keyword.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -23,6 +23,7 @@
   val thy_script: T
   val thy_goal: T
   val qed: T
+  val qed_script: T
   val qed_block: T
   val qed_global: T
   val prf_heading2: T
@@ -103,6 +104,7 @@
 val thy_script = kind "thy_script";
 val thy_goal = kind "thy_goal";
 val qed = kind "qed";
+val qed_script = kind "qed_script";
 val qed_block = kind "qed_block";
 val qed_global = kind "qed_global";
 val prf_heading2 = kind "prf_heading2";
@@ -121,7 +123,7 @@
 
 val kinds =
   [control, diag, thy_begin, thy_end, thy_heading1, thy_heading2, thy_heading3, thy_heading4,
-    thy_load, thy_decl, thy_script, thy_goal, qed, qed_block, qed_global,
+    thy_load, thy_decl, thy_script, thy_goal, qed, qed_script, qed_block, qed_global,
     prf_heading2, prf_heading3, prf_heading4, prf_goal, prf_block, prf_open,
     prf_close, prf_chain, prf_decl, prf_asm, prf_asm_goal, prf_asm_goal_script, prf_script];
 
@@ -242,7 +244,7 @@
     thy_load, thy_decl, thy_script, thy_goal];
 
 val is_proof = command_category
-  [qed, qed_block, qed_global, prf_heading2, prf_heading3, prf_heading4,
+  [qed, qed_script, qed_block, qed_global, prf_heading2, prf_heading3, prf_heading4,
     prf_goal, prf_block, prf_open, prf_close, prf_chain, prf_decl,
     prf_asm, prf_asm_goal, prf_asm_goal_script, prf_script];
 
@@ -252,7 +254,7 @@
 
 val is_theory_goal = command_category [thy_goal];
 val is_proof_goal = command_category [prf_goal, prf_asm_goal, prf_asm_goal_script];
-val is_qed = command_category [qed, qed_block];
+val is_qed = command_category [qed, qed_script, qed_block];
 val is_qed_global = command_category [qed_global];
 
 end;
--- a/src/Pure/Isar/keyword.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Isar/keyword.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -25,6 +25,7 @@
   val THY_SCRIPT = "thy_script"
   val THY_GOAL = "thy_goal"
   val QED = "qed"
+  val QED_SCRIPT = "qed_script"
   val QED_BLOCK = "qed_block"
   val QED_GLOBAL = "qed_global"
   val PRF_HEADING2 = "prf_heading2"
@@ -53,10 +54,12 @@
   val theory1 = Set(THY_BEGIN, THY_END)
   val theory2 = Set(THY_DECL, THY_GOAL)
   val proof =
-    Set(QED, QED_BLOCK, QED_GLOBAL, PRF_HEADING2, PRF_HEADING3, PRF_HEADING4, PRF_GOAL, PRF_BLOCK,
-      PRF_OPEN, PRF_CHAIN, PRF_DECL, PRF_ASM, PRF_ASM_GOAL, PRF_ASM_GOAL_SCRIPT, PRF_SCRIPT)
+    Set(QED, QED_SCRIPT, QED_BLOCK, QED_GLOBAL, PRF_HEADING2, PRF_HEADING3, PRF_HEADING4,
+      PRF_GOAL, PRF_BLOCK, PRF_OPEN, PRF_CHAIN, PRF_DECL, PRF_ASM, PRF_ASM_GOAL,
+      PRF_ASM_GOAL_SCRIPT, PRF_SCRIPT)
   val proof1 =
-    Set(QED, QED_BLOCK, QED_GLOBAL, PRF_GOAL, PRF_BLOCK, PRF_OPEN, PRF_CLOSE, PRF_CHAIN, PRF_DECL)
+    Set(QED, QED_SCRIPT, QED_BLOCK, QED_GLOBAL, PRF_GOAL, PRF_BLOCK, PRF_OPEN, PRF_CLOSE,
+      PRF_CHAIN, PRF_DECL)
   val proof2 = Set(PRF_ASM, PRF_ASM_GOAL, PRF_ASM_GOAL_SCRIPT)
   val improper = Set(THY_SCRIPT, PRF_SCRIPT)
 }
--- a/src/Pure/Pure.thy	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Pure.thy	Fri Sep 13 09:31:45 2013 +0200
@@ -68,7 +68,8 @@
   and "}" :: prf_close % "proof"
   and "next" :: prf_block % "proof"
   and "qed" :: qed_block % "proof"
-  and "by" ".." "." "done" "sorry" :: "qed" % "proof"
+  and "by" ".." "." "sorry" :: "qed" % "proof"
+  and "done" :: "qed_script" % "proof"
   and "oops" :: qed_global % "proof"
   and "defer" "prefer" "apply" :: prf_script % "proof"
   and "apply_end" :: prf_script % "proof" == ""
--- a/src/Pure/System/gui_setup.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/System/gui_setup.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -44,7 +44,7 @@
     // values
     text.append("JVM name: " + Platform.jvm_name + "\n")
     text.append("JVM platform: " + Platform.jvm_platform + "\n")
-    text.append("JVM home: " + java.lang.System.getProperty("java.home") + "\n")
+    text.append("JVM home: " + java.lang.System.getProperty("java.home", "") + "\n")
     try {
       Isabelle_System.init()
       if (Platform.is_windows)
--- a/src/Pure/System/isabelle_system.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/System/isabelle_system.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -21,7 +21,7 @@
 
   def jdk_home(): String =
   {
-    val java_home = System.getProperty("java.home")
+    val java_home = System.getProperty("java.home", "")
     val home = new JFile(java_home)
     val parent = home.getParent
     if (home.getName == "jre" && parent != null &&
@@ -74,9 +74,9 @@
       set_cygwin_root()
       val env0 = sys.env + ("ISABELLE_JDK_HOME" -> posix_path(jdk_home()))
 
-      val user_home = System.getProperty("user.home")
+      val user_home = System.getProperty("user.home", "")
       val env =
-        if (user_home == null || env0.isDefinedAt("HOME")) env0
+        if (user_home == "" || env0.isDefinedAt("HOME")) env0
         else env0 + ("HOME" -> user_home)
 
       val system_home =
@@ -84,8 +84,8 @@
         else
           env.get("ISABELLE_HOME") match {
             case None | Some("") =>
-              val path = System.getProperty("isabelle.home")
-              if (path == null || path == "") error("Unknown Isabelle home directory")
+              val path = System.getProperty("isabelle.home", "")
+              if (path == "") error("Unknown Isabelle home directory")
               else path
             case Some(path) => path
           }
--- a/src/Pure/System/platform.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/System/platform.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -16,8 +16,8 @@
 {
   /* main OS variants */
 
-  val is_macos = System.getProperty("os.name") == "Mac OS X"
-  val is_windows = System.getProperty("os.name").startsWith("Windows")
+  val is_macos = System.getProperty("os.name", "") == "Mac OS X"
+  val is_windows = System.getProperty("os.name", "").startsWith("Windows")
 
 
   /* Platform identifiers */
@@ -35,7 +35,7 @@
   lazy val jvm_platform: String =
   {
     val arch =
-      System.getProperty("os.arch") match {
+      System.getProperty("os.arch", "") match {
         case X86() => "x86"
         case X86_64() => "x86_64"
         case Sparc() => "sparc"
@@ -43,7 +43,7 @@
         case _ => error("Failed to determine CPU architecture")
       }
     val os =
-      System.getProperty("os.name") match {
+      System.getProperty("os.name", "") match {
         case Solaris() => "solaris"
         case Linux() => "linux"
         case Darwin() => "darwin"
@@ -56,6 +56,6 @@
 
   /* JVM name */
 
-  val jvm_name: String = System.getProperty("java.vm.name")
+  val jvm_name: String = System.getProperty("java.vm.name", "")
 }
 
--- a/src/Pure/Tools/build_dialog.scala	Thu Sep 12 22:10:17 2013 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*  Title:      Pure/Tools/build_dialog.scala
-    Author:     Makarius
-
-Dialog for session build process.
-*/
-
-package isabelle
-
-
-import java.awt.{GraphicsEnvironment, Point, Font}
-
-import scala.swing.{ScrollPane, Button, CheckBox, FlowPanel,
-  BorderPanel, MainFrame, TextArea, SwingApplication, Component, Label}
-import scala.swing.event.ButtonClicked
-
-
-object Build_Dialog
-{
-  /* command line entry point */
-
-  def main(args: Array[String])
-  {
-    GUI.init_laf()
-    try {
-      args.toList match {
-        case
-          logic_option ::
-          logic ::
-          Properties.Value.Boolean(system_mode) ::
-          include_dirs =>
-            val options = Options.init()
-            val dirs = include_dirs.map(Path.explode(_))
-            val session =
-              Isabelle_System.default_logic(logic,
-                if (logic_option != "") options.string(logic_option) else "")
-
-            val system_dialog = new System_Dialog
-            dialog(options, system_dialog, system_mode, dirs, session)
-            system_dialog.join_exit
-
-        case _ => error("Bad arguments:\n" + cat_lines(args))
-      }
-    }
-    catch {
-      case exn: Throwable =>
-        GUI.error_dialog(null, "Isabelle build failure", GUI.scrollable_text(Exn.message(exn)))
-        sys.exit(2)
-    }
-  }
-
-
-  /* dialog */
-
-  def dialog(
-    options: Options,
-    system_dialog: System_Dialog,
-    system_mode: Boolean,
-    dirs: List[Path],
-    session: String)
-  {
-    val more_dirs = dirs.map((false, _))
-
-    if (Build.build(options = options, build_heap = true, no_build = true,
-        more_dirs = more_dirs, sessions = List(session)) == 0)
-      system_dialog.return_code(0)
-    else {
-      system_dialog.title("Isabelle build (" + Isabelle_System.getenv("ML_IDENTIFIER") + ")")
-      system_dialog.echo("Build started for Isabelle/" + session + " ...")
-
-      val (out, rc) =
-        try {
-          ("",
-            Build.build(options = options, progress = system_dialog,
-              build_heap = true, more_dirs = more_dirs,
-              system_mode = system_mode, sessions = List(session)))
-        }
-        catch { case exn: Throwable => (Exn.message(exn) + "\n", 2) }
-
-      system_dialog.echo(out + (if (rc == 0) "OK\n" else "Return code: " + rc + "\n"))
-      system_dialog.return_code(rc)
-    }
-  }
-}
-
--- a/src/Pure/Tools/keywords.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Tools/keywords.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -27,6 +27,7 @@
     "thy_decl" -> "theory-decl",
     "thy_script" -> "theory-script",
     "thy_goal" -> "theory-goal",
+    "qed_script" -> "qed",
     "qed_block" -> "qed-block",
     "qed_global" -> "qed-global",
     "prf_heading2" -> "proof-heading",
--- a/src/Pure/Tools/main.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/Tools/main.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -7,7 +7,6 @@
 package isabelle
 
 
-import javax.swing.SwingUtilities
 import java.lang.{System, ClassLoader}
 import java.io.{File => JFile, BufferedReader, InputStreamReader}
 import java.nio.file.Files
@@ -40,13 +39,32 @@
         if (mode == "none")
           system_dialog.return_code(0)
         else {
+          val options = Options.init()
           val system_mode = mode == "" || mode == "system"
-          val dirs = Path.split(Isabelle_System.getenv("JEDIT_SESSION_DIRS"))
-          val options = Options.init()
+          val more_dirs = Path.split(Isabelle_System.getenv("JEDIT_SESSION_DIRS")).map((false, _))
           val session = Isabelle_System.default_logic(
             Isabelle_System.getenv("JEDIT_LOGIC"),
             options.string("jedit_logic"))
-          Build_Dialog.dialog(options, system_dialog, system_mode, dirs, session)
+
+          if (Build.build(options = options, build_heap = true, no_build = true,
+              more_dirs = more_dirs, sessions = List(session)) == 0)
+            system_dialog.return_code(0)
+          else {
+            system_dialog.title("Isabelle build (" + Isabelle_System.getenv("ML_IDENTIFIER") + ")")
+            system_dialog.echo("Build started for Isabelle/" + session + " ...")
+
+            val (out, rc) =
+              try {
+                ("",
+                  Build.build(options = options, progress = system_dialog,
+                    build_heap = true, more_dirs = more_dirs,
+                    system_mode = system_mode, sessions = List(session)))
+              }
+              catch { case exn: Throwable => (Exn.message(exn) + "\n", 2) }
+
+            system_dialog.echo(out + (if (rc == 0) "OK\n" else "Return code: " + rc + "\n"))
+            system_dialog.return_code(rc)
+          }
         }
       }
       catch { case exn: Throwable => exit_error(exn) }
--- a/src/Pure/build-jars	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Pure/build-jars	Fri Sep 13 09:31:45 2013 +0200
@@ -72,7 +72,6 @@
   Thy/thy_load.scala
   Thy/thy_syntax.scala
   Tools/build.scala
-  Tools/build_dialog.scala
   Tools/doc.scala
   Tools/keywords.scala
   Tools/main.scala
@@ -141,19 +140,10 @@
 [ "$#" -ne 0 ] && usage
 
 
-## dependencies
-
-declare -a JFREECHART_JARS=()
-for NAME in $JFREECHART_JAR_NAMES
-do
-  JFREECHART_JARS["${#JFREECHART_JARS[@]}"]="$JFREECHART_HOME/lib/$NAME"
-done
-
-
 ## build
 
 TARGET_DIR="$ISABELLE_HOME/lib/classes"
-TARGET="$TARGET_DIR/ext/Pure.jar"
+TARGET="$TARGET_DIR/Pure.jar"
 
 declare -a PIDE_SOURCES=()
 declare -a PURE_SOURCES=()
@@ -204,14 +194,10 @@
 
   SCALAC_OPTIONS="$ISABELLE_SCALA_BUILD_OPTIONS -d classes"
 
-  JFXRT="$ISABELLE_JDK_HOME/jre/lib/jfxrt.jar"
-
   (
-    for X in "$JFXRT" "${JFREECHART_JARS[@]}" "$XZ_JAVA_HOME/lib/xz.jar" classes
-    do
-      CLASSPATH="$CLASSPATH:$X"
-    done
-    CLASSPATH="$(jvmpath "$CLASSPATH")"
+    classpath "$ISABELLE_JDK_HOME/jre/lib/jfxrt.jar"
+    classpath classes
+    export CLASSPATH="$(jvmpath "$ISABELLE_CLASSPATH")"
 
     if [ "$TEST_PIDE" = true ]; then
       isabelle_scala scalac $SCALAC_OPTIONS "${PIDE_SOURCES[@]}" || \
@@ -224,7 +210,7 @@
     fi
   ) || exit "$?"
 
-  mkdir -p "$TARGET_DIR/ext" || fail "Failed to create directory $TARGET_DIR/ext"
+  mkdir -p "$TARGET_DIR" || fail "Failed to create directory $TARGET_DIR"
 
   pushd classes >/dev/null
 
@@ -239,13 +225,10 @@
 
   cp "$SCALA_HOME/lib/scala-compiler.jar" \
     "$SCALA_HOME/lib/scala-library.jar" \
-    "$SCALA_HOME/lib/scala-swing.jar" "$TARGET_DIR/ext"
-
-  [ -e "$SCALA_HOME/lib/scala-actors.jar" ] && \
-    cp "$SCALA_HOME/lib/scala-actors.jar" "$TARGET_DIR/ext"
-
-  [ -e "$SCALA_HOME/lib/scala-reflect.jar" ] && \
-    cp "$SCALA_HOME/lib/scala-reflect.jar" "$TARGET_DIR/ext"
+    "$SCALA_HOME/lib/scala-swing.jar" \
+    "$SCALA_HOME/lib/scala-actors.jar" \
+    "$SCALA_HOME/lib/scala-reflect.jar" \
+    "$TARGET_DIR"
 
   popd >/dev/null
 
--- a/src/Tools/Graphview/lib/Tools/graphview	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/Graphview/lib/Tools/graphview	Fri Sep 13 09:31:45 2013 +0200
@@ -94,10 +94,10 @@
 
 pushd "$GRAPHVIEW_HOME" >/dev/null || failed
 
-PURE_JAR="$ISABELLE_HOME/lib/classes/ext/Pure.jar"
+PURE_JAR="$ISABELLE_HOME/lib/classes/Pure.jar"
 
 TARGET_DIR="$ISABELLE_HOME/lib/classes"
-TARGET="$TARGET_DIR/ext/Graphview.jar"
+TARGET="$TARGET_DIR/Graphview.jar"
 
 declare -a UPDATED=()
 
@@ -139,12 +139,12 @@
   rm -rf classes && mkdir classes
 
   (
-    #workaround for scalac
+    #workaround for scalac 2.10.2
     function stty() { :; }
     export -f stty
 
-    CLASSPATH="$CLASSPATH:$PURE_JAR"
-    CLASSPATH="$(jvmpath "$CLASSPATH")"
+    classpath "$PURE_JAR"
+    export CLASSPATH="$(jvmpath "$ISABELLE_CLASSPATH")"
     exec "$SCALA_HOME/bin/scalac" $ISABELLE_SCALA_BUILD_OPTIONS -d classes "${SOURCES[@]}"
   ) || fail "Failed to compile sources"
 
--- a/src/Tools/adhoc_overloading.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/adhoc_overloading.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -1,5 +1,5 @@
-(* Author: Alexander Krauss, TU Muenchen
-   Author: Christian Sternagel, University of Innsbruck
+(*  Author:     Alexander Krauss, TU Muenchen
+    Author:     Christian Sternagel, University of Innsbruck
 
 Adhoc overloading of constants based on their types.
 *)
@@ -21,6 +21,7 @@
 
 val show_variants = Attrib.setup_config_bool @{binding show_variants} (K false);
 
+
 (* errors *)
 
 fun err_duplicate_variant oconst =
@@ -32,19 +33,27 @@
 fun err_not_overloaded oconst =
   error ("Constant " ^ quote oconst ^ " is not declared as overloaded");
 
-fun err_unresolved_overloading ctxt (c, T) t instances =
-  let val ctxt' = Config.put show_variants true ctxt
+fun err_unresolved_overloading ctxt0 (c, T) t instances =
+  let
+    val ctxt = Config.put show_variants true ctxt0
+    val const_space = Proof_Context.const_space ctxt
+    val prt_const =
+      Pretty.block [Name_Space.pretty ctxt const_space c, Pretty.str " ::", Pretty.brk 1,
+        Pretty.quote (Syntax.pretty_typ ctxt T)]
   in
-    cat_lines (
-      "Unresolved overloading of constant" ::
-      quote c ^ " :: " ^ quote (Syntax.string_of_typ ctxt' T) ::
-      "in term " ::
-      quote (Syntax.string_of_term ctxt' t) ::
-      (if null instances then "no instances" else "multiple instances:") ::
-    map (Markup.markup Markup.item o Syntax.string_of_term ctxt') instances)
-    |> error
+    error (Pretty.string_of (Pretty.chunks [
+      Pretty.block [
+        Pretty.str "Unresolved adhoc overloading of constant", Pretty.brk 1,
+        prt_const, Pretty.brk 1, Pretty.str "in term", Pretty.brk 1,
+        Pretty.block [Pretty.quote (Syntax.pretty_term ctxt t)]],
+      Pretty.block (
+        (if null instances then [Pretty.str "no instances"]
+        else Pretty.fbreaks (
+          Pretty.str "multiple instances:" ::
+          map (Pretty.mark Markup.item o Syntax.pretty_term ctxt) instances)))]))
   end;
 
+
 (* generic data *)
 
 fun variants_eq ((v1, T1), (v2, T2)) =
@@ -133,6 +142,7 @@
   val generic_remove_variant = generic_variant false;
 end;
 
+
 (* check / uncheck *)
 
 fun unifiable_with thy T1 T2 =
@@ -178,6 +188,7 @@
       | (cT :: _) => err_unresolved_overloading ctxt cT t (the_candidates cT));
   in map check_unresolved end;
 
+
 (* setup *)
 
 val _ = Context.>>
@@ -185,6 +196,7 @@
    #> Syntax_Phases.term_check 1 "adhoc_overloading_unresolved_check" reject_unresolved
    #> Syntax_Phases.term_uncheck 0 "adhoc_overloading" uncheck);
 
+
 (* commands *)
 
 fun generic_adhoc_overloading_cmd add =
--- a/src/Tools/jEdit/lib/Tools/jedit	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/jEdit/lib/Tools/jedit	Fri Sep 13 09:31:45 2013 +0200
@@ -198,8 +198,8 @@
   fi
 fi
 
-PURE_JAR="$ISABELLE_HOME/lib/classes/ext/Pure.jar"
-GRAPHVIEW_JAR="$ISABELLE_HOME/lib/classes/ext/Graphview.jar"
+PURE_JAR="$ISABELLE_HOME/lib/classes/Pure.jar"
+GRAPHVIEW_JAR="$ISABELLE_HOME/lib/classes/Graphview.jar"
 
 pushd "$JEDIT_HOME" >/dev/null || failed
 
@@ -216,12 +216,6 @@
   "$ISABELLE_JEDIT_BUILD_HOME/contrib/jsr305-2.0.0.jar"
 )
 
-declare -a JFREECHART_JARS=()
-for NAME in $JFREECHART_JAR_NAMES
-do
-  JFREECHART_JARS["${#JFREECHART_JARS[@]}"]="$JFREECHART_HOME/lib/$NAME"
-done
-
 
 # target
 
@@ -238,8 +232,8 @@
   else
     if [ -n "$ISABELLE_JEDIT_BUILD_HOME" ]; then
       declare -a DEPS=(
-        "$JEDIT_JAR" "${JEDIT_JARS[@]}" "${JFREECHART_JARS[@]}" "$XZ_JAVA_HOME/lib/xz.jar"
-        "$PURE_JAR" "$GRAPHVIEW_JAR" "${SOURCES[@]}" "${RESOURCES[@]}"
+        "$JEDIT_JAR" "${JEDIT_JARS[@]}" "$PURE_JAR" "$GRAPHVIEW_JAR"
+        "${SOURCES[@]}" "${RESOURCES[@]}"
       )
     elif [ -e "$ISABELLE_HOME/Admin/build" ]; then
       declare -a DEPS=("$PURE_JAR" "$GRAPHVIEW_JAR" "${SOURCES[@]}" "${RESOURCES[@]}")
@@ -293,16 +287,15 @@
 
   cp -p -R -f "${JEDIT_JARS[@]}" dist/jars/. || failed
   (
-    #workaround for scalac
+    #workaround for scalac 2.10.2
     function stty() { :; }
     export -f stty
 
-    for JAR in "$JEDIT_JAR" "${JEDIT_JARS[@]}" "${JFREECHART_JARS[@]}" \
-      "$XZ_JAVA_HOME/lib/xz.jar" "$PURE_JAR" "$GRAPHVIEW_JAR" "$SCALA_HOME/lib/scala-compiler.jar"
+    for JAR in "$JEDIT_JAR" "${JEDIT_JARS[@]}" "$PURE_JAR" "$GRAPHVIEW_JAR"
     do
-      CLASSPATH="$CLASSPATH:$JAR"
+      classpath "$JAR"
     done
-    CLASSPATH="$(jvmpath "$CLASSPATH")"
+    export CLASSPATH="$(jvmpath "$ISABELLE_CLASSPATH")"
     exec "$SCALA_HOME/bin/scalac" $ISABELLE_SCALA_BUILD_OPTIONS -d dist/classes "${SOURCES[@]}"
   ) || fail "Failed to compile sources"
 
@@ -317,9 +310,9 @@
 
 ## main
 
-if [ "$BUILD_ONLY" = false ]; then
+if [ "$BUILD_ONLY" = false ]
+then
   export JEDIT_SESSION_DIRS JEDIT_LOGIC JEDIT_PRINT_MODE JEDIT_BUILD_MODE
-
-  exec "$ISABELLE_TOOL" java "${JAVA_ARGS[@]}" \
-    -classpath "$(jvmpath "$JEDIT_HOME/dist/jedit.jar")" isabelle.Main "${ARGS[@]}"
+  classpath "$JEDIT_HOME/dist/jedit.jar"
+  exec "$ISABELLE_TOOL" java "${JAVA_ARGS[@]}" isabelle.Main "${ARGS[@]}"
 fi
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Tools/jEdit/patches/jedit/numeric_keypad	Fri Sep 13 09:31:45 2013 +0200
@@ -0,0 +1,50 @@
+--- 5.1.0/jEdit/org/gjt/sp/jedit/gui/KeyEventWorkaround.java	2013-07-28 19:03:38.000000000 +0200
++++ 5.1.0/jEdit-patched/org/gjt/sp/jedit/gui/KeyEventWorkaround.java	2013-09-10 21:55:21.220043663 +0200
+@@ -129,7 +129,7 @@
+ 		case KeyEvent.VK_OPEN_BRACKET :
+ 		case KeyEvent.VK_BACK_SLASH   :
+ 		case KeyEvent.VK_CLOSE_BRACKET:
+-	/*	case KeyEvent.VK_NUMPAD0 :
++		case KeyEvent.VK_NUMPAD0 :
+ 		case KeyEvent.VK_NUMPAD1 :
+ 		case KeyEvent.VK_NUMPAD2 :
+ 		case KeyEvent.VK_NUMPAD3 :
+@@ -144,7 +144,7 @@
+ 		case KeyEvent.VK_SEPARATOR:
+ 		case KeyEvent.VK_SUBTRACT   :
+ 		case KeyEvent.VK_DECIMAL    :
+-		case KeyEvent.VK_DIVIDE     :*/
++		case KeyEvent.VK_DIVIDE     :
+ 		case KeyEvent.VK_BACK_QUOTE:
+ 		case KeyEvent.VK_QUOTE:
+ 		case KeyEvent.VK_DEAD_GRAVE:
+@@ -202,28 +202,7 @@
+ 	//{{{ isNumericKeypad() method
+ 	public static boolean isNumericKeypad(int keyCode)
+ 	{
+-		switch(keyCode)
+-		{
+-		case KeyEvent.VK_NUMPAD0:
+-		case KeyEvent.VK_NUMPAD1:
+-		case KeyEvent.VK_NUMPAD2:
+-		case KeyEvent.VK_NUMPAD3:
+-		case KeyEvent.VK_NUMPAD4:
+-		case KeyEvent.VK_NUMPAD5:
+-		case KeyEvent.VK_NUMPAD6:
+-		case KeyEvent.VK_NUMPAD7:
+-		case KeyEvent.VK_NUMPAD8:
+-		case KeyEvent.VK_NUMPAD9:
+-		case KeyEvent.VK_MULTIPLY:
+-		case KeyEvent.VK_ADD:
+-		/* case KeyEvent.VK_SEPARATOR: */
+-		case KeyEvent.VK_SUBTRACT:
+-		case KeyEvent.VK_DECIMAL:
+-		case KeyEvent.VK_DIVIDE:
+-			return true;
+-		default:
+-			return false;
+-		}
++		return false;
+ 	} //}}}
+ 
+ 	//{{{ processKeyEvent() method
--- a/src/Tools/jEdit/src/rendering.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/jEdit/src/rendering.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -75,6 +75,7 @@
     Map[String, Byte](
       Keyword.THY_END -> KEYWORD2,
       Keyword.THY_SCRIPT -> LABEL,
+      Keyword.QED_SCRIPT -> DIGIT,
       Keyword.PRF_SCRIPT -> DIGIT,
       Keyword.PRF_ASM -> KEYWORD3,
       Keyword.PRF_ASM_GOAL -> KEYWORD3,
--- a/src/Tools/jEdit/src/scala_console.scala	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/jEdit/src/scala_console.scala	Fri Sep 13 09:31:45 2013 +0200
@@ -46,7 +46,14 @@
         find_files(new JFile(start),
           entry => entry.isFile && entry.getName.endsWith(".jar")).map(_.getAbsolutePath)
       else Nil
-    val path = find_jars(jEdit.getSettingsDirectory) ::: find_jars(jEdit.getJEditHome)
+
+    val initial_class_path =
+      Library.space_explode(JFile.pathSeparatorChar, System.getProperty("java.class.path", ""))
+
+    val path =
+      initial_class_path :::
+      find_jars(jEdit.getSettingsDirectory) :::
+      find_jars(jEdit.getJEditHome)
     path.mkString(JFile.pathSeparator)
   }
 
--- a/src/Tools/subtyping.ML	Thu Sep 12 22:10:17 2013 +0200
+++ b/src/Tools/subtyping.ML	Fri Sep 13 09:31:45 2013 +0200
@@ -1046,9 +1046,9 @@
     val tmaps =
       sort (Name_Space.extern_ord ctxt type_space o pairself #1)
         (Symtab.dest (tmaps_of ctxt));
-    fun show_map (x, (t, _)) =
+    fun show_map (c, (t, _)) =
       Pretty.block
-       [Pretty.mark_str (Name_Space.markup_extern ctxt type_space x), Pretty.str ":",
+       [Name_Space.pretty ctxt type_space c, Pretty.str ":",
         Pretty.brk 1, Pretty.quote (Syntax.pretty_term ctxt t)];
   in
    [Pretty.big_list "coercions between base types:" (map show_coercion simple),