Merge
author paulson <lp15@cam.ac.uk>
Tue, 10 Nov 2015 14:43:29 +0000
changeset 61610 4f54d2759a0b
parent 61609 77b453bd616f (current diff)
parent 61608 a0487caabb4a (diff)
child 61611 a9c0572109af
Merge
src/HOL/Decision_Procs/Approximation.thy
src/HOL/Decision_Procs/Ferrack.thy
src/HOL/Decision_Procs/MIR.thy
src/HOL/Library/Extended_Real.thy
src/HOL/Library/Formal_Power_Series.thy
src/HOL/Multivariate_Analysis/Complex_Analysis_Basics.thy
src/HOL/Multivariate_Analysis/Complex_Transcendental.thy
src/HOL/Multivariate_Analysis/PolyRoots.thy
src/HOL/Multivariate_Analysis/Weierstrass.thy
src/HOL/Probability/Binary_Product_Measure.thy
src/HOL/Probability/Giry_Monad.thy
src/HOL/Probability/Lebesgue_Measure.thy
src/HOL/Probability/Probability_Mass_Function.thy
src/HOL/Probability/Projective_Limit.thy
src/HOL/Probability/Radon_Nikodym.thy
src/HOL/Probability/Sigma_Algebra.thy
src/Pure/Concurrent/simple_thread.ML
src/Pure/Concurrent/simple_thread.scala
--- a/Admin/components/components.sha1	Tue Nov 10 14:18:41 2015 +0000
+++ b/Admin/components/components.sha1	Tue Nov 10 14:43:29 2015 +0000
@@ -31,6 +31,9 @@
 eccff31931fb128c1dd522cfc85495c9b66e67af  Haskabelle-2015.tar.gz
 683acd94761ef460cca1a628f650355370de5afb  hol-light-bundle-0.5-126.tar.gz
 20b53cfc3ffc5b15c1eabc91846915b49b4c0367  isabelle_fonts-20151021.tar.gz
+736844204b2ef83974cd9f0a215738b767958c41  isabelle_fonts-20151104.tar.gz
+9502c1aea938021f154adadff254c5c55da344bd  isabelle_fonts-20151106.tar.gz
+f5c63689a394b974ac0d365debda577c6fa31c07  isabelle_fonts-20151107.tar.gz
 8d83e433c1419e0c0cc5fd1762903d11b4a5752c  jdk-6u31.tar.gz
 38d2d2a91c66714c18430e136e7e5191af3996e6  jdk-7u11.tar.gz
 d765bc4ad2f34d494429b2a8c1563c49db224944  jdk-7u13.tar.gz
--- a/Admin/components/main	Tue Nov 10 14:18:41 2015 +0000
+++ b/Admin/components/main	Tue Nov 10 14:43:29 2015 +0000
@@ -4,7 +4,7 @@
 e-1.8
 exec_process-1.0.3
 Haskabelle-2015
-isabelle_fonts-20151021
+isabelle_fonts-20151107
 jdk-8u66
 jedit_build-20151023
 jfreechart-1.0.14-1
--- a/Admin/polyml/future/ROOT.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/Admin/polyml/future/ROOT.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -101,7 +101,7 @@
 use "General/properties.ML";
 use "General/timing.ML";
 
-use "Concurrent/simple_thread.ML";
+use "Concurrent/standard_thread.ML";
 use "Concurrent/synchronized.ML";
 use "General/markup.ML";
 use "Concurrent/single_assignment.ML";
--- a/NEWS	Tue Nov 10 14:18:41 2015 +0000
+++ b/NEWS	Tue Nov 10 14:43:29 2015 +0000
@@ -22,9 +22,17 @@
 * Toplevel theorem statement 'proposition' is another alias for
 'theorem'.
 
+* Syntax for formal comments "-- text" now also supports the symbolic
+form "\<comment> text". Command-line tool "isabelle update_cartouches -c" helps
+to update old sources.
+
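For illustration only (not part of the patch): a before/after sketch of the comment-syntax entry above, mirroring the Doc/Eisbach/Manual.thy hunks later in this changeset; "isabelle update_cartouches -c" performs exactly this rewriting.

  old:  conjI  --  \<open>@{thm conjI}\<close>
  new:  conjI  \<comment>  \<open>@{thm conjI}\<close>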
 
 *** Prover IDE -- Isabelle/Scala/jEdit ***
 
+* Completion of symbols via prefix of \<name> or \<^name> or \name is
+always possible, independently of the language context. It is never
+implicit: a popup will show up unconditionally.
+
 * Improved scheduling for urgent print tasks (e.g. command state output,
 interactive queries) wrt. long-running background tasks.
 
@@ -46,9 +54,12 @@
 
 * The State panel manages explicit proof state output, with jEdit action
 "isabelle.update-state" (shortcut S+ENTER) to trigger update according
-to cursor position. Option "editor_output_state" controls implicit proof
-state output in the Output panel: suppressing this reduces resource
-requirements of prover time and GUI space.
+to cursor position.
+
+* The Output panel no longer shows proof state output by default. This
+reduces resource requirements of prover time and GUI space.
+INCOMPATIBILITY, use the State panel instead or enable option
+"editor_output_state".
 
 * Action "isabelle-emph" (with keyboard shortcut C+e LEFT) controls
 emphasized text style; the effect is visible in document output, not in
@@ -75,12 +86,14 @@
 
 * There is a new short form for antiquotations with a single argument
 that is a cartouche: \<^name>\<open>...\<close> is equivalent to @{name \<open>...\<close>} and
-\<open>...\<close> without control symbol is equivalent to @{cartouche \<open>...\<close>}. The
+\<open>...\<close> without control symbol is equivalent to @{cartouche \<open>...\<close>}.
+\<^name> without following cartouche is equivalent to @{name}. The
 standard Isabelle fonts provide glyphs to render important control
 symbols, e.g. "\<^verbatim>", "\<^emph>", "\<^bold>".
 
-* System option "document_symbols" determines completion of Isabelle
-symbols within document source.
+* Antiquotations @{noindent}, @{smallskip}, @{medskip}, @{bigskip} with
+corresponding control symbols \<^noindent>, \<^smallskip>, \<^medskip>, \<^bigskip> specify spacing formally, using
+standard LaTeX macros of the same names.
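For illustration only (not part of the patch): the spacing control symbols are written directly inside document text, as in the Doc/Eisbach/Manual.thy sources further down; the theory text here is hypothetical.

  text \<open>
    First paragraph.
    \<^medskip>
    Second paragraph, preceded by a medium vertical space (LaTeX medskip).
  \<close>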
 
 * Antiquotation @{cartouche} in Isabelle/Pure is the same as @{text}.
 Consequently, \<open>...\<close> without any decoration prints literal quasi-formal
@@ -109,13 +122,6 @@
   \<^enum>  enumerate
   \<^descr>  description
 
-* Text may contain control symbols for markup and formatting as follows:
-
-  \<^noindent>   \noindent
-  \<^smallskip>   \smallskip
-  \<^medskip>   \medskip
-  \<^bigskip>   \bigskip
-
 * Command 'text_raw' has been clarified: input text is processed as in
 'text' (with antiquotations and control symbols). The key difference is
 the lack of the surrounding isabelle markup environment in output.
@@ -124,6 +130,9 @@
 recursively, adding appropriate text style markup. These are typically
 used in the short form \<^emph>\<open>...\<close> and \<^bold>\<open>...\<close>.
 
+* Document antiquotation @{footnote} outputs LaTeX source recursively,
+marked as \footnote{}. This is typically used in the short form \<^footnote>\<open>...\<close>.
+
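For illustration only (not part of the patch): a hypothetical use of the \<^emph>, \<^bold> and \<^footnote> short forms described above, with recursively marked-up footnote content.

  text \<open>This is \<^emph>\<open>emphasized\<close>, this is \<^bold>\<open>bold\<close>, and this sentence
    carries a footnote\<^footnote>\<open>with \<^emph>\<open>recursively\<close> marked-up content\<close>.\<close>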
 
 *** Isar ***
 
@@ -267,6 +276,14 @@
 
 *** Pure ***
 
+* Qualifiers in locale expressions default to mandatory ('!') regardless
+of the command. Previously, for 'locale' and 'sublocale' the default was
+optional ('?'). The old syntax '!' has been discontinued.
+INCOMPATIBILITY, remove '!' and add '?' as required.
+
+* Keyword 'rewrites' identifies rewrite morphisms in interpretation
+commands.  Previously, the keyword was 'where'.  INCOMPATIBILITY.
+
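The 'rewrites' change is exercised by documentation hunks later in this same changeset, e.g. in src/Doc/Classes/Classes.thy (quoted here without the %quote document tag; the proof follows unchanged):

  interpretation list_monoid: monoid append "[]" rewrites
    "monoid.pow_nat append [] = replicate"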
 * Command 'print_definitions' prints dependencies of definitional
 specifications. This functionality used to be part of 'print_theory'.
 
@@ -485,11 +502,18 @@
 
 * Imperative_HOL: obsolete theory Legacy_Mrec has been removed.
 
-* Library/Omega_Words_Fun: Infinite words modeled as functions nat => 'a.
+* Library/Omega_Words_Fun: Infinite words modeled as functions nat =>
+'a.
+
+* HOL-Statespace: command 'statespace' uses mandatory qualifier for
+import of parent, as for general 'locale' expressions. INCOMPATIBILITY,
+remove '!' and add '?' as required.
 
 
 *** ML ***
 
+* Antiquotation @{undefined} or \<^undefined> inlines (raise Match).
+
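For illustration only: a hypothetical Isabelle/ML fragment using the new antiquotation; the function name is made up, and @{undefined} expands to (raise Match) as stated above.

  ML \<open>
    fun strict_hd (x :: _) = x
      | strict_hd [] = @{undefined}   (* inlines (raise Match) *)
  \<close>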
 * The auxiliary module Pure/display.ML has been eliminated. Its
 elementary thm print operations are now in Pure/more_thm.ML and thus
 called Thm.pretty_thm, Thm.string_of_thm etc. INCOMPATIBILITY.
@@ -528,10 +552,14 @@
 
 *** System ***
 
+* Global session timeout is multiplied by the timeout_scale factor. This
+allows large-scale tests (e.g. AFP) to be adjusted to overall hardware
+performance.
+
 * Property values in etc/symbols may contain spaces, if written with the
 replacement character "␣" (Unicode point 0x2423).  For example:
 
-  \<star>  code: 0x0022c6  group: operator  font: Deja␣Vu␣Sans␣Mono
+    \<star>  code: 0x0022c6  group: operator  font: Deja␣Vu␣Sans␣Mono
 
 * Command-line tool "isabelle update_then" expands old Isar command
 conflations:
@@ -568,6 +596,9 @@
 * Bash shell function "jvmpath" has been renamed to "platform_path": it
 is relevant both for Poly/ML and JVM processes.
 
+* Heap images are 10-15% smaller due to less wasteful persistent theory
+content (using ML type theory_id instead of theory).
+
 
 
 New in Isabelle2015 (May 2015)
--- a/etc/options	Tue Nov 10 14:18:41 2015 +0000
+++ b/etc/options	Tue Nov 10 14:43:29 2015 +0000
@@ -92,6 +92,9 @@
 option timeout : real = 0
   -- "timeout for session build job (seconds > 0)"
 
+option timeout_scale : real = 1.0
+  -- "scale factor for session timeout"
+
 option process_output_limit : int = 100
   -- "build process output limit in million characters (0 = unlimited)"
 
@@ -140,7 +143,7 @@
 public option editor_continuous_checking : bool = true
   -- "continuous checking of proof document (visible and required parts)"
 
-public option editor_output_state : bool = true
+public option editor_output_state : bool = false
   -- "implicit output of proof state"
 
 option editor_execution_delay : real = 0.02
@@ -164,5 +167,3 @@
 public option completion_limit : int = 40
   -- "limit for completion within the formal context"
 
-public option document_symbols : bool = false
-  -- "completion of Isabelle symbols within document source"
--- a/etc/symbols	Tue Nov 10 14:18:41 2015 +0000
+++ b/etc/symbols	Tue Nov 10 14:43:29 2015 +0000
@@ -349,9 +349,11 @@
 \<some>                 code: 0x0003f5
 \<hole>                 code: 0x002311
 \<newline>              code: 0x0023ce
+\<comment>              code: 0x002015  font: IsabelleText
 \<open>                 code: 0x002039  group: punctuation  font: IsabelleText  abbrev: <<
 \<close>                code: 0x00203a  group: punctuation  font: IsabelleText  abbrev: >>
 \<here>                 code: 0x002302  font: IsabelleText
+\<^undefined>           code: 0x002756  group: control  font: IsabelleText
 \<^noindent>            code: 0x0021e4  group: control  font: IsabelleText
 \<^smallskip>           code: 0x002508  group: control  font: IsabelleText
 \<^medskip>             code: 0x002509  group: control  font: IsabelleText
@@ -359,6 +361,7 @@
 \<^item>                code: 0x0025aa  group: control  font: IsabelleText
 \<^enum>                code: 0x0025b8  group: control  font: IsabelleText
 \<^descr>               code: 0x0027a7  group: control  font: IsabelleText
+\<^footnote>            code: 0x00204b  group: control  font: IsabelleText
 \<^verbatim>            code: 0x0025a9  group: control  font: IsabelleText
 \<^emph>                code: 0x002217  group: control  font: IsabelleText
 \<^bold>                code: 0x002759  group: control  font: IsabelleText
--- a/lib/Tools/update_cartouches	Tue Nov 10 14:18:41 2015 +0000
+++ b/lib/Tools/update_cartouches	Tue Nov 10 14:43:29 2015 +0000
@@ -15,6 +15,7 @@
   echo "Usage: isabelle $PRG [FILES|DIRS...]"
   echo
   echo "  Options are:"
+  echo "    -c           replace comment marker \"--\" by symbol \"\\<comment>\""
   echo "    -t           replace @{text} antiquotations within text tokens"
   echo
   echo "  Recursively find .thy files and update theory syntax to use cartouches"
@@ -30,11 +31,15 @@
 
 # options
 
+COMMENT="false"
 TEXT="false"
 
-while getopts "t" OPT
+while getopts "ct" OPT
 do
   case "$OPT" in
+    c)
+      COMMENT="true"
+      ;;
     t)
       TEXT="true"
       ;;
@@ -57,4 +62,4 @@
 ## main
 
 find $SPECS -name \*.thy -print0 | \
-  xargs -0 "$ISABELLE_TOOL" java isabelle.Update_Cartouches "$TEXT"
+  xargs -0 "$ISABELLE_TOOL" java isabelle.Update_Cartouches "$COMMENT" "$TEXT"
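An assumed typical invocation after this change, following the tool's own usage line (the directory argument is hypothetical):

  isabelle update_cartouches -c src/HOL/Library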
--- a/lib/fonts/IsabelleText.sfd	Tue Nov 10 14:18:41 2015 +0000
+++ b/lib/fonts/IsabelleText.sfd	Tue Nov 10 14:43:29 2015 +0000
@@ -19,7 +19,7 @@
 OS2_WeightWidthSlopeOnly: 0
 OS2_UseTypoMetrics: 1
 CreationTime: 1050361371
-ModificationTime: 1445439176
+ModificationTime: 1446896286
 PfmFamily: 17
 TTFWeight: 400
 TTFWidth: 5
@@ -2241,11 +2241,11 @@
 DisplaySize: -96
 AntiAlias: 1
 FitToEm: 1
-WinInfo: 9558 18 16
+WinInfo: 9864 18 16
 BeginPrivate: 0
 EndPrivate
 TeXData: 1 0 0 631296 315648 210432 572416 -1048576 210432 783286 444596 497025 792723 393216 433062 380633 303038 157286 324010 404750 52429 2506097 1059062 262144
-BeginChars: 1114189 1393
+BeginChars: 1114189 1398
 
 StartChar: u10000
 Encoding: 65536 65536 0
@@ -16604,69 +16604,32 @@
 
 StartChar: endash
 Encoding: 8211 8211 178
-Width: 1233
-Flags: W
-TtInstrs:
-PUSHB_7
- 2
- 182
- 0
- 253
- 4
- 1
- 0
-MDAP[rnd]
-MDRP[rnd,white]
-IUP[x]
-SVTCA[y-axis]
-SRP0
-MIRP[rp0,min,rnd,grey]
-MIRP[min,rnd,grey]
-IUP[y]
-EndTTInstrs
-LayerCount: 2
-Fore
-SplineSet
-0 633 m 1,0,-1
- 1233 633 l 1,1,-1
- 1233 492 l 1,2,-1
- 0 492 l 1,3,-1
- 0 633 l 1,0,-1
-EndSplineSet
-Validated: 1
+Width: 1024
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+100 633 m 1,0,-1
+ 924 633 l 1,1,-1
+ 924 489 l 1,2,-1
+ 100 489 l 1,3,-1
+ 100 633 l 1,0,-1
+EndSplineSet
 EndChar
 
 StartChar: emdash
 Encoding: 8212 8212 179
-Width: 1233
-Flags: W
-TtInstrs:
-PUSHB_6
- 2
- 182
- 0
- 4
- 1
- 0
-MDAP[rnd]
-MDRP[rnd,grey]
-IUP[x]
-SVTCA[y-axis]
-SRP0
-MDRP[rp0,rnd,grey]
-MIRP[min,rnd,grey]
-IUP[y]
-EndTTInstrs
-LayerCount: 2
-Fore
-SplineSet
-0 633 m 1,0,-1
- 1233 633 l 1,1,-1
- 1233 492 l 1,2,-1
- 0 492 l 1,3,-1
- 0 633 l 1,0,-1
-EndSplineSet
-Validated: 1
+Width: 2048
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+100 633 m 1,0,-1
+ 1948 633 l 1,1,-1
+ 1948 489 l 1,2,-1
+ 100 489 l 1,3,-1
+ 100 633 l 1,0,-1
+EndSplineSet
 EndChar
 
 StartChar: quotedblleft
@@ -61936,5 +61899,151 @@
  6 -78 l 1,112,-1
 EndSplineSet
 EndChar
+
+StartChar: afii00208
+Encoding: 8213 8213 1393
+Width: 2048
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+0 633 m 1,0,-1
+ 2048 633 l 1,1,-1
+ 2048 489 l 1,2,-1
+ 0 489 l 1,3,-1
+ 0 633 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni204B
+Encoding: 8267 8267 1394
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+651 1493 m 2,0,1
+ 866 1493 866 1493 996.5 1377 c 128,-1,2
+ 1127 1261 1127 1261 1127 1071 c 0,3,4
+ 1127 887 1127 887 1009 776.5 c 128,-1,5
+ 891 666 891 666 676 649 c 1,6,-1
+ 676 -197 l 1,7,-1
+ 535 -197 l 1,8,-1
+ 535 1370 l 1,9,-1
+ 344 1370 l 1,10,-1
+ 344 -197 l 1,11,-1
+ 203 -197 l 1,12,-1
+ 203 1493 l 1,13,-1
+ 651 1493 l 2,0,1
+EndSplineSet
+EndChar
+
+StartChar: uni2B1A
+Encoding: 11034 11034 1395
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+1227 126 m 1,0,-1
+ 1227 -78 l 1,1,-1
+ 1022 -78 l 1,2,-1
+ 1022 36 l 1,3,-1
+ 1112 36 l 1,4,-1
+ 1112 126 l 1,5,-1
+ 1227 126 l 1,0,-1
+1227 454 m 1,6,-1
+ 1227 280 l 1,7,-1
+ 1112 280 l 1,8,-1
+ 1112 454 l 1,9,-1
+ 1227 454 l 1,6,-1
+1227 788 m 1,10,-1
+ 1227 609 l 1,11,-1
+ 1112 609 l 1,12,-1
+ 1112 788 l 1,13,-1
+ 1227 788 l 1,10,-1
+868 36 m 1,14,-1
+ 868 -78 l 1,15,-1
+ 694 -78 l 1,16,-1
+ 694 36 l 1,17,-1
+ 868 36 l 1,14,-1
+540 36 m 1,18,-1
+ 540 -78 l 1,19,-1
+ 360 -78 l 1,20,-1
+ 360 36 l 1,21,-1
+ 540 36 l 1,18,-1
+120 126 m 1,22,-1
+ 120 36 l 1,23,-1
+ 206 36 l 1,24,-1
+ 206 -78 l 1,25,-1
+ 6 -78 l 1,26,-1
+ 6 126 l 1,27,-1
+ 120 126 l 1,22,-1
+120 454 m 1,28,-1
+ 120 280 l 1,29,-1
+ 6 280 l 1,30,-1
+ 6 454 l 1,31,-1
+ 120 454 l 1,28,-1
+120 788 m 1,32,-1
+ 120 609 l 1,33,-1
+ 6 609 l 1,34,-1
+ 6 788 l 1,35,-1
+ 120 788 l 1,32,-1
+1227 1142 m 1,36,-1
+ 1227 942 l 1,37,-1
+ 1112 942 l 1,38,-1
+ 1112 1028 l 1,39,-1
+ 1022 1028 l 1,40,-1
+ 1022 1142 l 1,41,-1
+ 1227 1142 l 1,36,-1
+868 1142 m 1,42,-1
+ 868 1028 l 1,43,-1
+ 694 1028 l 1,44,-1
+ 694 1142 l 1,45,-1
+ 868 1142 l 1,42,-1
+540 1142 m 1,46,-1
+ 540 1028 l 1,47,-1
+ 360 1028 l 1,48,-1
+ 360 1142 l 1,49,-1
+ 540 1142 l 1,46,-1
+206 1142 m 1,50,-1
+ 206 1028 l 1,51,-1
+ 120 1028 l 1,52,-1
+ 120 942 l 1,53,-1
+ 6 942 l 1,54,-1
+ 6 1142 l 1,55,-1
+ 206 1142 l 1,50,-1
+EndSplineSet
+EndChar
+
+StartChar: uni2756
+Encoding: 10070 10070 1397
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+44 569 m 1,0,-1
+ 303 828 l 1,1,-1
+ 563 569 l 1,2,-1
+ 303 309 l 1,3,-1
+ 44 569 l 1,0,-1
+670 569 m 1,4,-1
+ 930 827 l 1,5,-1
+ 1189 569 l 1,6,-1
+ 930 308 l 1,7,-1
+ 670 569 l 1,4,-1
+358 879 m 1,8,-1
+ 618 1140 l 1,9,-1
+ 878 879 l 1,10,-1
+ 618 620 l 1,11,-1
+ 358 879 l 1,8,-1
+358 254 m 1,12,-1
+ 618 514 l 1,13,-1
+ 878 254 l 1,14,-1
+ 618 -6 l 1,15,-1
+ 358 254 l 1,12,-1
+EndSplineSet
+EndChar
 EndChars
 EndSplineFont
--- a/lib/fonts/IsabelleTextBold.sfd	Tue Nov 10 14:18:41 2015 +0000
+++ b/lib/fonts/IsabelleTextBold.sfd	Tue Nov 10 14:43:29 2015 +0000
@@ -20,7 +20,7 @@
 OS2_WeightWidthSlopeOnly: 0
 OS2_UseTypoMetrics: 1
 CreationTime: 1050374980
-ModificationTime: 1445439141
+ModificationTime: 1446896348
 PfmFamily: 17
 TTFWeight: 700
 TTFWidth: 5
@@ -1678,10 +1678,10 @@
 DisplaySize: -96
 AntiAlias: 1
 FitToEm: 1
-WinInfo: 9534 21 15
+WinInfo: 9996 21 15
 BeginPrivate: 0
 EndPrivate
-BeginChars: 1114115 1385
+BeginChars: 1114115 1389
 
 StartChar: .notdef
 Encoding: 1114112 -1 0
@@ -42258,63 +42258,31 @@
 
 StartChar: endash
 Encoding: 8211 8211 999
-Width: 1233
-Flags: W
-TtInstrs:
-PUSHB_5
- 2
- 0
- 4
- 1
- 0
-MDAP[rnd]
-MDRP[min,rnd,white]
-IUP[x]
-SVTCA[y-axis]
-SRP0
-MDRP[rp0,rnd,grey]
-MDRP[min,rnd,grey]
-IUP[y]
-EndTTInstrs
-LayerCount: 2
-Fore
-SplineSet
-0 690 m 1,0,-1
- 1233 690 l 1,1,-1
- 1233 444 l 1,2,-1
- 0 444 l 1,3,-1
- 0 690 l 1,0,-1
+Width: 1024
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+110 690 m 1,0,-1
+ 914 690 l 1,1,-1
+ 914 432 l 1,2,-1
+ 110 432 l 1,3,-1
+ 110 690 l 1,0,-1
 EndSplineSet
 EndChar
 
 StartChar: emdash
 Encoding: 8212 8212 1000
-Width: 1233
-Flags: W
-TtInstrs:
-PUSHB_5
- 2
- 0
- 4
- 1
- 0
-MDAP[rnd]
-MDRP[min,rnd,grey]
-IUP[x]
-SVTCA[y-axis]
-SRP0
-MDRP[rp0,rnd,grey]
-MDRP[min,rnd,grey]
-IUP[y]
-EndTTInstrs
-LayerCount: 2
-Fore
-SplineSet
-0 690 m 1,0,-1
- 1233 690 l 1,1,-1
- 1233 444 l 1,2,-1
- 0 444 l 1,3,-1
- 0 690 l 1,0,-1
+Width: 2048
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+110 690 m 1,0,-1
+ 1938 690 l 1,1,-1
+ 1938 432 l 1,2,-1
+ 110 432 l 1,3,-1
+ 110 690 l 1,0,-1
 EndSplineSet
 EndChar
 
@@ -68173,5 +68141,151 @@
  6 -78 l 1,112,-1
 EndSplineSet
 EndChar
+
+StartChar: afii00208
+Encoding: 8213 8213 1385
+Width: 2048
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+0 690 m 1,0,-1
+ 2048 690 l 1,1,-1
+ 2048 432 l 1,2,-1
+ 0 432 l 1,3,-1
+ 0 690 l 1,0,-1
+EndSplineSet
+EndChar
+
+StartChar: uni204B
+Encoding: 8267 8267 1386
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+688 1493 m 2,0,1
+ 903 1493 903 1493 1033 1377 c 128,-1,2
+ 1163 1261 1163 1261 1163 1071 c 0,3,4
+ 1163 893 1163 893 1052.5 783.5 c 128,-1,5
+ 942 674 942 674 737 649 c 1,6,-1
+ 737 -197 l 1,7,-1
+ 549 -197 l 1,8,-1
+ 549 1346 l 1,9,-1
+ 359 1346 l 1,10,-1
+ 359 -197 l 1,11,-1
+ 168 -197 l 1,12,-1
+ 168 1493 l 1,13,-1
+ 688 1493 l 2,0,1
+EndSplineSet
+EndChar
+
+StartChar: uni2B1A
+Encoding: 11034 11034 1387
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+1227 126 m 1,0,-1
+ 1227 -78 l 1,1,-1
+ 1022 -78 l 1,2,-1
+ 1022 36 l 1,3,-1
+ 1112 36 l 1,4,-1
+ 1112 126 l 1,5,-1
+ 1227 126 l 1,0,-1
+1227 454 m 1,6,-1
+ 1227 280 l 1,7,-1
+ 1112 280 l 1,8,-1
+ 1112 454 l 1,9,-1
+ 1227 454 l 1,6,-1
+1227 788 m 1,10,-1
+ 1227 609 l 1,11,-1
+ 1112 609 l 1,12,-1
+ 1112 788 l 1,13,-1
+ 1227 788 l 1,10,-1
+868 36 m 1,14,-1
+ 868 -78 l 1,15,-1
+ 694 -78 l 1,16,-1
+ 694 36 l 1,17,-1
+ 868 36 l 1,14,-1
+540 36 m 1,18,-1
+ 540 -78 l 1,19,-1
+ 360 -78 l 1,20,-1
+ 360 36 l 1,21,-1
+ 540 36 l 1,18,-1
+120 126 m 1,22,-1
+ 120 36 l 1,23,-1
+ 206 36 l 1,24,-1
+ 206 -78 l 1,25,-1
+ 6 -78 l 1,26,-1
+ 6 126 l 1,27,-1
+ 120 126 l 1,22,-1
+120 454 m 1,28,-1
+ 120 280 l 1,29,-1
+ 6 280 l 1,30,-1
+ 6 454 l 1,31,-1
+ 120 454 l 1,28,-1
+120 788 m 1,32,-1
+ 120 609 l 1,33,-1
+ 6 609 l 1,34,-1
+ 6 788 l 1,35,-1
+ 120 788 l 1,32,-1
+1227 1142 m 1,36,-1
+ 1227 942 l 1,37,-1
+ 1112 942 l 1,38,-1
+ 1112 1028 l 1,39,-1
+ 1022 1028 l 1,40,-1
+ 1022 1142 l 1,41,-1
+ 1227 1142 l 1,36,-1
+868 1142 m 1,42,-1
+ 868 1028 l 1,43,-1
+ 694 1028 l 1,44,-1
+ 694 1142 l 1,45,-1
+ 868 1142 l 1,42,-1
+540 1142 m 1,46,-1
+ 540 1028 l 1,47,-1
+ 360 1028 l 1,48,-1
+ 360 1142 l 1,49,-1
+ 540 1142 l 1,46,-1
+206 1142 m 1,50,-1
+ 206 1028 l 1,51,-1
+ 120 1028 l 1,52,-1
+ 120 942 l 1,53,-1
+ 6 942 l 1,54,-1
+ 6 1142 l 1,55,-1
+ 206 1142 l 1,50,-1
+EndSplineSet
+EndChar
+
+StartChar: uni2756
+Encoding: 10070 10070 1388
+Width: 1233
+Flags: W
+LayerCount: 2
+Fore
+SplineSet
+44 569 m 1,0,-1
+ 303 828 l 1,1,-1
+ 563 569 l 1,2,-1
+ 303 309 l 1,3,-1
+ 44 569 l 1,0,-1
+670 569 m 1,4,-1
+ 930 827 l 1,5,-1
+ 1189 569 l 1,6,-1
+ 930 308 l 1,7,-1
+ 670 569 l 1,4,-1
+358 879 m 1,8,-1
+ 618 1140 l 1,9,-1
+ 878 879 l 1,10,-1
+ 618 620 l 1,11,-1
+ 358 879 l 1,8,-1
+358 254 m 1,12,-1
+ 618 514 l 1,13,-1
+ 878 254 l 1,14,-1
+ 618 -6 l 1,15,-1
+ 358 254 l 1,12,-1
+EndSplineSet
+EndChar
 EndChars
 EndSplineFont
--- a/lib/texinputs/isabelle.sty	Tue Nov 10 14:18:41 2015 +0000
+++ b/lib/texinputs/isabelle.sty	Tue Nov 10 14:43:29 2015 +0000
@@ -39,11 +39,6 @@
 \DeclareRobustCommand{\isactrlesup}{\egroup\egroup\endmath\egroup}
 \newcommand{\isactrlbold}[1]{{\bfseries\upshape\boldmath#1}}
 
-\def\isactrlnoindent{\noindent}
-\def\isactrlsmallskip{\smallskip}
-\def\isactrlmedskip{\medskip}
-\def\isactrlbigskip{\bigskip}
-
 \newcommand{\isaantiqcontrol}[1]{\isatt{{\char`\\}{\char`\<}{\char`\^}#1{\char`\>}}}
 \newenvironment{isaantiq}{{\isacharat\isacharbraceleft}}{{\isacharbraceright}}
 
--- a/lib/texinputs/isabellesym.sty	Tue Nov 10 14:18:41 2015 +0000
+++ b/lib/texinputs/isabellesym.sty	Tue Nov 10 14:43:29 2015 +0000
@@ -358,3 +358,4 @@
 \newcommand{\isasymclose}{\isatext{\raise.3ex\hbox{$\scriptscriptstyle\rangle$}}}
 \newcommand{\isasymhole}{\isatext{\rm\wasylozenge}}  %requires wasysym
 \newcommand{\isasymnewline}{\isatext{\fbox{$\hookleftarrow$}}}
+\newcommand{\isasymcomment}{\isatext{---}}
--- a/src/Doc/Classes/Classes.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Classes/Classes.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -457,7 +457,7 @@
   "replicate 0 _ = []"
   | "replicate (Suc n) xs = xs @ replicate n xs"
 
-interpretation %quote list_monoid: monoid append "[]" where
+interpretation %quote list_monoid: monoid append "[]" rewrites
   "monoid.pow_nat append [] = replicate"
 proof -
   interpret monoid append "[]" ..
--- a/src/Doc/Codegen/Further.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Codegen/Further.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -202,7 +202,7 @@
   The interpretation itself is enriched with an equation @{text "t = c"}:
 \<close>
 
-interpretation %quote fun_power: power "(\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)" where
+interpretation %quote fun_power: power "(\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)" rewrites
   "power.powers (\<lambda>n f. f ^^ n) = funpows"
   by unfold_locales
     (simp_all add: fun_eq_iff funpow_mult mult.commute funpows_def)
--- a/src/Doc/Eisbach/Manual.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Eisbach/Manual.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -14,8 +14,8 @@
   terms, facts, or other methods.
 
   \<^medskip>
-  The syntax diagram below refers to some syntactic categories that
-  are further defined in @{cite "isabelle-isar-ref"}.
+  The syntax diagram below refers to some syntactic categories that are
+  further defined in @{cite "isabelle-isar-ref"}.
 
   @{rail \<open>
     @@{command method} name args @'=' method
@@ -56,11 +56,10 @@
       by prop_solver\<^sub>1
 
 text \<open>
-  In this example, the facts \<open>impI\<close> and \<open>conjE\<close> are static. They
-  are evaluated once when the method is defined and cannot be changed later.
-  This makes the method stable in the sense of \<^emph>\<open>static scoping\<close>: naming
-  another fact \<open>impI\<close> in a later context won't affect the behaviour of
-  \<open>prop_solver\<^sub>1\<close>.
+  In this example, the facts \<open>impI\<close> and \<open>conjE\<close> are static. They are evaluated
+  once when the method is defined and cannot be changed later. This makes the
+  method stable in the sense of \<^emph>\<open>static scoping\<close>: naming another fact \<open>impI\<close>
+  in a later context won't affect the behaviour of \<open>prop_solver\<^sub>1\<close>.
 \<close>
 
 
@@ -100,11 +99,11 @@
 subsection \<open>Named theorems\<close>
 
 text \<open>
-  A \<open>named theorem\<close> is a fact whose contents are produced dynamically
-  within the current proof context. The Isar command @{command_ref
-  "named_theorems"} provides simple access to this concept: it declares a
-  dynamic fact with corresponding \<^emph>\<open>attribute\<close> for managing
-  this particular data slot in the context.
+  A \<^emph>\<open>named theorem\<close> is a fact whose contents are produced dynamically within
+  the current proof context. The Isar command @{command_ref "named_theorems"}
+  provides simple access to this concept: it declares a dynamic fact with
+  corresponding \<^emph>\<open>attribute\<close> for managing this particular data slot in the
+  context.
 \<close>
 
     named_theorems intros
@@ -112,7 +111,8 @@
 text \<open>
   So far \<open>intros\<close> refers to the empty fact. Using the Isar command
   @{command_ref "declare"} we may apply declaration attributes to the context.
-  Below we declare both \<open>conjI\<close> and \<open>impI\<close> as \<open>intros\<close>, adding them to the named theorem slot.
+  Below we declare both \<open>conjI\<close> and \<open>impI\<close> as \<open>intros\<close>, adding them to the
+  named theorem slot.
 \<close>
 
     declare conjI [intros] and impI [intros]
@@ -136,8 +136,8 @@
 text \<open>
   Often these named theorems need to be augmented on the spot, when a method
   is invoked. The @{keyword_def "declares"} keyword in the signature of
-  @{command method} adds the common method syntax \<open>method decl: facts\<close>
-  for each named theorem \<open>decl\<close>.
+  @{command method} adds the common method syntax \<open>method decl: facts\<close> for
+  each named theorem \<open>decl\<close>.
 \<close>
 
     method prop_solver\<^sub>4 declares intros elims =
@@ -170,11 +170,12 @@
 section \<open>Higher-order methods\<close>
 
 text \<open>
-  The \<^emph>\<open>structured concatenation\<close> combinator ``\<open>method\<^sub>1 ;
-  method\<^sub>2\<close>'' was introduced in Isabelle2015, motivated by development of
-  Eisbach. It is similar to ``\<open>method\<^sub>1, method\<^sub>2\<close>'', but \<open>method\<^sub>2\<close> is invoked on on \<^emph>\<open>all\<close> subgoals that have newly emerged from
-  \<open>method\<^sub>1\<close>. This is useful to handle cases where the number of
-  subgoals produced by a method is determined dynamically at run-time.
+  The \<^emph>\<open>structured concatenation\<close> combinator ``\<open>method\<^sub>1 ; method\<^sub>2\<close>'' was
+  introduced in Isabelle2015, motivated by development of Eisbach. It is
+  similar to ``\<open>method\<^sub>1, method\<^sub>2\<close>'', but \<open>method\<^sub>2\<close> is invoked on \<^emph>\<open>all\<close>
+  subgoals that have newly emerged from \<open>method\<^sub>1\<close>. This is useful to handle
+  cases where the number of subgoals produced by a method is determined
+  dynamically at run-time.
 \<close>
 
     method conj_with uses rule =
@@ -190,18 +191,17 @@
   method combinators with prefix syntax. For example, to more usefully exploit
   Isabelle's backtracking, the explicit requirement that a method solve all
   produced subgoals is frequently useful. This can easily be written as a
-  \<^emph>\<open>higher-order method\<close> using ``\<open>;\<close>''. The @{keyword "methods"}
-  keyword denotes method parameters that are other proof methods to be invoked
-  by the method being defined.
+  \<^emph>\<open>higher-order method\<close> using ``\<open>;\<close>''. The @{keyword "methods"} keyword
+  denotes method parameters that are other proof methods to be invoked by the
+  method being defined.
 \<close>
 
     method solve methods m = (m ; fail)
 
 text \<open>
-  Given some method-argument \<open>m\<close>, \<open>solve \<open>m\<close>\<close> applies the
-  method \<open>m\<close> and then fails whenever \<open>m\<close> produces any new unsolved
-  subgoals --- i.e. when \<open>m\<close> fails to completely discharge the goal it
-  was applied to.
+  Given some method-argument \<open>m\<close>, \<open>solve \<open>m\<close>\<close> applies the method \<open>m\<close> and then
+  fails whenever \<open>m\<close> produces any new unsolved subgoals --- i.e. when \<open>m\<close>
+  fails to completely discharge the goal it was applied to.
 \<close>
 
 
@@ -222,41 +222,43 @@
         (erule notE ; solve \<open>prop_solver\<close>))+
 
 text \<open>
-  The only non-trivial part above is the final alternative \<open>(erule notE
-  ; solve \<open>prop_solver\<close>)\<close>. Here, in the case that all other alternatives
-  fail, the method takes one of the assumptions @{term "\<not> P"} of the current
-  goal and eliminates it with the rule \<open>notE\<close>, causing the goal to be
-  proved to become @{term P}. The method then recursively invokes itself on
-  the remaining goals. The job of the recursive call is to demonstrate that
-  there is a contradiction in the original assumptions (i.e.\ that @{term P}
-  can be derived from them). Note this recursive invocation is applied with
-  the @{method solve} method combinator to ensure that a contradiction will
-  indeed be shown. In the case where a contradiction cannot be found,
-  backtracking will occur and a different assumption @{term "\<not> Q"} will be
-  chosen for elimination.
+  The only non-trivial part above is the final alternative \<open>(erule notE ;
+  solve \<open>prop_solver\<close>)\<close>. Here, in the case that all other alternatives fail,
+  the method takes one of the assumptions @{term "\<not> P"} of the current goal
+  and eliminates it with the rule \<open>notE\<close>, causing the goal to be proved to
+  become @{term P}. The method then recursively invokes itself on the
+  remaining goals. The job of the recursive call is to demonstrate that there
+  is a contradiction in the original assumptions (i.e.\ that @{term P} can be
+  derived from them). Note this recursive invocation is applied with the
+  @{method solve} method combinator to ensure that a contradiction will indeed
+  be shown. In the case where a contradiction cannot be found, backtracking
+  will occur and a different assumption @{term "\<not> Q"} will be chosen for
+  elimination.
 
   Note that the recursive call to @{method prop_solver} does not have any
-  parameters passed to it. Recall that fact parameters, e.g.\ \<open>intros\<close>, \<open>elims\<close>, and \<open>subst\<close>, are managed by declarations
-  in the current proof context. They will therefore be passed to any recursive
-  call to @{method prop_solver} and, more generally, any invocation of a
-  method which declares these named theorems.
+  parameters passed to it. Recall that fact parameters, e.g.\ \<open>intros\<close>,
+  \<open>elims\<close>, and \<open>subst\<close>, are managed by declarations in the current proof
+  context. They will therefore be passed to any recursive call to @{method
+  prop_solver} and, more generally, any invocation of a method which declares
+  these named theorems.
 
   \<^medskip>
   After declaring some standard rules to the context, the @{method
   prop_solver} becomes capable of solving non-trivial propositional
-  tautologies.\<close>
+  tautologies.
+\<close>
 
     lemmas [intros] =
-      conjI  --  \<open>@{thm conjI}\<close>
-      impI  --  \<open>@{thm impI}\<close>
-      disjCI  --  \<open>@{thm disjCI}\<close>
-      iffI  --  \<open>@{thm iffI}\<close>
-      notI  --  \<open>@{thm notI}\<close>
+      conjI  \<comment>  \<open>@{thm conjI}\<close>
+      impI  \<comment>  \<open>@{thm impI}\<close>
+      disjCI  \<comment>  \<open>@{thm disjCI}\<close>
+      iffI  \<comment>  \<open>@{thm iffI}\<close>
+      notI  \<comment>  \<open>@{thm notI}\<close>
 
     lemmas [elims] =
-      impCE  --  \<open>@{thm impCE}\<close>
-      conjE  --  \<open>@{thm conjE}\<close>
-      disjE  --  \<open>@{thm disjE}\<close>
+      impCE  \<comment>  \<open>@{thm impCE}\<close>
+      conjE  \<comment>  \<open>@{thm conjE}\<close>
+      disjE  \<comment>  \<open>@{thm disjE}\<close>
 
     lemma "(A \<or> B) \<and> (A \<longrightarrow> C) \<and> (B \<longrightarrow> C) \<longrightarrow> C"
       by prop_solver
@@ -271,15 +273,15 @@
   result of backtracking. When designing more sophisticated proof methods this
   proves too restrictive and difficult to manage conceptually.
 
-  To address this, we introduce the @{method_def "match"} method, which
-  provides more direct access to the higher-order matching facility at the
-  core of Isabelle. It is implemented as a separate proof method (in
-  Isabelle/ML), and thus can be directly applied to proofs, however it is most
-  useful when applied in the context of writing Eisbach method definitions.
+  To address this, we introduce the @{method_def match} method, which provides
+  more direct access to the higher-order matching facility at the core of
+  Isabelle. It is implemented as a separate proof method (in Isabelle/ML), and
+  thus can be directly applied to proofs, however it is most useful when
+  applied in the context of writing Eisbach method definitions.
 
   \<^medskip>
-  The syntax diagram below refers to some syntactic categories that
-  are further defined in @{cite "isabelle-isar-ref"}.
+  The syntax diagram below refers to some syntactic categories that are
+  further defined in @{cite "isabelle-isar-ref"}.
 
   @{rail \<open>
     @@{method match} kind @'in' (pattern '\<Rightarrow>' cartouche + '\<bar>')
@@ -296,10 +298,10 @@
   \<close>}
 
   Matching allows methods to introspect the goal state, and to implement more
-  explicit control flow. In the basic case, a term or fact \<open>ts\<close> is given
-  to match against as a \<^emph>\<open>match target\<close>, along with a collection of
-  pattern-method pairs \<open>(p, m)\<close>: roughly speaking, when the pattern
-  \<open>p\<close> matches any member of \<open>ts\<close>, the \<^emph>\<open>inner\<close> method \<open>m\<close> will be executed.
+  explicit control flow. In the basic case, a term or fact \<open>ts\<close> is given to
+  match against as a \<^emph>\<open>match target\<close>, along with a collection of
+  pattern-method pairs \<open>(p, m)\<close>: roughly speaking, when the pattern \<open>p\<close>
+  matches any member of \<open>ts\<close>, the \<^emph>\<open>inner\<close> method \<open>m\<close> will be executed.
 \<close>
 
     lemma
@@ -310,11 +312,11 @@
         by (match X in I: "Q \<longrightarrow> P" and I': "Q" \<Rightarrow> \<open>insert mp [OF I I']\<close>)
 
 text \<open>
-  In this example we have a structured Isar proof, with the named
-  assumption \<open>X\<close> and a conclusion @{term "P"}. With the match method
-  we can find the local facts @{term "Q \<longrightarrow> P"} and @{term "Q"}, binding them to
-  separately as \<open>I\<close> and \<open>I'\<close>. We then specialize the
-  modus-ponens rule @{thm mp [of Q P]} to these facts to solve the goal.
+  In this example we have a structured Isar proof, with the named assumption
+  \<open>X\<close> and a conclusion @{term "P"}. With the match method we can find the
+  local facts @{term "Q \<longrightarrow> P"} and @{term "Q"}, binding them separately as
+  \<open>I\<close> and \<open>I'\<close>. We then specialize the modus-ponens rule @{thm mp [of Q P]} to
+  these facts to solve the goal.
 \<close>
 
 
@@ -324,15 +326,14 @@
   In the previous example we were able to match against an assumption out of
   the Isar proof state. In general, however, proof subgoals can be
   \<^emph>\<open>unstructured\<close>, with goal parameters and premises arising from rule
-  application. To address this, @{method match} uses \<^emph>\<open>subgoal focusing\<close>
-  to produce structured goals out of
-  unstructured ones. In place of fact or term, we may give the
-  keyword @{keyword_def "premises"} as the match target. This causes a subgoal
-  focus on the first subgoal, lifting local goal parameters to fixed term
-  variables and premises into hypothetical theorems. The match is performed
-  against these theorems, naming them and binding them as appropriate.
-  Similarly giving the keyword @{keyword_def "conclusion"} matches against the
-  conclusion of the first subgoal.
+  application. To address this, @{method match} uses \<^emph>\<open>subgoal focusing\<close> to
+  produce structured goals out of unstructured ones. In place of fact or term,
+  we may give the keyword @{keyword_def "premises"} as the match target. This
+  causes a subgoal focus on the first subgoal, lifting local goal parameters
+  to fixed term variables and premises into hypothetical theorems. The match
+  is performed against these theorems, naming them and binding them as
+  appropriate. Similarly giving the keyword @{keyword_def "conclusion"}
+  matches against the conclusion of the first subgoal.
 
   An unstructured version of the previous example can then be similarly solved
   through focusing.
@@ -358,16 +359,16 @@
   now-bound @{term A} (bound to @{term P}) against the conclusion (also @{term
   P}), finally applying the specialized rule to solve the goal.
 
-  Schematic terms like \<open>?P\<close> may also be used to specify match
-  variables, but the result of the match is not bound, and thus cannot be used
-  in the inner method body.
+  Schematic terms like \<open>?P\<close> may also be used to specify match variables, but
+  the result of the match is not bound, and thus cannot be used in the inner
+  method body.
 
   \<^medskip>
-  In the following example we extract the predicate of an
-  existentially quantified conclusion in the current subgoal and search the
-  current premises for a matching fact. If both matches are successful, we
-  then instantiate the existential introduction rule with both the witness and
-  predicate, solving with the matched premise.
+  In the following example we extract the predicate of an existentially
+  quantified conclusion in the current subgoal and search the current premises
+  for a matching fact. If both matches are successful, we then instantiate the
+  existential introduction rule with both the witness and predicate, solving
+  with the matched premise.
 \<close>
 
     method solve_ex =
@@ -378,15 +379,14 @@
 text \<open>
   The first @{method match} matches the pattern @{term "\<exists>x. Q x"} against the
   current conclusion, binding the term @{term "Q"} in the inner match. Next
-  the pattern \<open>Q y\<close> is matched against all premises of the current
-  subgoal. In this case @{term "Q"} is fixed and @{term "y"} may be
-  instantiated. Once a match is found, the local fact \<open>U\<close> is bound to
-  the matching premise and the variable @{term "y"} is bound to the matching
-  witness. The existential introduction rule \<open>exI:\<close>~@{thm exI} is then
-  instantiated with @{term "y"} as the witness and @{term "Q"} as the
-  predicate, with its proof obligation solved by the local fact U (using the
-  Isar attribute @{attribute OF}). The following example is a trivial use of
-  this method.
+  the pattern \<open>Q y\<close> is matched against all premises of the current subgoal. In
+  this case @{term "Q"} is fixed and @{term "y"} may be instantiated. Once a
+  match is found, the local fact \<open>U\<close> is bound to the matching premise and the
+  variable @{term "y"} is bound to the matching witness. The existential
+  introduction rule \<open>exI:\<close>~@{thm exI} is then instantiated with @{term "y"} as
+  the witness and @{term "Q"} as the predicate, with its proof obligation
+  solved by the local fact U (using the Isar attribute @{attribute OF}). The
+  following example is a trivial use of this method.
 \<close>
 
     lemma "halts p \<Longrightarrow> \<exists>x. halts x"
@@ -419,13 +419,12 @@
   with a universal quantifier in the premises that matches the type of @{term
   y}. Since @{keyword "premises"} causes a focus, however, there are no
   subgoal premises to be found and thus @{method my_allE_bad} will always
-  fail. If focusing instead left the premises in place, using methods
-  like @{method erule} would lead to unintended behaviour, specifically during
+  fail. If focusing instead left the premises in place, using methods like
+  @{method erule} would lead to unintended behaviour, specifically during
   backtracking. In our example, @{method erule} could choose an alternate
-  premise while backtracking, while leaving \<open>I\<close> bound to the original
-  match. In the case of more complex inner methods, where either \<open>I\<close> or
-  bound terms are used, this would almost certainly not be the intended
-  behaviour.
+  premise while backtracking, while leaving \<open>I\<close> bound to the original match.
+  In the case of more complex inner methods, where either \<open>I\<close> or bound terms
+  are used, this would almost certainly not be the intended behaviour.
 
   An alternative implementation would be to specialize the elimination rule to
   the bound term and apply it directly.
@@ -444,13 +443,13 @@
   premise, it is not likely the intended behaviour. Repeated application of
   this method will produce an infinite stream of duplicate specialized
   premises, due to the original premise never being removed. To address this,
-  matched premises may be declared with the @{attribute "thin"} attribute.
-  This will hide the premise from subsequent inner matches, and remove it from
-  the list of premises when the inner method has finished and the subgoal is
+  matched premises may be declared with the @{attribute thin} attribute. This
+  will hide the premise from subsequent inner matches, and remove it from the
+  list of premises when the inner method has finished and the subgoal is
   unfocused. It can be considered analogous to the existing \<open>thin_tac\<close>.
 
-  To complete our example, the correct implementation of the method
-  will @{attribute "thin"} the premise from the match and then apply it to the
+  To complete our example, the correct implementation of the method will
+  @{attribute thin} the premise from the match and then apply it to the
   specialized elimination rule.\<close>
 
     method my_allE for y :: 'a =
@@ -460,13 +459,14 @@
     lemma "\<forall>x. P x \<Longrightarrow> \<forall>x. Q x \<Longrightarrow> P y \<and> Q y"
       by (my_allE y)+ (rule conjI)
 
+
 subsubsection \<open>Inner focusing\<close>
 
 text \<open>
-  Premises are \<^emph>\<open>accumulated\<close> for the purposes of subgoal focusing.
-  In contrast to using standard methods like @{method frule} within
-  focused match, another @{method match} will have access to all the premises
-  of the outer focus.
+  Premises are \<^emph>\<open>accumulated\<close> for the purposes of subgoal focusing. In
+  contrast to using standard methods like @{method frule} within focused
+  match, another @{method match} will have access to all the premises of the
+  outer focus.
 \<close>
 
     lemma "A \<Longrightarrow> B \<Longrightarrow> A \<and> B"
@@ -475,25 +475,23 @@
 
 text \<open>
   In this example, the inner @{method match} can find the focused premise
-  @{term B}. In contrast, the @{method assumption} method would fail here
-  due to @{term B} not being logically accessible.
+  @{term B}. In contrast, the @{method assumption} method would fail here due
+  to @{term B} not being logically accessible.
 \<close>
 
-    lemma
-    "A \<Longrightarrow> A \<and> (B \<longrightarrow> B)"
+    lemma "A \<Longrightarrow> A \<and> (B \<longrightarrow> B)"
       by (match premises in H: A \<Rightarrow> \<open>intro conjI, rule H, rule impI,
             match premises (local) in A \<Rightarrow> \<open>fail\<close>
                                  \<bar> H': B \<Rightarrow> \<open>rule H'\<close>\<close>)
 
 text \<open>
-  In this example, the only premise that exists in the first focus is
-  @{term "A"}. Prior to the inner match, the rule \<open>impI\<close> changes
-  the goal @{term "B \<longrightarrow> B"} into @{term "B \<Longrightarrow> B"}. A standard premise
-  match would also include @{term A} as an original premise of the outer
-  match. The \<open>local\<close> argument limits the match to
-  newly focused premises.
+  In this example, the only premise that exists in the first focus is @{term
+  "A"}. Prior to the inner match, the rule \<open>impI\<close> changes the goal @{term "B \<longrightarrow>
+  B"} into @{term "B \<Longrightarrow> B"}. A standard premise match would also include @{term
+  A} as an original premise of the outer match. The \<open>local\<close> argument limits
+  the match to newly focused premises.
+\<close>
 
-\<close>
 
 section \<open>Attributes\<close>
 
@@ -547,8 +545,7 @@
 text \<open>
   The @{attribute of} attribute behaves similarly. It is worth noting,
   however, that the positional instantiation of @{attribute of} occurs against
-  the position of the variables as they are declared \<^emph>\<open>in the match
-  pattern\<close>.
+  the position of the variables as they are declared \<^emph>\<open>in the match pattern\<close>.
 \<close>
 
     lemma
@@ -559,15 +556,16 @@
             \<open>rule I [of x y]\<close>)
 
 text \<open>
-  In this example, the order of schematics in \<open>asm\<close> is actually \<open>?y ?x\<close>, but we instantiate our matched rule in the opposite order. This is
-  because the effective rule @{term I} was bound from the match, which
-  declared the @{typ 'a} slot first and the @{typ 'b} slot second.
+  In this example, the order of schematics in \<open>asm\<close> is actually \<open>?y ?x\<close>, but
+  we instantiate our matched rule in the opposite order. This is because the
+  effective rule @{term I} was bound from the match, which declared the @{typ
+  'a} slot first and the @{typ 'b} slot second.
 
   To get the dynamic behaviour of @{attribute of} we can choose to invoke it
-  \<^emph>\<open>unchecked\<close>. This avoids trying to do any type inference for the
-  provided parameters, instead storing them as their most general type and
-  doing type matching at run-time. This, like @{attribute OF}, will throw
-  errors if the expected slots don't exist or there is a type mismatch.
+  \<^emph>\<open>unchecked\<close>. This avoids trying to do any type inference for the provided
+  parameters, instead storing them as their most general type and doing type
+  matching at run-time. This, like @{attribute OF}, will throw errors if the
+  expected slots don't exist or there is a type mismatch.
 \<close>
 
     lemma
@@ -587,11 +585,11 @@
             \<open>prop_solver\<close>)
 
 text \<open>
-  In this example, the pattern \<open>\<And>x :: 'a. ?P x \<Longrightarrow> ?Q x\<close> matches against
-  the only premise, giving an appropriately typed slot for @{term y}. After
-  the match, the resulting rule is instantiated to @{term y} and then declared
-  as an @{attribute intros} rule. This is then picked up by @{method
-  prop_solver} to solve the goal.
+  In this example, the pattern \<open>\<And>x :: 'a. ?P x \<Longrightarrow> ?Q x\<close> matches against the
+  only premise, giving an appropriately typed slot for @{term y}. After the
+  match, the resulting rule is instantiated to @{term y} and then declared as
+  an @{attribute intros} rule. This is then picked up by @{method prop_solver}
+  to solve the goal.
 \<close>
 
 
@@ -600,8 +598,9 @@
 text \<open>
   In all previous examples, @{method match} was only ever searching for a
   single rule or premise. Each local fact would therefore always have a length
-  of exactly one. We may, however, wish to find \<^emph>\<open>all\<close> matching results.
-  To achieve this, we can simply mark a given pattern with the \<open>(multi)\<close> argument.
+  of exactly one. We may, however, wish to find \<^emph>\<open>all\<close> matching results. To
+  achieve this, we can simply mark a given pattern with the \<open>(multi)\<close>
+  argument.
 \<close>
 
     lemma
@@ -612,21 +611,21 @@
       done
 
 text \<open>
-  In the first @{method match}, without the \<open>(multi)\<close> argument, @{term
-  I} is only ever be bound to one of the members of \<open>asms\<close>. This
-  backtracks over both possibilities (see next section), however neither
-  assumption in isolation is sufficient to solve to goal. The use of the
-  @{method solves} combinator ensures that @{method prop_solver} has no effect
-  on the goal when it doesn't solve it, and so the first match leaves the goal
-  unchanged. In the second @{method match}, \<open>I\<close> is bound to all of
-  \<open>asms\<close>, declaring both results as \<open>intros\<close>. With these rules
-  @{method prop_solver} is capable of solving the goal.
+  In the first @{method match}, without the \<open>(multi)\<close> argument, @{term I} is
+  only ever be bound to one of the members of \<open>asms\<close>. This backtracks over
+  both possibilities (see next section), however neither assumption in
+  isolation is sufficient to solve to goal. The use of the @{method solves}
+  combinator ensures that @{method prop_solver} has no effect on the goal when
+  it doesn't solve it, and so the first match leaves the goal unchanged. In
+  the second @{method match}, \<open>I\<close> is bound to all of \<open>asms\<close>, declaring both
+  results as \<open>intros\<close>. With these rules @{method prop_solver} is capable of
+  solving the goal.
 
   Using for-fixed variables in patterns imposes additional constraints on the
-  results. In all previous examples, the choice of using \<open>?P\<close> or a
-  for-fixed @{term P} only depended on whether or not @{term P} was mentioned
-  in another pattern or the inner method. When using a multi-match, however,
-  all for-fixed terms must agree in the results.
+  results. In all previous examples, the choice of using \<open>?P\<close> or a for-fixed
+  @{term P} only depended on whether or not @{term P} was mentioned in another
+  pattern or the inner method. When using a multi-match, however, all
+  for-fixed terms must agree in the results.
 \<close>
 
     lemma
@@ -641,11 +640,11 @@
 text \<open>
   Here we have two seemingly-equivalent applications of @{method match},
   however only the second one is capable of solving the goal. The first
-  @{method match} selects the first and third members of \<open>asms\<close> (those
-  that agree on their conclusion), which is not sufficient. The second
-  @{method match} selects the first and second members of \<open>asms\<close> (those
-  that agree on their assumption), which is enough for @{method prop_solver}
-  to solve the goal.
+  @{method match} selects the first and third members of \<open>asms\<close> (those that
+  agree on their conclusion), which is not sufficient. The second @{method
+  match} selects the first and second members of \<open>asms\<close> (those that agree on
+  their assumption), which is enough for @{method prop_solver} to solve the
+  goal.
 \<close>
 
 
@@ -655,10 +654,10 @@
   Dummy patterns may be given as placeholders for unique schematics in
   patterns. They implicitly receive all currently bound variables as
   arguments, and are coerced into the @{typ prop} type whenever possible. For
-  example, the trivial dummy pattern \<open>_\<close> will match any proposition.
-  In contrast, by default the pattern \<open>?P\<close> is considered to have type
-  @{typ bool}. It will not bind anything with meta-logical connectives (e.g.
-  \<open>_ \<Longrightarrow> _\<close> or \<open>_ &&& _\<close>).
+  example, the trivial dummy pattern \<open>_\<close> will match any proposition. In
+  contrast, by default the pattern \<open>?P\<close> is considered to have type @{typ
+  bool}. It will not bind anything with meta-logical connectives (e.g. \<open>_ \<Longrightarrow> _\<close>
+  or \<open>_ &&& _\<close>).
 \<close>
 
     lemma
@@ -670,19 +669,19 @@
 section \<open>Backtracking\<close>
 
 text \<open>
-  Patterns are considered top-down, executing the inner method \<open>m\<close> of
-  the first pattern which is satisfied by the current match target. By
-  default, matching performs extensive backtracking by attempting all valid
-  variable and fact bindings according to the given pattern. In particular,
-  all unifiers for a given pattern will be explored, as well as each matching
+  Patterns are considered top-down, executing the inner method \<open>m\<close> of the
+  first pattern which is satisfied by the current match target. By default,
+  matching performs extensive backtracking by attempting all valid variable
+  and fact bindings according to the given pattern. In particular, all
+  unifiers for a given pattern will be explored, as well as each matching
   fact. The inner method \<open>m\<close> will be re-executed for each different
   variable/fact binding during backtracking. A successful match is considered
   a cut-point for backtracking. Specifically, once a match is made no other
   pattern-method pairs will be considered.
 
-  The method \<open>foo\<close> below fails for all goals that are conjunctions. Any
-  such goal will match the first pattern, causing the second pattern (that
-  would otherwise match all goals) to never be considered.
+  The method \<open>foo\<close> below fails for all goals that are conjunctions. Any such
+  goal will match the first pattern, causing the second pattern (that would
+  otherwise match all goals) to never be considered.
 \<close>
 
     method foo =
@@ -690,12 +689,12 @@
 
 text \<open>
   The failure of an inner method that is executed after a successful match
-  will cause the entire match to fail. This distinction is important
-  due to the pervasive use of backtracking. When a method is used in a
-  combinator chain, its failure
-  becomes significant because it signals previously applied methods to move to
-  the next result. Therefore, it is necessary for @{method match} to not mask
-  such failure. One can always rewrite a match using the combinators ``\<open>?\<close>'' and ``\<open>|\<close>'' to try subsequent patterns in the case of an
+  will cause the entire match to fail. This distinction is important due to
+  the pervasive use of backtracking. When a method is used in a combinator
+  chain, its failure becomes significant because it signals previously applied
+  methods to move to the next result. Therefore, it is necessary for @{method
+  match} to not mask such failure. One can always rewrite a match using the
+  combinators ``\<open>?\<close>'' and ``\<open>|\<close>'' to try subsequent patterns in the case of an
   inner-method failure. The following proof method, for example, always
   invokes @{method prop_solver} for all goals because its first alternative
   either never matches or (if it does match) always fails.
@@ -710,8 +709,8 @@
 
 text \<open>
   Backtracking may be controlled more precisely by marking individual patterns
-  as \<open>cut\<close>. This causes backtracking to not progress beyond this pattern:
-  once a match is found no others will be considered.
+  as \<open>cut\<close>. This causes backtracking to not progress beyond this pattern: once
+  a match is found no others will be considered.
 \<close>
 
     method foo\<^sub>2 =
@@ -722,10 +721,10 @@
   In this example, once a conjunction is found (@{term "P \<and> Q"}), all possible
   implications of @{term "P"} in the premises are considered, evaluating the
   inner @{method rule} with each consequent. No other conjunctions will be
-  considered, with method failure occurring once all implications of the
-  form \<open>P \<longrightarrow> ?U\<close> have been explored. Here the left-right processing of
-  individual patterns is important, as all patterns after of the cut will
-  maintain their usual backtracking behaviour.
+  considered, with method failure occurring once all implications of the form
+  \<open>P \<longrightarrow> ?U\<close> have been explored. Here the left-right processing of individual
+  patterns is important, as all patterns after of the cut will maintain their
+  usual backtracking behaviour.
 \<close>
 
     lemma "A \<and> B \<Longrightarrow> A \<longrightarrow> D \<Longrightarrow> A \<longrightarrow> C \<Longrightarrow> C"
@@ -735,16 +734,16 @@
       by (foo\<^sub>2 | prop_solver)
 
 text \<open>
-  In this example, the first lemma is solved by \<open>foo\<^sub>2\<close>, by first
-  picking @{term "A \<longrightarrow> D"} for \<open>I'\<close>, then backtracking and ultimately
-  succeeding after picking @{term "A \<longrightarrow> C"}. In the second lemma, however,
-  @{term "C \<and> D"} is matched first, the second pattern in the match cannot be
-  found and so the method fails, falling through to @{method prop_solver}.
+  In this example, the first lemma is solved by \<open>foo\<^sub>2\<close>, by first picking
+  @{term "A \<longrightarrow> D"} for \<open>I'\<close>, then backtracking and ultimately succeeding after
+  picking @{term "A \<longrightarrow> C"}. In the second lemma, however, @{term "C \<and> D"} is
+  matched first, the second pattern in the match cannot be found and so the
+  method fails, falling through to @{method prop_solver}.
 
-  More precise control is also possible by giving a positive
-  number \<open>n\<close> as an argument to \<open>cut\<close>. This will limit the number
-  of backtracking results of that match to be at most \<open>n\<close>.
-  The match argument \<open>(cut 1)\<close> is the same as simply \<open>(cut)\<close>.
+  More precise control is also possible by giving a positive number \<open>n\<close> as an
+  argument to \<open>cut\<close>. This will limit the number of backtracking results of
+  that match to be at most \<open>n\<close>. The match argument \<open>(cut 1)\<close> is the same as
+  simply \<open>(cut)\<close>.
 \<close>
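
  A hedged sketch of the \<open>(cut n)\<close> form (hypothetical method name, using the
  HOL rule \<open>mp\<close>):

    method cut_example =
      (match premises in I: "P \<longrightarrow> ?U" (cut 2) for P \<Rightarrow> \<open>rule mp [OF I]\<close>)

  At most two of the implicational premises are considered before the match
  stops backtracking.
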
 
 
@@ -769,15 +768,16 @@
 
 text \<open>
   Intuitively it seems like this proof should fail to check. The first match
-  result, which binds @{term I} to the first two members of \<open>asms\<close>,
-  fails the second inner match due to binding @{term P} to @{term A}.
-  Backtracking then attempts to bind @{term I} to the third member of \<open>asms\<close>. This passes all inner matches, but fails when @{method rule} cannot
-  successfully apply this to the current goal. After this, a valid match that
-  is produced by the unifier is one which binds @{term P} to simply \<open>\<lambda>a. A ?x\<close>. The first inner match succeeds because \<open>\<lambda>a. A ?x\<close> does
-  not match @{term A}. The next inner match succeeds because @{term I} has
-  only been bound to the first member of \<open>asms\<close>. This is due to @{method
-  match} considering \<open>\<lambda>a. A ?x\<close> and \<open>\<lambda>a. A ?y\<close> as distinct
-  terms.
+  result, which binds @{term I} to the first two members of \<open>asms\<close>, fails the
+  second inner match due to binding @{term P} to @{term A}. Backtracking then
+  attempts to bind @{term I} to the third member of \<open>asms\<close>. This passes all
+  inner matches, but fails when @{method rule} cannot successfully apply this
+  to the current goal. After this, a valid match that is produced by the
+  unifier is one which binds @{term P} to simply \<open>\<lambda>a. A ?x\<close>. The first inner
+  match succeeds because \<open>\<lambda>a. A ?x\<close> does not match @{term A}. The next inner
+  match succeeds because @{term I} has only been bound to the first member of
+  \<open>asms\<close>. This is due to @{method match} considering \<open>\<lambda>a. A ?x\<close> and \<open>\<lambda>a. A ?y\<close>
+  as distinct terms.
 
   The simplest way to address this is to explicitly disallow term bindings
   which we would consider invalid.
@@ -797,8 +797,8 @@
 text \<open>
   The @{method match} method is not aware of the logical content of match
   targets. Each pattern is simply matched against the shallow structure of a
-  fact or term. Most facts are in \<^emph>\<open>normal form\<close>, which curries premises
-  via meta-implication \<open>_ \<Longrightarrow> _\<close>.
+  fact or term. Most facts are in \<^emph>\<open>normal form\<close>, which curries premises via
+  meta-implication \<open>_ \<Longrightarrow> _\<close>.
 \<close>
 
     lemma
@@ -821,17 +821,17 @@
 text \<open>
   This proof will fail to solve the goal. Our match pattern will only match
   rules which have a single premise, and conclusion @{term C}, so the first
-  member of \<open>asms\<close> is not bound and thus the proof fails. Matching a
-  pattern of the form @{term "P \<Longrightarrow> Q"} against this fact will bind @{term "P"}
-  to @{term "A"} and @{term Q} to @{term "B \<Longrightarrow> C"}. Our pattern, with a
-  concrete @{term "C"} in the conclusion, will fail to match this fact.
+  member of \<open>asms\<close> is not bound and thus the proof fails. Matching a pattern
+  of the form @{term "P \<Longrightarrow> Q"} against this fact will bind @{term "P"} to
+  @{term "A"} and @{term Q} to @{term "B \<Longrightarrow> C"}. Our pattern, with a concrete
+  @{term "C"} in the conclusion, will fail to match this fact.
 
-  To express our desired match, we may \<^emph>\<open>uncurry\<close> our rules before
-  matching against them. This forms a meta-conjunction of all premises in a
-  fact, so that only one implication remains. For example the uncurried
-  version of @{term "A \<Longrightarrow> B \<Longrightarrow> C"} is @{term "A &&& B \<Longrightarrow> C"}. This will now match
-  our desired pattern \<open>_ \<Longrightarrow> C\<close>, and can be \<^emph>\<open>curried\<close> after the
-  match to put it back into normal form.
+  To express our desired match, we may \<^emph>\<open>uncurry\<close> our rules before matching
+  against them. This forms a meta-conjunction of all premises in a fact, so
+  that only one implication remains. For example the uncurried version of
+  @{term "A \<Longrightarrow> B \<Longrightarrow> C"} is @{term "A &&& B \<Longrightarrow> C"}. This will now match our
+  desired pattern \<open>_ \<Longrightarrow> C\<close>, and can be \<^emph>\<open>curried\<close> after the match to put it
+  back into normal form.
 \<close>
 
     lemma
@@ -858,12 +858,12 @@
       done
 
 text \<open>
-  In the first @{method match} we attempt to find a member of \<open>asms\<close>
-  which matches our goal precisely. This fails due to no such member existing.
-  The second match reverses the role of the fact in the match, by first giving
-  a general pattern @{term P}. This bound pattern is then matched against
-  @{term "A y"}. In this case, @{term P} is bound to \<open>A ?x\<close> and so it
-  successfully matches.
+  In the first @{method match} we attempt to find a member of \<open>asms\<close> which
+  matches our goal precisely. This fails because no such member exists. The
+  second match reverses the role of the fact in the match, by first giving a
+  general pattern @{term P}. This bound pattern is then matched against @{term
+  "A y"}. In this case, @{term P} is bound to \<open>A ?x\<close> and so it successfully
+  matches.
 \<close>
 
 
@@ -883,9 +883,10 @@
           \<open>match (y) in "y :: 'b" for y \<Rightarrow> \<open>rule H [where z = y]\<close>\<close>)
 
 text \<open>
-  In this example the type \<open>'b\<close> is matched to \<open>'a\<close>, however
-  statically they are formally distinct types. The first match binds \<open>'b\<close> while the inner match serves to coerce @{term y} into having the type
-  \<open>'b\<close>. This allows the rule instantiation to successfully apply.
+  In this example the type \<open>'b\<close> is matched to \<open>'a\<close>; however, statically they
+  are formally distinct types. The first match binds \<open>'b\<close> while the inner
+  match serves to coerce @{term y} into having the type \<open>'b\<close>. This allows the
+  rule instantiation to successfully apply.
 \<close>
 
 
@@ -922,10 +923,10 @@
 
 text \<open>
   A custom rule attribute is a simple way to extend the functionality of
-  Eisbach methods. The dummy rule attribute notation (\<open>[[ _ ]]\<close>)
-  invokes the given attribute against a dummy fact and evaluates to the result
-  of that attribute. When used as a match target, this can serve as an
-  effective auxiliary function.
+  Eisbach methods. The dummy rule attribute notation (\<open>[[ _ ]]\<close>) invokes the
+  given attribute against a dummy fact and evaluates to the result of that
+  attribute. When used as a match target, this can serve as an effective
+  auxiliary function.
 \<close>
 
     attribute_setup get_split_rule =
--- a/src/Doc/Eisbach/Preface.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Eisbach/Preface.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,10 +5,10 @@
 begin
 
 text \<open>
-  \<^emph>\<open>Eisbach\<close> is a collection of tools which form the basis for defining
-  new proof methods in Isabelle/Isar~@{cite "Wenzel-PhD"}. It can be thought
-  of as a ``proof method language'', but is more precisely an infrastructure
-  for defining new proof methods out of existing ones.
+  \<^emph>\<open>Eisbach\<close> is a collection of tools which form the basis for defining new
+  proof methods in Isabelle/Isar~@{cite "Wenzel-PhD"}. It can be thought of as
+  a ``proof method language'', but is more precisely an infrastructure for
+  defining new proof methods out of existing ones.
 
   The core functionality of Eisbach is provided by the Isar @{command method}
   command. Here users may define new methods by combining existing ones with
@@ -27,8 +27,8 @@
   high barrier-to-entry for many users.
 
   \<^medskip>
-  This manual is written for users familiar with Isabelle/Isar, but
-  not necessarily Isabelle/ML. It covers the usage of the @{command method} as
+  This manual is written for users familiar with Isabelle/Isar, but not
+  necessarily Isabelle/ML. It covers the usage of the @{command method} as
   well as the @{method match} method, as well as discussing their integration
   with existing Isar concepts such as @{command named_theorems}.
 \<close>
--- a/src/Doc/Implementation/Isar.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Isar.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -183,10 +183,10 @@
   method space, e.g.\ @{method rule_tac}.
 
   \<^item> A non-trivial method always needs to make progress: an
-  identical follow-up goal state has to be avoided.\footnote{This
+  identical follow-up goal state has to be avoided.\<^footnote>\<open>This
   enables the user to write method expressions like \<open>meth\<^sup>+\<close>
   without looping, while the trivial do-nothing case can be recovered
-  via \<open>meth\<^sup>?\<close>.}
+  via \<open>meth\<^sup>?\<close>.\<close>
 
   Exception: trivial stuttering steps, such as ``@{method -}'' or
   @{method succeed}.
@@ -275,11 +275,11 @@
   When implementing proof methods, it is advisable to study existing
   implementations carefully and imitate the typical ``boiler plate''
   for context-sensitive parsing and further combinators to wrap-up
-  tactic expressions as methods.\footnote{Aliases or abbreviations of
+  tactic expressions as methods.\<^footnote>\<open>Aliases or abbreviations of
   the standard method combinators should be avoided.  Note that from
   Isabelle99 until Isabelle2009 the system did provide various odd
   combinations of method syntax wrappers that made applications more
-  complicated than necessary.}
+  complicated than necessary.\<close>
 \<close>
 
 text %mlref \<open>
--- a/src/Doc/Implementation/Logic.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Logic.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -19,11 +19,11 @@
   Derivations are relative to a logical theory, which declares type
   constructors, constants, and axioms.  Theory declarations support
   schematic polymorphism, which is strictly speaking outside the
-  logic.\footnote{This is the deeper logical reason, why the theory
+  logic.\<^footnote>\<open>This is the deeper logical reason why the theory
   context \<open>\<Theta>\<close> is separate from the proof context \<open>\<Gamma>\<close>
   of the core calculus: type constructors, term constants, and facts
   (proof constants) may involve arbitrary type schemes, but the type
-  of a locally fixed term parameter is also fixed!}
+  of a locally fixed term parameter is also fixed!\<close>
 \<close>
 
 
@@ -531,9 +531,9 @@
   the simple syntactic types of Pure are always inhabitable.
   ``Assumptions'' \<open>x :: \<tau>\<close> for type-membership are only
   present as long as some \<open>x\<^sub>\<tau>\<close> occurs in the statement
-  body.\footnote{This is the key difference to ``\<open>\<lambda>HOL\<close>'' in
+  body.\<^footnote>\<open>This is the key difference to ``\<open>\<lambda>HOL\<close>'' in
   the PTS framework @{cite "Barendregt-Geuvers:2001"}, where hypotheses
-  \<open>x : A\<close> are treated uniformly for propositions and types.}
+  \<open>x : A\<close> are treated uniformly for propositions and types.\<close>
 
   \<^medskip>
   The axiomatization of a theory is implicitly closed by
--- a/src/Doc/Implementation/ML.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/ML.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -23,11 +23,11 @@
   first-hand explanations should help to understand how proper
   Isabelle/ML is to be read and written, and to get access to the
   wealth of experience that is expressed in the source text and its
-  history of changes.\footnote{See
+  history of changes.\<^footnote>\<open>See
   @{url "http://isabelle.in.tum.de/repos/isabelle"} for the full
   Mercurial history.  There are symbolic tags to refer to official
   Isabelle releases, as opposed to arbitrary \<^emph>\<open>tip\<close> versions that
-  merely reflect snapshots that are never really up-to-date.}\<close>
+  merely reflect snapshots that are never really up-to-date.\<close>\<close>
 
 
 section \<open>Style and orthography\<close>
@@ -37,10 +37,10 @@
   to tell an informed reader what is really going on and how things
   really work.  This is a non-trivial aim, but it is supported by a
   certain style of writing Isabelle/ML that has emerged from long
-  years of system development.\footnote{See also the interesting style
+  years of system development.\<^footnote>\<open>See also the interesting style
   guide for OCaml
   @{url "http://caml.inria.fr/resources/doc/guides/guidelines.en.html"}
-  which shares many of our means and ends.}
+  which shares many of our means and ends.\<close>
 
   The main principle behind any coding style is \<^emph>\<open>consistency\<close>.
   For a single author of a small program this merely means ``choose
@@ -123,10 +123,10 @@
   For historical reasons, many capitalized names omit underscores,
   e.g.\ old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}.
   Genuine mixed-case names are \<^emph>\<open>not\<close> used, because clear division
-  of words is essential for readability.\footnote{Camel-case was
+  of words is essential for readability.\<^footnote>\<open>Camel-case was
   invented to workaround the lack of underscore in some early
   non-ASCII character sets.  Later it became habitual in some language
-  communities that are now strong in numbers.}
+  communities that are now strong in numbers.\<close>
 
   A single (capital) character does not count as ``word'' in this
   respect: some Isabelle/ML names are suffixed by extra markers like
@@ -279,10 +279,10 @@
 
 paragraph \<open>Line length\<close>
 text \<open>is limited to 80 characters according to ancient standards, but we allow
-  as much as 100 characters (not more).\footnote{Readability requires to keep
+  as much as 100 characters (not more).\<^footnote>\<open>Readability requires keeping
   the beginning of a line in view while watching its end. Modern wide-screen
   displays do not change the way how the human brain works. Sources also need
-  to be printable on plain paper with reasonable font-size.} The extra 20
+  to be printable on plain paper with reasonable font-size.\<close> The extra 20
   characters acknowledge the space requirements due to qualified library
   references in Isabelle/ML.\<close>
 
@@ -327,11 +327,11 @@
 \<close>
 
 paragraph \<open>Indentation\<close>
-text \<open>uses plain spaces, never hard tabulators.\footnote{Tabulators were
+text \<open>uses plain spaces, never hard tabulators.\<^footnote>\<open>Tabulators were
   invented to move the carriage of a type-writer to certain predefined
   positions. In software they could be used as a primitive run-length
   compression of consecutive spaces, but the precise result would depend on
-  non-standardized text editor configuration.}
+  non-standardized text editor configuration.\<close>
 
   Each level of nesting is indented by 2 spaces, sometimes 1, very
   rarely 4, never 8 or any other odd number.
@@ -562,10 +562,10 @@
   Removing the above ML declaration from the source text will remove any trace
   of this definition, as expected. The Isabelle/ML toplevel environment is
   managed in a \<^emph>\<open>stateless\<close> way: in contrast to the raw ML toplevel, there
-  are no global side-effects involved here.\footnote{Such a stateless
+  are no global side-effects involved here.\<^footnote>\<open>Such a stateless
   compilation environment is also a prerequisite for robust parallel
   compilation within independent nodes of the implicit theory development
-  graph.}
+  graph.\<close>
 
   \<^medskip>
   The next example shows how to embed ML into Isar proofs, using
@@ -578,7 +578,7 @@
   ML_prf %"ML" \<open>val a = 1\<close>
   {
     ML_prf %"ML" \<open>val b = a + 1\<close>
-  } -- \<open>Isar block structure ignored by ML environment\<close>
+  } \<comment> \<open>Isar block structure ignored by ML environment\<close>
   ML_prf %"ML" \<open>val c = b + 1\<close>
 end
 
@@ -739,9 +739,9 @@
   type \<open>\<tau>\<close> is represented by the iterated function space
   \<open>\<tau>\<^sub>1 \<rightarrow> \<dots> \<rightarrow> \<tau>\<^sub>n \<rightarrow> \<tau>\<close>.  This is isomorphic to the well-known
   encoding via tuples \<open>\<tau>\<^sub>1 \<times> \<dots> \<times> \<tau>\<^sub>n \<rightarrow> \<tau>\<close>, but the curried
-  version fits more smoothly into the basic calculus.\footnote{The
+  version fits more smoothly into the basic calculus.\<^footnote>\<open>The
   difference is even more significant in HOL, because the redundant
-  tuple structure needs to be accommodated extraneous proof steps.}
+  tuple structure needs to be accommodated by extraneous proof steps.\<close>
 
   Currying gives some flexibility due to \<^emph>\<open>partial application\<close>.  A
   function \<open>f: \<tau>\<^sub>1 \<rightarrow> \<tau>\<^sub>2 \<rightarrow> \<tau>\<close> can be applied to \<open>x: \<tau>\<^sub>1\<close>
@@ -1282,9 +1282,9 @@
   \<^descr> @{ML "Symbol.explode"}~\<open>str\<close> produces a symbol list
   from the packed form.  This function supersedes @{ML
   "String.explode"} for virtually all purposes of manipulating text in
-  Isabelle!\footnote{The runtime overhead for exploded strings is
+  Isabelle!\<^footnote>\<open>The runtime overhead for exploded strings is
   mainly that of the list structure: individual symbols that happen to
-  be a singleton string do not require extra memory in Poly/ML.}
+  be a singleton string do not require extra memory in Poly/ML.\<close>
 
   \<^descr> @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
   "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
@@ -1396,8 +1396,8 @@
 
   \<^descr> Type @{ML_type int} represents regular mathematical integers, which
   are \<^emph>\<open>unbounded\<close>. Overflow is treated properly, but should never happen
-  in practice.\footnote{The size limit for integer bit patterns in memory is
-  64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.} This works
+  in practice.\<^footnote>\<open>The size limit for integer bit patterns in memory is
+  64\,MB for 32-bit Poly/ML, and much higher for 64-bit systems.\<close> This works
   uniformly for all supported ML platforms (Poly/ML and SML/NJ).
 
   Literal integers in ML text are forced to be of this one true
@@ -1614,13 +1614,13 @@
   sub-components with explicit communication, general asynchronous
   interaction etc.  Moreover, parallel evaluation is a prerequisite to
   make adequate use of the CPU resources that are available on
-  multi-core systems.\footnote{Multi-core computing does not mean that
+  multi-core systems.\<^footnote>\<open>Multi-core computing does not mean that
   there are ``spare cycles'' to be wasted.  It means that the
   continued exponential speedup of CPU performance due to ``Moore's
   Law'' follows different rules: clock frequency has reached its peak
   around 2005, and applications need to be parallelized in order to
   avoid a perceived loss of performance.  See also
-  @{cite "Sutter:2005"}.}
+  @{cite "Sutter:2005"}.\<close>
 
   Isabelle/Isar exploits the inherent structure of theories and proofs to
   support \<^emph>\<open>implicit parallelism\<close> to a large extent. LCF-style theorem
@@ -1671,8 +1671,8 @@
 
   \<^item> Global references (or arrays), i.e.\ mutable memory cells that
   persist over several invocations of associated
-  operations.\footnote{This is independent of the visibility of such
-  mutable values in the toplevel scope.}
+  operations.\<^footnote>\<open>This is independent of the visibility of such
+  mutable values in the toplevel scope.\<close>
 
   \<^item> Global state of the running Isabelle/ML process, i.e.\ raw I/O
   channels, environment variables, current working directory.
--- a/src/Doc/Implementation/Prelim.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Prelim.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -471,17 +471,17 @@
 begin
 
 declare [[show_types = false]]
-  -- \<open>declaration within (local) theory context\<close>
+  \<comment> \<open>declaration within (local) theory context\<close>
 
 notepad
 begin
   note [[show_types = true]]
-    -- \<open>declaration within proof (forward mode)\<close>
+    \<comment> \<open>declaration within proof (forward mode)\<close>
   term x
 
   have "x = x"
     using [[show_types = false]]
-      -- \<open>declaration within proof (backward mode)\<close>
+      \<comment> \<open>declaration within proof (backward mode)\<close>
     ..
 end
 
--- a/src/Doc/Implementation/Proof.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Proof.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -58,14 +58,14 @@
 notepad
 begin
   {
-    fix x  -- \<open>all potential occurrences of some \<open>x::\<tau>\<close> are fixed\<close>
+    fix x  \<comment> \<open>all potential occurrences of some \<open>x::\<tau>\<close> are fixed\<close>
     {
-      have "x::'a \<equiv> x"  -- \<open>implicit type assignment by concrete occurrence\<close>
+      have "x::'a \<equiv> x"  \<comment> \<open>implicit type assignment by concrete occurrence\<close>
         by (rule reflexive)
     }
-    thm this  -- \<open>result still with fixed type \<open>'a\<close>\<close>
+    thm this  \<comment> \<open>result still with fixed type \<open>'a\<close>\<close>
   }
-  thm this  -- \<open>fully general result for arbitrary \<open>?x::?'a\<close>\<close>
+  thm this  \<comment> \<open>fully general result for arbitrary \<open>?x::?'a\<close>\<close>
 end
 
 text \<open>The Isabelle/Isar proof context manages the details of term
--- a/src/Doc/Implementation/Syntax.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Syntax.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -20,9 +20,9 @@
   Moreover, type-inference in the style of Hindley-Milner @{cite hindleymilner}
   (and extensions) enables users to write \<open>\<forall>x. B x\<close> concisely, when
   the type \<open>'a\<close> is already clear from the
-  context.\footnote{Type-inference taken to the extreme can easily confuse
+  context.\<^footnote>\<open>Type-inference taken to the extreme can easily confuse
   users. Beginners often stumble over unexpectedly general types inferred by
-  the system.}
+  the system.\<close>
 
   \<^medskip>
   The main inner syntax operations are \<^emph>\<open>read\<close> for
--- a/src/Doc/Implementation/Tactic.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Implementation/Tactic.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -18,10 +18,10 @@
   Isabelle/Pure represents a goal as a theorem stating that the
   subgoals imply the main goal: \<open>A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow>
   C\<close>.  The outermost goal structure is that of a Horn Clause: i.e.\
-  an iterated implication without any quantifiers\footnote{Recall that
+  an iterated implication without any quantifiers\<^footnote>\<open>Recall that
   outermost \<open>\<And>x. \<phi>[x]\<close> is always represented via schematic
   variables in the body: \<open>\<phi>[?x]\<close>.  These variables may get
-  instantiated during the course of reasoning.}.  For \<open>n = 0\<close>
+  instantiated during the course of reasoning.\<close>.  For \<open>n = 0\<close>
   a goal is called ``solved''.
 
   The structure of each subgoal \<open>A\<^sub>i\<close> is that of a
@@ -90,11 +90,11 @@
   \secref{sec:tactical-goals}) to a lazy sequence of potential
   successor states.  The underlying sequence implementation is lazy
   both in head and tail, and is purely functional in \<^emph>\<open>not\<close>
-  supporting memoing.\footnote{The lack of memoing and the strict
+  supporting memoing.\<^footnote>\<open>The lack of memoing and the strict
   nature of ML requires some care when working with low-level
   sequence operations, to avoid duplicate or premature evaluation of
   results.  It also means that modified runtime behavior, such as
-  timeout, is very hard to achieve for general tactics.}
+  timeout, is very hard to achieve for general tactics.\<close>
 
   An \<^emph>\<open>empty result sequence\<close> means that the tactic has failed: in
   a compound tactic expression other tactics might be tried instead,
@@ -319,12 +319,12 @@
   \<^descr> @{ML match_tac}, @{ML ematch_tac}, @{ML dmatch_tac}, and @{ML
   bimatch_tac} are similar to @{ML resolve_tac}, @{ML eresolve_tac},
   @{ML dresolve_tac}, and @{ML biresolve_tac}, respectively, but do
-  not instantiate schematic variables in the goal state.%
-\footnote{Strictly speaking, matching means to treat the unknowns in the goal
+  not instantiate schematic variables in the goal state.\<^footnote>\<open>Strictly speaking,
+  matching means to treat the unknowns in the goal
   state as constants, but these tactics merely discard unifiers that would
   update the goal state. In rare situations (where the conclusion and 
   goal state have flexible terms at the same position), the tactic
-  will fail even though an acceptable unifier exists.}
+  will fail even though an acceptable unifier exists.\<close>
   These tactics were written for a specific application within the classical reasoner.
 
   Flexible subgoals are not updated at will, but are left alone.
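
  A minimal Isabelle/ML sketch contrasting the two flavours, assuming some
  rule \<open>th\<close> is at hand (illustration only):

    ML \<open>
      (*resolution may instantiate schematic variables of the goal state*)
      fun by_resolution ctxt th = resolve_tac ctxt [th] 1;

      (*matching discards unifiers that would update the goal state*)
      fun by_matching ctxt th = match_tac ctxt [th] 1;
    \<close>
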
--- a/src/Doc/Isar_Ref/Document_Preparation.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Document_Preparation.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -134,13 +134,16 @@
   surrounding \<^verbatim>\<open>@{\<close>\<open>\<dots>\<close>\<^verbatim>\<open>}\<close>) works for a single
   argument that is a cartouche.
 
-  Omitting the control symbol is also possible: a cartouche without special
-  decoration is equivalent to \<^verbatim>\<open>\<^cartouche>\<close>\<open>\<open>argument_content\<close>\<close>, which
-  is equivalent to \<^verbatim>\<open>@{cartouche\<close>~\<open>\<open>argument_content\<close>\<close>\<^verbatim>\<open>}\<close>. The
-  special name @{antiquotation_def cartouche} is defined in the context:
-  Isabelle/Pure introduces that as an alias to @{antiquotation_ref text}
-  (see below). Consequently, \<open>\<open>foo_bar + baz \<le> bazar\<close>\<close> prints literal
-  quasi-formal text (unchecked).
+  A cartouche without special decoration is equivalent to
+  \<^verbatim>\<open>\<^cartouche>\<close>\<open>\<open>argument_content\<close>\<close>, which is equivalent to
+  \<^verbatim>\<open>@{cartouche\<close>~\<open>\<open>argument_content\<close>\<close>\<^verbatim>\<open>}\<close>. The special name
+  @{antiquotation_def cartouche} is defined in the context: Isabelle/Pure
+  introduces that as an alias to @{antiquotation_ref text} (see below).
+  Consequently, \<open>\<open>foo_bar + baz \<le> bazar\<close>\<close> prints literal quasi-formal text
+  (unchecked).
+
+  A control symbol \<^verbatim>\<open>\\<close>\<^verbatim>\<open><^\<close>\<open>name\<close>\<^verbatim>\<open>>\<close> within the body text, but without a
+  subsequent cartouche, is equivalent to \<^verbatim>\<open>@{\<close>\<open>name\<close>\<^verbatim>\<open>}\<close>.
 
   \begingroup
   \def\isasymcontrolstart{\isatt{\isacharbackslash\isacharless\isacharcircum}}
--- a/src/Doc/Isar_Ref/First_Order_Logic.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/First_Order_Logic.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -357,10 +357,10 @@
 theorem
   assumes "\<exists>x. \<forall>y. R x y"
   shows "\<forall>y. \<exists>x. R x y"
-proof    -- \<open>\<open>\<forall>\<close> introduction\<close>
-  obtain x where "\<forall>y. R x y" using \<open>\<exists>x. \<forall>y. R x y\<close> ..    -- \<open>\<open>\<exists>\<close> elimination\<close>
-  fix y have "R x y" using \<open>\<forall>y. R x y\<close> ..    -- \<open>\<open>\<forall>\<close> destruction\<close>
-  then show "\<exists>x. R x y" ..    -- \<open>\<open>\<exists>\<close> introduction\<close>
+proof    \<comment> \<open>\<open>\<forall>\<close> introduction\<close>
+  obtain x where "\<forall>y. R x y" using \<open>\<exists>x. \<forall>y. R x y\<close> ..    \<comment> \<open>\<open>\<exists>\<close> elimination\<close>
+  fix y have "R x y" using \<open>\<forall>y. R x y\<close> ..    \<comment> \<open>\<open>\<forall>\<close> destruction\<close>
+  then show "\<exists>x. R x y" ..    \<comment> \<open>\<open>\<exists>\<close> introduction\<close>
 qed
 
 
--- a/src/Doc/Isar_Ref/Generic.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Generic.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -327,9 +327,9 @@
 
   \<^descr> @{method simp_all} is similar to @{method simp}, but acts on
   all goals, working backwards from the last to the first one as usual
-  in Isabelle.\footnote{The order is irrelevant for goals without
+  in Isabelle.\<^footnote>\<open>The order is irrelevant for goals without
   schematic variables, so simplification might actually be performed
-  in parallel here.}
+  in parallel here.\<close>
 
   Chained facts are inserted into all subgoals, before the
   simplification process starts.  Further rule declarations are the
@@ -347,8 +347,8 @@
   normalization process, or simplifying assumptions themselves.
   Further options allow to fine-tune the behavior of the Simplifier
   in this respect, corresponding to a variety of ML tactics as
-  follows.\footnote{Unlike the corresponding Isar proof methods, the
-  ML tactics do not insist in changing the goal state.}
+  follows.\<^footnote>\<open>Unlike the corresponding Isar proof methods, the
+  ML tactics do not insist in changing the goal state.\<close>
 
   \begin{center}
   \small
@@ -1179,9 +1179,9 @@
   is easier to automate.
 
   A \<^bold>\<open>sequent\<close> has the form \<open>\<Gamma> \<turnstile> \<Delta>\<close>, where \<open>\<Gamma>\<close>
-  and \<open>\<Delta>\<close> are sets of formulae.\footnote{For first-order
+  and \<open>\<Delta>\<close> are sets of formulae.\<^footnote>\<open>For first-order
   logic, sequents can equivalently be made from lists or multisets of
-  formulae.} The sequent \<open>P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n\<close> is
+  formulae.\<close> The sequent \<open>P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n\<close> is
   \<^bold>\<open>valid\<close> if \<open>P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m\<close> implies \<open>Q\<^sub>1 \<or> \<dots> \<or>
   Q\<^sub>n\<close>.  Thus \<open>P\<^sub>1, \<dots>, P\<^sub>m\<close> represent assumptions, each of which
   is true, while \<open>Q\<^sub>1, \<dots>, Q\<^sub>n\<close> represent alternative goals.  A
--- a/src/Doc/Isar_Ref/Inner_Syntax.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Inner_Syntax.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -277,8 +277,8 @@
   used internally in Isabelle/Pure.
 
   \<^item> \<^verbatim>\<open>xsymbols\<close>: enable proper mathematical symbols
-  instead of ASCII art.\footnote{This traditional mode name stems from
-  the ``X-Symbol'' package for classic Proof~General with XEmacs.}
+  instead of ASCII art.\<^footnote>\<open>This traditional mode name stems from
+  the ``X-Symbol'' package for classic Proof~General with XEmacs.\<close>
 
   \<^item> \<^verbatim>\<open>latex\<close>: additional mode that is active in {\LaTeX}
   document preparation of Isabelle theory sources; allows to provide
@@ -338,9 +338,9 @@
   grammar, where for each argument \<open>i\<close> the syntactic category
   is determined by \<open>\<tau>\<^sub>i\<close> (with priority \<open>p\<^sub>i\<close>), and the
   result category is determined from \<open>\<tau>\<close> (with priority \<open>p\<close>).  Priority specifications are optional, with default 0 for
-  arguments and 1000 for the result.\footnote{Omitting priorities is
+  arguments and 1000 for the result.\<^footnote>\<open>Omitting priorities is
   prone to syntactic ambiguities unless the delimiter tokens determine
-  fully bracketed notation, as in \<open>if _ then _ else _ fi\<close>.}
+  fully bracketed notation, as in \<open>if _ then _ else _ fi\<close>.\<close>
 
   Since \<open>\<tau>\<close> may be again a function type, the constant
   type scheme may have more argument positions than the mixfix
@@ -1213,10 +1213,10 @@
   side-conditions:
 
     \<^item> Rules must be left linear: \<open>lhs\<close> must not contain
-    repeated variables.\footnote{The deeper reason for this is that AST
+    repeated variables.\<^footnote>\<open>The deeper reason for this is that AST
     equality is not well-defined: different occurrences of the ``same''
     AST could be decorated differently by accidental type-constraints or
-    source position information, for example.}
+    source position information, for example.\<close>
 
     \<^item> Every variable in \<open>rhs\<close> must also occur in \<open>lhs\<close>.
 
--- a/src/Doc/Isar_Ref/Outer_Syntax.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Outer_Syntax.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -223,17 +223,18 @@
 
 subsection \<open>Comments \label{sec:comments}\<close>
 
-text \<open>Large chunks of plain @{syntax text} are usually given @{syntax
-  verbatim}, i.e.\ enclosed in \<^verbatim>\<open>{*\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>*}\<close>,
-  or as @{syntax cartouche} \<open>\<open>\<dots>\<close>\<close>. For convenience, any of the
-  smaller text units conforming to @{syntax nameref} are admitted as well. A
-  marginal @{syntax comment} is of the form \<^verbatim>\<open>--\<close>~@{syntax text}.
-  Any number of these may occur within Isabelle/Isar commands.
+text \<open>
+  Large chunks of plain @{syntax text} are usually given @{syntax verbatim},
+  i.e.\ enclosed in \<^verbatim>\<open>{*\<close>~\<open>\<dots>\<close>~\<^verbatim>\<open>*}\<close>, or as @{syntax cartouche} \<open>\<open>\<dots>\<close>\<close>. For
+  convenience, any of the smaller text units conforming to @{syntax nameref}
+  are admitted as well. A marginal @{syntax comment} is of the form
+  \<^verbatim>\<open>--\<close>~@{syntax text} or \<^verbatim>\<open>\<comment>\<close>~@{syntax text}. Any number of these may occur
+  within Isabelle/Isar commands.
 
   @{rail \<open>
     @{syntax_def text}: @{syntax verbatim} | @{syntax cartouche} | @{syntax nameref}
     ;
-    @{syntax_def comment}: '--' @{syntax text}
+    @{syntax_def comment}: ('--' | @'\<comment>') @{syntax text}
   \<close>}
 \<close>
 
--- a/src/Doc/Isar_Ref/Proof.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Proof.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -530,8 +530,8 @@
 
   \<^medskip>
   The Isar calculation proof commands may be defined as
-  follows:\footnote{We suppress internal bookkeeping such as proper
-  handling of block-structure.}
+  follows:\<^footnote>\<open>We suppress internal bookkeeping such as proper
+  handling of block-structure.\<close>
 
   \begin{matharray}{rcl}
     @{command "also"}\<open>\<^sub>0\<close> & \equiv & @{command "note"}~\<open>calculation = this\<close> \\
@@ -718,9 +718,9 @@
   If the goal had been \<open>show\<close> (or \<open>thus\<close>), some
   pending sub-goal is solved as well by the rule resulting from the
   result \<^emph>\<open>exported\<close> into the enclosing goal context.  Thus \<open>qed\<close> may fail for two reasons: either \<open>m\<^sub>2\<close> fails, or the
-  resulting rule does not fit to any pending goal\footnote{This
+  resulting rule does not fit to any pending goal\<^footnote>\<open>This
   includes any additional ``strong'' assumptions as introduced by
-  @{command "assume"}.} of the enclosing context.  Debugging such a
+  @{command "assume"}.\<close> of the enclosing context.  Debugging such a
   situation might involve temporarily changing @{command "show"} into
   @{command "have"}, or weakening the local context by replacing
   occurrences of @{command "assume"} by @{command "presume"}.
--- a/src/Doc/Isar_Ref/Spec.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Spec.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -462,46 +462,41 @@
 subsection \<open>Locale expressions \label{sec:locale-expr}\<close>
 
 text \<open>
-  A \<^emph>\<open>locale expression\<close> denotes a context composed of instances
-  of existing locales.  The context consists of the declaration
-  elements from the locale instances.  Redundant locale instances are
-  omitted according to roundup.
+  A \<^emph>\<open>locale expression\<close> denotes a context composed of instances of existing
+  locales. The context consists of the declaration elements from the locale
+  instances. Redundant locale instances are omitted according to roundup.
 
   @{rail \<open>
     @{syntax_def locale_expr}: (instance + '+') @{syntax for_fixes}
     ;
     instance: (qualifier ':')? @{syntax nameref} (pos_insts | named_insts)
     ;
-    qualifier: @{syntax name} ('?' | '!')?
+    qualifier: @{syntax name} ('?')?
     ;
     pos_insts: ('_' | @{syntax term})*
     ;
     named_insts: @'where' (@{syntax name} '=' @{syntax term} + @'and')
   \<close>}
 
-  A locale instance consists of a reference to a locale and either
-  positional or named parameter instantiations.  Identical
-  instantiations (that is, those that instantiate a parameter by itself)
-  may be omitted.  The notation `\<open>_\<close>' enables to omit the
-  instantiation for a parameter inside a positional instantiation.
+  A locale instance consists of a reference to a locale and either positional
+  or named parameter instantiations. Identical instantiations (that is, those
+  that instantiate a parameter by itself) may be omitted. The notation ``\<open>_\<close>''
+  makes it possible to omit the instantiation for a parameter inside a positional
+  instantiation.
 
-  Terms in instantiations are from the context the locale expressions
-  is declared in.  Local names may be added to this context with the
-  optional @{keyword "for"} clause.  This is useful for shadowing names
-  bound in outer contexts, and for declaring syntax.  In addition,
-  syntax declarations from one instance are effective when parsing
-  subsequent instances of the same expression.
+  Terms in instantiations are from the context the locale expression is
+  declared in. Local names may be added to this context with the optional
+  @{keyword "for"} clause. This is useful for shadowing names bound in outer
+  contexts, and for declaring syntax. In addition, syntax declarations from
+  one instance are effective when parsing subsequent instances of the same
+  expression.
 
-  Instances have an optional qualifier which applies to names in
-  declarations.  Names include local definitions and theorem names.
-  If present, the qualifier itself is either optional
-  (``\<^verbatim>\<open>?\<close>''), which means that it may be omitted on input of the
-  qualified name, or mandatory (``\<^verbatim>\<open>!\<close>'').  If neither
-  ``\<^verbatim>\<open>?\<close>'' nor ``\<^verbatim>\<open>!\<close>'' are present, the command's default
-  is used.  For @{command "interpretation"} and @{command "interpret"}
-  the default is ``mandatory'', for @{command "locale"} and @{command
-  "sublocale"} the default is ``optional''.  Qualifiers play no role
-  in determining whether one locale instance subsumes another.
+  Instances have an optional qualifier which applies to names in declarations.
+  Names include local definitions and theorem names. If present, the qualifier
+  itself is either mandatory (default) or non-mandatory (when followed by
+  ``\<^verbatim>\<open>?\<close>''). Non-mandatory means that the qualifier may be omitted on input.
+  Qualifiers only affect name spaces; they play no role in determining whether
+  one locale instance subsumes another.
 \<close>
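
  A small sketch of such an expression, assuming a single-parameter locale
  \<open>partial_order\<close> (the locale and qualifier names here are hypothetical):

    locale order_pair =
      le1?: partial_order le1 + le2: partial_order le2
      for le1 le2

  The qualifier \<open>le1\<close> is non-mandatory (``\<^verbatim>\<open>?\<close>''), so it may be omitted on
  input of the names it qualifies, whereas \<open>le2\<close> is mandatory by default.
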
 
 
@@ -678,10 +673,10 @@
     @@{command print_interps} @{syntax nameref}
     ;
 
-    equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
+    equations: @'rewrites' (@{syntax thmdecl}? @{syntax prop} + @'and')
   \<close>}
 
-  \<^descr> @{command "interpretation"}~\<open>expr \<WHERE> eqns\<close>
+  \<^descr> @{command "interpretation"}~\<open>expr\<close>~@{keyword "rewrites"}~\<open>eqns\<close>
   interprets \<open>expr\<close> in a global or local theory.  The command
   generates proof obligations for the instantiated specifications.
   Once these are discharged by the user, instantiated declarations (in
@@ -722,14 +717,13 @@
   concepts introduced through definitions.  The equations must be
   proved.
 
-  \<^descr> @{command "interpret"}~\<open>expr \<WHERE> eqns\<close> interprets
+  \<^descr> @{command "interpret"}~\<open>expr\<close>~@{keyword "rewrites"}~\<open>eqns\<close> interprets
   \<open>expr\<close> in the proof context and is otherwise similar to
   interpretation in local theories.  Note that for @{command
   "interpret"} the \<open>eqns\<close> should be
   explicitly universally quantified.
 
-  \<^descr> @{command "sublocale"}~\<open>name \<subseteq> expr \<WHERE>
-  eqns\<close>
+  \<^descr> @{command "sublocale"}~\<open>name \<subseteq> expr\<close>~@{keyword "rewrites"}~\<open>eqns\<close>
   interprets \<open>expr\<close> in the locale \<open>name\<close>.  A proof that
   the specification of \<open>name\<close> implies the specification of
   \<open>expr\<close> is required.  As in the localized version of the
@@ -828,10 +822,10 @@
     ;
     definitions: @'defining' (@{syntax thmdecl}? @{syntax name} \<newline>
       @{syntax mixfix}? @'=' @{syntax term} + @'and');
-    equations: @'where' (@{syntax thmdecl}? @{syntax prop} + @'and')
+    equations: @'rewrites' (@{syntax thmdecl}? @{syntax prop} + @'and')
   \<close>}
 
-  \<^descr> @{command "permanent_interpretation"}~\<open>expr \<DEFINING> defs \<WHERE> eqns\<close>
+  \<^descr> @{command "permanent_interpretation"}~\<open>expr \<DEFINING> defs\<close>~@{keyword "rewrites"}~\<open>eqns\<close>
   interprets \<open>expr\<close> in the current local theory.  The command
   generates proof obligations for the instantiated specifications.
   Instantiated declarations (in particular, facts) are added to the
--- a/src/Doc/Isar_Ref/Synopsis.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/Synopsis.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -17,11 +17,11 @@
 notepad
 begin
   txt \<open>Locally fixed entities:\<close>
-  fix x   -- \<open>local constant, without any type information yet\<close>
-  fix x :: 'a  -- \<open>variant with explicit type-constraint for subsequent use\<close>
+  fix x   \<comment> \<open>local constant, without any type information yet\<close>
+  fix x :: 'a  \<comment> \<open>variant with explicit type-constraint for subsequent use\<close>
 
   fix a b
-  assume "a = b"  -- \<open>type assignment at first occurrence in concrete term\<close>
+  assume "a = b"  \<comment> \<open>type assignment at first occurrence in concrete term\<close>
 
   txt \<open>Definitions (non-polymorphic):\<close>
   def x \<equiv> "t::'a"
@@ -234,7 +234,7 @@
   proof -
     term ?thesis
     show ?thesis sorry
-    term ?thesis  -- \<open>static!\<close>
+    term ?thesis  \<comment> \<open>static!\<close>
   qed
   term "\<dots>"
   thm this
@@ -345,7 +345,7 @@
   moreover
   { assume C have R sorry }
   ultimately
-  have R by blast  -- \<open>``big-bang integration'' of proof blocks (occasionally fragile)\<close>
+  have R by blast  \<comment> \<open>``big-bang integration'' of proof blocks (occasionally fragile)\<close>
 end
 
 
@@ -364,7 +364,7 @@
 begin
   fix n :: nat
   have "P n"
-  proof (rule nat.induct)  -- \<open>fragile rule application!\<close>
+  proof (rule nat.induct)  \<comment> \<open>fragile rule application!\<close>
     show "P 0" sorry
   next
     fix n :: nat
@@ -503,7 +503,7 @@
     from \<open>A x 0\<close> show "Q x 0" sorry
   next
     case (Suc n)
-    from \<open>\<And>x. A x n \<Longrightarrow> Q x n\<close>  -- \<open>arbitrary instances can be produced here\<close>
+    from \<open>\<And>x. A x n \<Longrightarrow> Q x n\<close>  \<comment> \<open>arbitrary instances can be produced here\<close>
       and \<open>A x (Suc n)\<close> show "Q x (Suc n)" sorry
   qed
 end
@@ -675,9 +675,9 @@
 begin
   assume a: A and b: B
   thm conjI
-  thm conjI [of A B]  -- "instantiation"
-  thm conjI [of A B, OF a b]  -- "instantiation and composition"
-  thm conjI [OF a b]  -- "composition via unification (trivial)"
+  thm conjI [of A B]  \<comment> "instantiation"
+  thm conjI [of A B, OF a b]  \<comment> "instantiation and composition"
+  thm conjI [OF a b]  \<comment> "composition via unification (trivial)"
   thm conjI [OF \<open>A\<close> \<open>B\<close>]
 
   thm conjI [OF disjI1]
@@ -710,9 +710,9 @@
       fix x
       assume "A x"
       show "B x" sorry
-    } -- "implicit block structure made explicit"
+    } \<comment> "implicit block structure made explicit"
     note \<open>\<And>x. A x \<Longrightarrow> B x\<close>
-      -- "side exit for the resulting rule"
+      \<comment> "side exit for the resulting rule"
   qed
 end
 
@@ -726,12 +726,12 @@
 
 notepad
 begin
-  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  -- \<open>simple rule (Horn clause)\<close>
+  assume r1: "A \<Longrightarrow> B \<Longrightarrow> C"  \<comment> \<open>simple rule (Horn clause)\<close>
 
-  have A sorry  -- "prefix of facts via outer sub-proof"
+  have A sorry  \<comment> "prefix of facts via outer sub-proof"
   then have C
   proof (rule r1)
-    show B sorry  -- "remaining rule premises via inner sub-proof"
+    show B sorry  \<comment> "remaining rule premises via inner sub-proof"
   qed
 
   have C
@@ -750,7 +750,7 @@
 
 next
 
-  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  -- \<open>nested rule\<close>
+  assume r2: "A \<Longrightarrow> (\<And>x. B1 x \<Longrightarrow> B2 x) \<Longrightarrow> C"  \<comment> \<open>nested rule\<close>
 
   have A sorry
   then have C
@@ -850,31 +850,31 @@
 notepad
 begin
   have "A \<and> B"
-  proof  -- \<open>two strictly isolated subproofs\<close>
+  proof  \<comment> \<open>two strictly isolated subproofs\<close>
     show A sorry
   next
     show B sorry
   qed
 
   have "A \<and> B"
-  proof  -- \<open>one simultaneous sub-proof\<close>
+  proof  \<comment> \<open>one simultaneous sub-proof\<close>
     show A and B sorry
   qed
 
   have "A \<and> B"
-  proof  -- \<open>two subproofs in the same context\<close>
+  proof  \<comment> \<open>two subproofs in the same context\<close>
     show A sorry
     show B sorry
   qed
 
   have "A \<and> B"
-  proof  -- \<open>swapped order\<close>
+  proof  \<comment> \<open>swapped order\<close>
     show B sorry
     show A sorry
   qed
 
   have "A \<and> B"
-  proof  -- \<open>sequential subproofs\<close>
+  proof  \<comment> \<open>sequential subproofs\<close>
     show A sorry
     show B using \<open>A\<close> sorry
   qed
@@ -941,9 +941,9 @@
   following typical representatives:
 \<close>
 
-thm exE     -- \<open>local parameter\<close>
-thm conjE   -- \<open>local premises\<close>
-thm disjE   -- \<open>split into cases\<close>
+thm exE     \<comment> \<open>local parameter\<close>
+thm conjE   \<comment> \<open>local premises\<close>
+thm disjE   \<comment> \<open>split into cases\<close>
 
 text \<open>
   Combining these characteristics leads to the following general scheme
@@ -1001,7 +1001,7 @@
 print_statement disjE
 
 lemma
-  assumes A1 and A2  -- \<open>assumptions\<close>
+  assumes A1 and A2  \<comment> \<open>assumptions\<close>
   obtains
     (case1)  x y where "B1 x y" and "C1 x y"
   | (case2)  x y where "B2 x y" and "C2 x y"
--- a/src/Doc/Isar_Ref/document/style.sty	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Isar_Ref/document/style.sty	Tue Nov 10 14:43:29 2015 +0000
@@ -40,8 +40,6 @@
 
 \isabellestyle{literalunderscore}
 
-\newcommand{\isasymdash}{\isatext{\mbox{-}}}
-
 \railtermfont{\isabellestyle{tt}}
 \railnontermfont{\isabellestyle{literalunderscore}}
 \railnamefont{\isabellestyle{literalunderscore}}
--- a/src/Doc/JEdit/JEdit.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/JEdit/JEdit.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -9,52 +9,50 @@
 section \<open>Concepts and terminology\<close>
 
 text \<open>
-  Isabelle/jEdit is a Prover IDE that integrates \<^emph>\<open>parallel proof
-  checking\<close> @{cite "Wenzel:2009" and "Wenzel:2013:ITP"} with
-  \<^emph>\<open>asynchronous user interaction\<close> @{cite "Wenzel:2010" and
-  "Wenzel:2012:UITP-EPTCS" and "Wenzel:2014:ITP-PIDE" and "Wenzel:2014:UITP"},
-  based on a document-oriented approach to \<^emph>\<open>continuous proof processing\<close>
-  @{cite "Wenzel:2011:CICM" and "Wenzel:2012"}. Many concepts and system
-  components are fit together in order to make this work. The main building
-  blocks are as follows.
+  Isabelle/jEdit is a Prover IDE that integrates \<^emph>\<open>parallel proof checking\<close>
+  @{cite "Wenzel:2009" and "Wenzel:2013:ITP"} with \<^emph>\<open>asynchronous user
+  interaction\<close> @{cite "Wenzel:2010" and "Wenzel:2012:UITP-EPTCS" and
+  "Wenzel:2014:ITP-PIDE" and "Wenzel:2014:UITP"}, based on a document-oriented
+  approach to \<^emph>\<open>continuous proof processing\<close> @{cite "Wenzel:2011:CICM" and
+  "Wenzel:2012"}. Many concepts and system components are fit together in
+  order to make this work. The main building blocks are as follows.
 
-  \<^descr>[PIDE] is a general framework for Prover IDEs based on Isabelle/Scala.
-  It is built around a concept of parallel and asynchronous document
-  processing, which is supported natively by the parallel proof engine that is
-  implemented in Isabelle/ML. The traditional prover command loop is given up;
-  instead there is direct support for editing of source text, with rich formal
-  markup for GUI rendering.
+    \<^descr>[PIDE] is a general framework for Prover IDEs based on Isabelle/Scala. It
+    is built around a concept of parallel and asynchronous document
+    processing, which is supported natively by the parallel proof engine that
+    is implemented in Isabelle/ML. The traditional prover command loop is
+    given up; instead there is direct support for editing of source text, with
+    rich formal markup for GUI rendering.
 
-  \<^descr>[Isabelle/ML] is the implementation and extension language of
-  Isabelle, see also @{cite "isabelle-implementation"}. It is integrated
-  into the logical context of Isabelle/Isar and allows to manipulate
-  logical entities directly. Arbitrary add-on tools may be implemented
-  for object-logics such as Isabelle/HOL.
+    \<^descr>[Isabelle/ML] is the implementation and extension language of Isabelle,
+    see also @{cite "isabelle-implementation"}. It is integrated into the
+    logical context of Isabelle/Isar and allows one to manipulate logical entities
+    directly. Arbitrary add-on tools may be implemented for object-logics such
+    as Isabelle/HOL.
 
-  \<^descr>[Isabelle/Scala] is the system programming language of
-  Isabelle. It extends the pure logical environment of Isabelle/ML
-  towards the ``real world'' of graphical user interfaces, text
-  editors, IDE frameworks, web services etc.  Special infrastructure
-  allows to transfer algebraic datatypes and formatted text easily
-  between ML and Scala, using asynchronous protocol commands.
+    \<^descr>[Isabelle/Scala] is the system programming language of Isabelle. It
+    extends the pure logical environment of Isabelle/ML towards the ``real
+    world'' of graphical user interfaces, text editors, IDE frameworks, web
+    services etc. Special infrastructure allows one to transfer algebraic
+    datatypes and formatted text easily between ML and Scala, using
+    asynchronous protocol commands.
 
-  \<^descr>[jEdit] is a sophisticated text editor implemented in
-  Java.\footnote{@{url "http://www.jedit.org"}} It is easily extensible
-  by plugins written in languages that work on the JVM, e.g.\
-  Scala\footnote{@{url "http://www.scala-lang.org"}}.
+    \<^descr>[jEdit] is a sophisticated text editor implemented in Java.\<^footnote>\<open>@{url
+    "http://www.jedit.org"}\<close> It is easily extensible by plugins written in
+    languages that work on the JVM, e.g.\ Scala\<^footnote>\<open>@{url
+    "http://www.scala-lang.org"}\<close>.
 
-  \<^descr>[Isabelle/jEdit] is the main example application of the PIDE
-  framework and the default user-interface for Isabelle. It targets
-  both beginners and experts. Technically, Isabelle/jEdit combines a
-  slightly modified version of the jEdit code base with a special
-  plugin for Isabelle, integrated as standalone application for the
-  main operating system platforms: Linux, Windows, Mac OS X.
+    \<^descr>[Isabelle/jEdit] is the main example application of the PIDE framework
+    and the default user-interface for Isabelle. It targets both beginners and
+    experts. Technically, Isabelle/jEdit combines a slightly modified version
+    of the jEdit code base with a special plugin for Isabelle, integrated as
+    standalone application for the main operating system platforms: Linux,
+    Windows, Mac OS X.
 
-
-  The subtle differences of Isabelle/ML versus Standard ML,
-  Isabelle/Scala versus Scala, Isabelle/jEdit versus jEdit need to be
-  taken into account when discussing any of these PIDE building blocks
-  in public forums, mailing lists, or even scientific publications.
+  The subtle differences of Isabelle/ML versus Standard ML, Isabelle/Scala
+  versus Scala, Isabelle/jEdit versus jEdit need to be taken into account when
+  discussing any of these PIDE building blocks in public forums, mailing
+  lists, or even scientific publications.
 \<close>
 
 
@@ -73,31 +71,31 @@
   the jEdit text editor, while preserving its general look-and-feel as far as
   possible. The main plugin is called ``Isabelle'' and has its own menu
   \<^emph>\<open>Plugins~/ Isabelle\<close> with access to several panels (see also
-  \secref{sec:dockables}), as well as \<^emph>\<open>Plugins~/ Plugin Options~/
-  Isabelle\<close> (see also \secref{sec:options}).
+  \secref{sec:dockables}), as well as \<^emph>\<open>Plugins~/ Plugin Options~/ Isabelle\<close>
+  (see also \secref{sec:options}).
 
   The options allow to specify a logic session name --- the same selector is
-  accessible in the \<^emph>\<open>Theories\<close> panel (\secref{sec:theories}). On
-  application startup, the selected logic session image is provided
-  automatically by the Isabelle build tool @{cite "isabelle-system"}: if it is
-  absent or outdated wrt.\ its sources, the build process updates it before
-  entering the Prover IDE.  Change of the logic session within Isabelle/jEdit
-  requires a restart of the whole application.
+  accessible in the \<^emph>\<open>Theories\<close> panel (\secref{sec:theories}). On application
+  startup, the selected logic session image is provided automatically by the
+  Isabelle build tool @{cite "isabelle-system"}: if it is absent or outdated
+  wrt.\ its sources, the build process updates it before entering the Prover
+  IDE. Change of the logic session within Isabelle/jEdit requires a restart of
+  the whole application.
 
   \<^medskip>
-  The main job of the Prover IDE is to manage sources and their
-  changes, taking the logical structure as a formal document into account (see
-  also \secref{sec:document-model}). The editor and the prover are connected
+  The main job of the Prover IDE is to manage sources and their changes,
+  taking the logical structure as a formal document into account (see also
+  \secref{sec:document-model}). The editor and the prover are connected
   asynchronously in a lock-free manner. The prover is free to organize the
   checking of the formal text in parallel on multiple cores, and provides
   feedback via markup, which is rendered in the editor via colors, boxes,
   squiggly underlines, hyperlinks, popup windows, icons, clickable output etc.
 
-  Using the mouse together with the modifier key \<^verbatim>\<open>CONTROL\<close> (Linux,
-  Windows) or \<^verbatim>\<open>COMMAND\<close> (Mac OS X) exposes additional formal content
-  via tooltips and/or hyperlinks (see also \secref{sec:tooltips-hyperlinks}).
-  Output (in popups etc.) may be explored recursively, using the same
-  techniques as in the editor source buffer.
+  Using the mouse together with the modifier key \<^verbatim>\<open>CONTROL\<close> (Linux, Windows)
+  or \<^verbatim>\<open>COMMAND\<close> (Mac OS X) exposes additional formal content via tooltips
+  and/or hyperlinks (see also \secref{sec:tooltips-hyperlinks}). Output (in
+  popups etc.) may be explored recursively, using the same techniques as in
+  the editor source buffer.
 
   Thus the Prover IDE gives an impression of direct access to formal content
   of the prover within the editor, but in reality only certain aspects are
@@ -109,14 +107,13 @@
 subsection \<open>Documentation\<close>
 
 text \<open>
-  The \<^emph>\<open>Documentation\<close> panel of Isabelle/jEdit provides access to the
-  standard Isabelle documentation: PDF files are opened by regular desktop
-  operations of the underlying platform. The section ``Original jEdit
-  Documentation'' contains the original \<^emph>\<open>User's Guide\<close> of this
-  sophisticated text editor. The same is accessible via the \<^verbatim>\<open>Help\<close>
-  menu or \<^verbatim>\<open>F1\<close> keyboard shortcut, using the built-in HTML viewer of
-  Java/Swing. The latter also includes \<^emph>\<open>Frequently Asked Questions\<close> and
-  documentation of individual plugins.
+  The \<^emph>\<open>Documentation\<close> panel of Isabelle/jEdit provides access to the standard
+  Isabelle documentation: PDF files are opened by regular desktop operations
+  of the underlying platform. The section ``Original jEdit Documentation''
+  contains the original \<^emph>\<open>User's Guide\<close> of this sophisticated text editor. The
+  same is accessible via the \<^verbatim>\<open>Help\<close> menu or \<^verbatim>\<open>F1\<close> keyboard shortcut, using
+  the built-in HTML viewer of Java/Swing. The latter also includes
+  \<^emph>\<open>Frequently Asked Questions\<close> and documentation of individual plugins.
 
   Most of the information about generic jEdit is relevant for Isabelle/jEdit
   as well, but one needs to keep in mind that defaults sometimes differ, and
@@ -129,9 +126,9 @@
 subsection \<open>Plugins\<close>
 
 text \<open>
-  The \<^emph>\<open>Plugin Manager\<close> of jEdit allows to augment editor functionality by
-  JVM modules (jars) that are provided by the central plugin repository, which
-  is accessible via various mirror sites.
+  The \<^emph>\<open>Plugin Manager\<close> of jEdit allows to augment editor functionality with JVM
+  modules (jars) that are provided by the central plugin repository, which is
+  accessible via various mirror sites.
 
   Connecting to the plugin server-infrastructure of the jEdit project allows
   to update bundled plugins or to add further functionality. This needs to be
@@ -142,28 +139,27 @@
   at a grand scale.
 
   \<^medskip>
-  The main \<^emph>\<open>Isabelle\<close> plugin is an integral part of
-  Isabelle/jEdit and needs to remain active at all times! A few additional
-  plugins are bundled with Isabelle/jEdit for convenience or out of necessity,
-  notably \<^emph>\<open>Console\<close> with its Isabelle/Scala sub-plugin
-  (\secref{sec:scala-console}) and \<^emph>\<open>SideKick\<close> with some Isabelle-specific
-  parsers for document tree structure (\secref{sec:sidekick}). The
-  \<^emph>\<open>Navigator\<close> plugin is particularly important for hyperlinks within the
-  formal document-model (\secref{sec:tooltips-hyperlinks}). Further plugins
-  (e.g.\ \<^emph>\<open>ErrorList\<close>, \<^emph>\<open>Code2HTML\<close>) are included to saturate the
-  dependencies of bundled plugins, but have no particular use in
-  Isabelle/jEdit.
+  The main \<^emph>\<open>Isabelle\<close> plugin is an integral part of Isabelle/jEdit and needs
+  to remain active at all times! A few additional plugins are bundled with
+  Isabelle/jEdit for convenience or out of necessity, notably \<^emph>\<open>Console\<close> with
+  its Isabelle/Scala sub-plugin (\secref{sec:scala-console}) and \<^emph>\<open>SideKick\<close>
+  with some Isabelle-specific parsers for document tree structure
+  (\secref{sec:sidekick}). The \<^emph>\<open>Navigator\<close> plugin is particularly important
+  for hyperlinks within the formal document-model
+  (\secref{sec:tooltips-hyperlinks}). Further plugins (e.g.\ \<^emph>\<open>ErrorList\<close>,
+  \<^emph>\<open>Code2HTML\<close>) are included to saturate the dependencies of bundled plugins,
+  but have no particular use in Isabelle/jEdit.
 \<close>
 
 
 subsection \<open>Options \label{sec:options}\<close>
 
-text \<open>Both jEdit and Isabelle have distinctive management of
-  persistent options.
+text \<open>
+  jEdit and Isabelle both manage persistent options, each in its own way.
 
-  Regular jEdit options are accessible via the dialogs \<^emph>\<open>Utilities~/
-  Global Options\<close> or \<^emph>\<open>Plugins~/ Plugin Options\<close>, with a second chance to
-  flip the two within the central options dialog. Changes are stored in
+  Regular jEdit options are accessible via the dialogs \<^emph>\<open>Utilities~/ Global
+  Options\<close> or \<^emph>\<open>Plugins~/ Plugin Options\<close>, with a second chance to flip the
+  two within the central options dialog. Changes are stored in
   @{file_unchecked "$ISABELLE_HOME_USER/jedit/properties"} and
   @{file_unchecked "$ISABELLE_HOME_USER/jedit/keymaps"}.
 
@@ -173,17 +169,17 @@
   coverage of sessions and command-line tools like @{tool build} or @{tool
   options}.
 
-  Those Isabelle options that are declared as \<^bold>\<open>public\<close> are configurable
-  in Isabelle/jEdit via \<^emph>\<open>Plugin Options~/ Isabelle~/ General\<close>. Moreover,
-  there are various options for rendering of document content, which are
-  configurable via \<^emph>\<open>Plugin Options~/ Isabelle~/ Rendering\<close>. Thus
-  \<^emph>\<open>Plugin Options~/ Isabelle\<close> in jEdit provides a view on a subset of
-  Isabelle system options. Note that some of these options affect general
-  parameters that are relevant outside Isabelle/jEdit as well, e.g.\
-  @{system_option threads} or @{system_option parallel_proofs} for the
-  Isabelle build tool @{cite "isabelle-system"}, but it is possible to use the
-  settings variable @{setting ISABELLE_BUILD_OPTIONS} to change defaults for
-  batch builds without affecting Isabelle/jEdit.
+  Those Isabelle options that are declared as \<^bold>\<open>public\<close> are configurable in
+  Isabelle/jEdit via \<^emph>\<open>Plugin Options~/ Isabelle~/ General\<close>. Moreover, there
+  are various options for rendering of document content, which are
+  configurable via \<^emph>\<open>Plugin Options~/ Isabelle~/ Rendering\<close>. Thus \<^emph>\<open>Plugin
+  Options~/ Isabelle\<close> in jEdit provides a view on a subset of Isabelle system
+  options. Note that some of these options affect general parameters that are
+  relevant outside Isabelle/jEdit as well, e.g.\ @{system_option threads} or
+  @{system_option parallel_proofs} for the Isabelle build tool @{cite
+  "isabelle-system"}, but it is possible to use the settings variable
+  @{setting ISABELLE_BUILD_OPTIONS} to change defaults for batch builds
+  without affecting Isabelle/jEdit.
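+
+  For example, a line like the following in the user settings file
+  @{file_unchecked "$ISABELLE_HOME_USER/etc/settings"} would change such
+  defaults for batch builds only (the option values are merely illustrative):
+
+  @{verbatim [display]\<open>ISABELLE_BUILD_OPTIONS="threads=4 parallel_proofs=0"\<close>}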
 
   The jEdit action @{action_def isabelle.options} opens the options dialog for
   the Isabelle plugin; it can be mapped to editor GUI elements as usual.
@@ -193,24 +189,25 @@
   Isabelle/jEdit. Editing the machine-generated @{file_unchecked
   "$ISABELLE_HOME_USER/jedit/properties"} or @{file_unchecked
   "$ISABELLE_HOME_USER/etc/preferences"} manually while the application is
-  running is likely to cause surprise due to lost update!\<close>
+  running is likely to cause surprise due to lost updates!
+\<close>
 
 
 subsection \<open>Keymaps\<close>
 
-text \<open>Keyboard shortcuts used to be managed as jEdit properties in
-  the past, but recent versions (2013) have a separate concept of
-  \<^emph>\<open>keymap\<close> that is configurable via \<^emph>\<open>Global Options~/
-  Shortcuts\<close>.  The \<^verbatim>\<open>imported\<close> keymap is derived from the
-  initial environment of properties that is available at the first
-  start of the editor; afterwards the keymap file takes precedence.
+text \<open>
+  Keyboard shortcuts used to be managed as jEdit properties in the past, but
+  recent versions (2013) have a separate concept of \<^emph>\<open>keymap\<close> that is
+  configurable via \<^emph>\<open>Global Options~/ Shortcuts\<close>. The \<^verbatim>\<open>imported\<close> keymap is
+  derived from the initial environment of properties that is available at the
+  first start of the editor; afterwards the keymap file takes precedence.
 
   This is relevant for Isabelle/jEdit due to various fine-tuning of default
   properties, and additional keyboard shortcuts for Isabelle-specific
   functionality. Users may change their keymap later, but need to copy some
   keyboard shortcuts manually (see also @{file_unchecked
-  "$ISABELLE_HOME_USER/jedit/keymaps"} versus \<^verbatim>\<open>shortcut\<close> properties
-  in @{file "$ISABELLE_HOME/src/Tools/jEdit/src/jEdit.props"}).
+  "$ISABELLE_HOME_USER/jedit/keymaps"} versus \<^verbatim>\<open>shortcut\<close> properties in @{file
+  "$ISABELLE_HOME/src/Tools/jEdit/src/jEdit.props"}).
 \<close>
 
 
@@ -239,33 +236,32 @@
   Start jEdit with Isabelle plugin setup and open FILES
   (default "$USER_HOME/Scratch.thy" or ":" for empty buffer).\<close>}
 
-  The \<^verbatim>\<open>-l\<close> option specifies the session name of the logic
-  image to be used for proof processing.  Additional session root
-  directories may be included via option \<^verbatim>\<open>-d\<close> to augment
-  that name space of @{tool build} @{cite "isabelle-system"}.
+  The \<^verbatim>\<open>-l\<close> option specifies the session name of the logic image to be used
+  for proof processing. Additional session root directories may be included
+  via option \<^verbatim>\<open>-d\<close> to augment the name space of @{tool build} @{cite
+  "isabelle-system"}.
 
-  By default, the specified image is checked and built on demand. The
-  \<^verbatim>\<open>-s\<close> option determines where to store the result session image
-  of @{tool build}. The \<^verbatim>\<open>-n\<close> option bypasses the implicit build
-  process for the selected session image.
+  By default, the specified image is checked and built on demand. The \<^verbatim>\<open>-s\<close>
+  option determines where to store the result session image of @{tool build}.
+  The \<^verbatim>\<open>-n\<close> option bypasses the implicit build process for the selected
+  session image.
 
-  The \<^verbatim>\<open>-m\<close> option specifies additional print modes for the prover
-  process. Note that the system option @{system_option_ref jedit_print_mode}
-  allows to do the same persistently (e.g.\ via the \<^emph>\<open>Plugin Options\<close>
-  dialog of Isabelle/jEdit), without requiring command-line invocation.
+  The \<^verbatim>\<open>-m\<close> option specifies additional print modes for the prover process.
+  Note that the system option @{system_option_ref jedit_print_mode} allows to
+  do the same persistently (e.g.\ via the \<^emph>\<open>Plugin Options\<close> dialog of
+  Isabelle/jEdit), without requiring command-line invocation.
 
-  The \<^verbatim>\<open>-J\<close> and \<^verbatim>\<open>-j\<close> options allow to pass additional
-  low-level options to the JVM or jEdit, respectively. The defaults are
-  provided by the Isabelle settings environment @{cite "isabelle-system"}, but
-  note that these only work for the command-line tool described here, and not
-  the regular application.
+  The \<^verbatim>\<open>-J\<close> and \<^verbatim>\<open>-j\<close> options allow to pass additional low-level options to
+  the JVM or jEdit, respectively. The defaults are provided by the Isabelle
+  settings environment @{cite "isabelle-system"}, but note that these only
+  work for the command-line tool described here, and not the regular
+  application.
 
-  The \<^verbatim>\<open>-b\<close> and \<^verbatim>\<open>-f\<close> options control the self-build
-  mechanism of Isabelle/jEdit. This is only relevant for building from
-  sources, which also requires an auxiliary \<^verbatim>\<open>jedit_build\<close> component
-  from @{url "http://isabelle.in.tum.de/components"}. The official
-  Isabelle release already includes a pre-built version of Isabelle/jEdit.
-\<close>
+  The \<^verbatim>\<open>-b\<close> and \<^verbatim>\<open>-f\<close> options control the self-build mechanism of
+  Isabelle/jEdit. This is only relevant for building from sources, which also
+  requires an auxiliary \<^verbatim>\<open>jedit_build\<close> component from @{url
+  "http://isabelle.in.tum.de/components"}. The official Isabelle release
+  already includes a pre-built version of Isabelle/jEdit.
+\<close>
 
 
 chapter \<open>Augmented jEdit functionality\<close>
@@ -283,39 +279,35 @@
   Isabelle/jEdit enables platform-specific look-and-feel by default as
   follows.
 
-  \<^descr>[Linux:] The platform-independent \<^emph>\<open>Metal\<close> is used by
-  default.
+    \<^descr>[Linux:] The platform-independent \<^emph>\<open>Metal\<close> is used by default.
 
-  \<^emph>\<open>GTK+\<close> also works under the side-condition that the overall GTK theme
-  is selected in a Swing-friendly way.\footnote{GTK support in Java/Swing was
-  once marketed aggressively by Sun, but never quite finished. Today (2015) it
-  is lagging behind further development of Swing and GTK. The graphics
-  rendering performance can be worse than for other Swing look-and-feels.
-  Nonetheless it has its uses for displays with very high resolution (such as
-  ``4K'' or ``UHD'' models), because the rendering by the external library is
-  subject to global system settings for font scaling.}
+    \<^emph>\<open>GTK+\<close> also works under the side-condition that the overall GTK theme is
+    selected in a Swing-friendly way.\<^footnote>\<open>GTK support in Java/Swing was once
+    marketed aggressively by Sun, but never quite finished. Today (2015) it is
+    lagging behind further development of Swing and GTK. The graphics
+    rendering performance can be worse than for other Swing look-and-feels.
+    Nonetheless it has its uses for displays with very high resolution (such
+    as ``4K'' or ``UHD'' models), because the rendering by the external
+    library is subject to global system settings for font scaling.\<close>
 
-  \<^descr>[Windows:] Regular \<^emph>\<open>Windows\<close> is used by default, but
-  \<^emph>\<open>Windows Classic\<close> also works.
-
-  \<^descr>[Mac OS X:] Regular \<^emph>\<open>Mac OS X\<close> is used by default.
+    \<^descr>[Windows:] Regular \<^emph>\<open>Windows\<close> is used by default, but \<^emph>\<open>Windows Classic\<close>
+    also works.
 
-  The bundled \<^emph>\<open>MacOSX\<close> plugin provides various functions that are
-  expected from applications on that particular platform: quit from menu or
-  dock, preferences menu, drag-and-drop of text files on the application,
-  full-screen mode for main editor windows. It is advisable to have the
-  \<^emph>\<open>MacOSX\<close> plugin enabled all the time on that platform.
+    \<^descr>[Mac OS X:] Regular \<^emph>\<open>Mac OS X\<close> is used by default.
 
+    The bundled \<^emph>\<open>MacOSX\<close> plugin provides various functions that are expected
+    from applications on that particular platform: quit from menu or dock,
+    preferences menu, drag-and-drop of text files on the application,
+    full-screen mode for main editor windows. It is advisable to have the
+    \<^emph>\<open>MacOSX\<close> plugin enabled all the time on that platform.
 
-  Users may experiment with different look-and-feels, but need to keep
-  in mind that this extra variance of GUI functionality is unlikely to
-  work in arbitrary combinations.  The platform-independent
-  \<^emph>\<open>Metal\<close> and \<^emph>\<open>Nimbus\<close> should always work.  The historic
-  \<^emph>\<open>CDE/Motif\<close> should be ignored.
+  Users may experiment with different look-and-feels, but need to keep in mind
+  that this extra variance of GUI functionality is unlikely to work in
+  arbitrary combinations. The platform-independent \<^emph>\<open>Metal\<close> and \<^emph>\<open>Nimbus\<close>
+  should always work. The historic \<^emph>\<open>CDE/Motif\<close> should be ignored.
 
-  After changing the look-and-feel in \<^emph>\<open>Global Options~/
-  Appearance\<close>, it is advisable to restart Isabelle/jEdit in order to
-  take full effect.
+  After changing the look-and-feel in \<^emph>\<open>Global Options~/ Appearance\<close>, it is
+  advisable to restart Isabelle/jEdit for the change to take full effect.
 \<close>
 
 
@@ -326,49 +318,47 @@
   were considered ``high resolution'' and bitmap fonts with 12 or 14 pixels as
   adequate for text rendering. Today (2015), we routinely see ``Full HD''
   monitors at $1920 \times 1080$ pixels, and occasionally ``Ultra HD'' at
-  $3840 \times 2160$ or more, but GUI rendering did not really progress
-  beyond the old standards.
+  $3840 \times 2160$ or more, but GUI rendering did not really progress beyond
+  the old standards.
 
   Isabelle/jEdit defaults are a compromise for reasonable out-of-the box
   results on common platforms and medium resolution displays (e.g.\ the ``Full
   HD'' category). Subsequently there are further hints to improve on that.
 
   \<^medskip>
-  The \<^bold>\<open>operating-system platform\<close> usually provides some
-  configuration for global scaling of text fonts, e.g.\ $120\%$--$250\%$ on
-  Windows. Changing that only has a partial effect on GUI rendering;
-  satisfactory display quality requires further adjustments.
+  The \<^bold>\<open>operating-system platform\<close> usually provides some configuration for
+  global scaling of text fonts, e.g.\ $120\%$--$250\%$ on Windows. Changing
+  that only has a partial effect on GUI rendering; satisfactory display
+  quality requires further adjustments.
 
   \<^medskip>
-  The Isabelle/jEdit \<^bold>\<open>application\<close> and its plugins provide
-  various font properties that are summarized below.
+  The Isabelle/jEdit \<^bold>\<open>application\<close> and its plugins provide various font
+  properties that are summarized below.
 
-  \<^item> \<^emph>\<open>Global Options / Text Area / Text font\<close>: the main text area
-  font, which is also used as reference point for various derived font sizes,
-  e.g.\ the Output panel (\secref{sec:output}).
+    \<^item> \<^emph>\<open>Global Options / Text Area / Text font\<close>: the main text area font,
+    which is also used as reference point for various derived font sizes,
+    e.g.\ the Output panel (\secref{sec:output}).
 
-  \<^item> \<^emph>\<open>Global Options / Gutter / Gutter font\<close>: the font for the gutter
-  area left of the main text area, e.g.\ relevant for display of line numbers
-  (disabled by default).
+    \<^item> \<^emph>\<open>Global Options / Gutter / Gutter font\<close>: the font for the gutter area
+    left of the main text area, e.g.\ relevant for display of line numbers
+    (disabled by default).
 
-  \<^item> \<^emph>\<open>Global Options / Appearance / Button, menu and label font\<close> as
-  well as \<^emph>\<open>List and text field font\<close>: this specifies the primary and
-  secondary font for the traditional \<^emph>\<open>Metal\<close> look-and-feel
-  (\secref{sec:look-and-feel}), which happens to scale better than newer ones
-  like \<^emph>\<open>Nimbus\<close>.
+    \<^item> \<^emph>\<open>Global Options / Appearance / Button, menu and label font\<close> as well as
+    \<^emph>\<open>List and text field font\<close>: this specifies the primary and secondary font
+    for the traditional \<^emph>\<open>Metal\<close> look-and-feel (\secref{sec:look-and-feel}),
+    which happens to scale better than newer ones like \<^emph>\<open>Nimbus\<close>.
 
-  \<^item> \<^emph>\<open>Plugin Options / Isabelle / General / Reset Font Size\<close>: the main
-  text area font size for action @{action_ref "isabelle.reset-font-size"},
-  e.g.\ relevant for quick scaling like in major web browsers.
+    \<^item> \<^emph>\<open>Plugin Options / Isabelle / General / Reset Font Size\<close>: the main text
+    area font size for action @{action_ref "isabelle.reset-font-size"}, e.g.\
+    relevant for quick scaling like in major web browsers.
 
-  \<^item> \<^emph>\<open>Plugin Options / Console / General / Font\<close>: the console window
-  font, e.g.\ relevant for Isabelle/Scala command-line.
-
+    \<^item> \<^emph>\<open>Plugin Options / Console / General / Font\<close>: the console window font,
+    e.g.\ relevant for Isabelle/Scala command-line.
 
-  In \figref{fig:isabelle-jedit-hdpi} the \<^emph>\<open>Metal\<close> look-and-feel is
-  configured with custom fonts at 30 pixels, and the main text area and
-  console at 36 pixels. Despite the old-fashioned appearance of \<^emph>\<open>Metal\<close>,
-  this leads to decent rendering quality on all platforms.
+  In \figref{fig:isabelle-jedit-hdpi} the \<^emph>\<open>Metal\<close> look-and-feel is configured
+  with custom fonts at 30 pixels, and the main text area and console at 36
+  pixels. Despite the old-fashioned appearance of \<^emph>\<open>Metal\<close>, this leads to
+  decent rendering quality on all platforms.
 
   \begin{figure}[htb]
   \begin{center}
@@ -387,25 +377,25 @@
 section \<open>Dockable windows \label{sec:dockables}\<close>
 
 text \<open>
-  In jEdit terminology, a \<^emph>\<open>view\<close> is an editor window with one or more
-  \<^emph>\<open>text areas\<close> that show the content of one or more \<^emph>\<open>buffers\<close>. A
-  regular view may be surrounded by \<^emph>\<open>dockable windows\<close> that show
-  additional information in arbitrary format, not just text; a \<^emph>\<open>plain
-  view\<close> does not allow dockables. The \<^emph>\<open>dockable window manager\<close> of jEdit
-  organizes these dockable windows, either as \<^emph>\<open>floating\<close> windows, or
-  \<^emph>\<open>docked\<close> panels within one of the four margins of the view. There may
-  be any number of floating instances of some dockable window, but at most one
-  docked instance; jEdit actions that address \<^emph>\<open>the\<close> dockable window of a
-  particular kind refer to the unique docked instance.
+  In jEdit terminology, a \<^emph>\<open>view\<close> is an editor window with one or more \<^emph>\<open>text
+  areas\<close> that show the content of one or more \<^emph>\<open>buffers\<close>. A regular view may
+  be surrounded by \<^emph>\<open>dockable windows\<close> that show additional information in
+  arbitrary format, not just text; a \<^emph>\<open>plain view\<close> does not allow dockables.
+  The \<^emph>\<open>dockable window manager\<close> of jEdit organizes these dockable windows,
+  either as \<^emph>\<open>floating\<close> windows, or \<^emph>\<open>docked\<close> panels within one of the four
+  margins of the view. There may be any number of floating instances of some
+  dockable window, but at most one docked instance; jEdit actions that address
+  \<^emph>\<open>the\<close> dockable window of a particular kind refer to the unique docked
+  instance.
 
   Dockables are used routinely in jEdit for important functionality like
-  \<^emph>\<open>HyperSearch Results\<close> or the \<^emph>\<open>File System Browser\<close>. Plugins often
-  provide a central dockable to access their key functionality, which may be
-  opened by the user on demand. The Isabelle/jEdit plugin takes this approach
-  to the extreme: its plugin menu provides the entry-points to many panels
-  that are managed as dockable windows. Some important panels are docked by
-  default, e.g.\ \<^emph>\<open>Documentation\<close>, \<^emph>\<open>Output\<close>, \<^emph>\<open>Query\<close>, but the
-  user can change this arrangement easily and persistently.
+  \<^emph>\<open>HyperSearch Results\<close> or the \<^emph>\<open>File System Browser\<close>. Plugins often provide
+  a central dockable to access their key functionality, which may be opened by
+  the user on demand. The Isabelle/jEdit plugin takes this approach to the
+  extreme: its plugin menu provides the entry-points to many panels that are
+  managed as dockable windows. Some important panels are docked by default,
+  e.g.\ \<^emph>\<open>Documentation\<close>, \<^emph>\<open>Output\<close>, \<^emph>\<open>Query\<close>, but the user can change this
+  arrangement easily and persistently.
 
   Compared to plain jEdit, dockable window management in Isabelle/jEdit is
   slightly augmented according to the following principles:
@@ -439,19 +429,18 @@
   Isabelle sources consist of \<^emph>\<open>symbols\<close> that extend plain ASCII to allow
   infinitely many mathematical symbols within the formal sources. This works
   without depending on particular encodings and varying Unicode
-  standards.\footnote{Raw Unicode characters within formal sources would
-  compromise portability and reliability in the face of changing
-  interpretation of special features of Unicode, such as Combining Characters
-  or Bi-directional Text.} See also @{cite "Wenzel:2011:CICM"}.
+  standards.\<^footnote>\<open>Raw Unicode characters within formal sources would compromise
+  portability and reliability in the face of changing interpretation of
+  special features of Unicode, such as Combining Characters or Bi-directional
+  Text.\<close> See also @{cite "Wenzel:2011:CICM"}.
 
   For the prover back-end, formal text consists of ASCII characters that are
-  grouped according to some simple rules, e.g.\ as plain ``\<^verbatim>\<open>a\<close>'' or
-  symbolic ``\<^verbatim>\<open>\<alpha>\<close>''. For the editor front-end, a certain subset of
-  symbols is rendered physically via Unicode glyphs, in order to show
-  ``\<^verbatim>\<open>\<alpha>\<close>'' as ``\<open>\<alpha>\<close>'', for example. This symbol
-  interpretation is specified by the Isabelle system distribution in @{file
-  "$ISABELLE_HOME/etc/symbols"} and may be augmented by the user in
-  @{file_unchecked "$ISABELLE_HOME_USER/etc/symbols"}.
+  grouped according to some simple rules, e.g.\ as plain ``\<^verbatim>\<open>a\<close>'' or symbolic
+  ``\<^verbatim>\<open>\<alpha>\<close>''. For the editor front-end, a certain subset of symbols is rendered
+  physically via Unicode glyphs, in order to show ``\<^verbatim>\<open>\<alpha>\<close>'' as ``\<open>\<alpha>\<close>'', for
+  example. This symbol interpretation is specified by the Isabelle system
+  distribution in @{file "$ISABELLE_HOME/etc/symbols"} and may be augmented by
+  the user in @{file_unchecked "$ISABELLE_HOME_USER/etc/symbols"}.
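+
+  Entries in these files associate a symbol name with a Unicode code point and
+  optional further attributes; e.g.\ the bundled specification contains an
+  entry of roughly the following form:
+
+  @{verbatim [display]\<open>\<euro>  code: 0x0020ac\<close>}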
 
   The appendix of @{cite "isabelle-isar-ref"} gives an overview of the
   standard interpretation of finitely many symbols from the infinite
@@ -484,13 +473,13 @@
 
   Note that a Java/AWT/Swing application can load additional fonts only if
   they are not installed on the operating system already! Some outdated
-  version of \<^verbatim>\<open>IsabelleText\<close> that happens to be provided by the
-  operating system would prevent Isabelle/jEdit to use its bundled version.
-  This could lead to missing glyphs (black rectangles), when the system
-  version of \<^verbatim>\<open>IsabelleText\<close> is older than the application version.
-  This problem can be avoided by refraining to ``install'' any version of
-  \<^verbatim>\<open>IsabelleText\<close> in the first place, although it is occasionally
-  tempting to use the same font in other applications.
+  version of \<^verbatim>\<open>IsabelleText\<close> that happens to be provided by the operating
+  system would prevent Isabelle/jEdit from using its bundled version. This could
+  lead to missing glyphs (black rectangles), when the system version of
+  \<^verbatim>\<open>IsabelleText\<close> is older than the application version. This problem can be
+  avoided by refraining from ``installing'' any version of \<^verbatim>\<open>IsabelleText\<close> in
+  the first place, although it is occasionally tempting to use the same font
+  in other applications.
 \<close>
 
 paragraph \<open>Input methods.\<close>
@@ -548,17 +537,16 @@
     \<open>\<notin>\<close> & \<^verbatim>\<open>\notin\<close> & \<^verbatim>\<open>~:\<close> \\
   \end{tabular}
   \<^medskip>
- 
+
   Note that the above abbreviations refer to the input method. The logical
   notation provides ASCII alternatives that often coincide, but sometimes
   deviate. This occasionally causes user confusion with very old-fashioned
-  Isabelle source that use ASCII replacement notation like \<^verbatim>\<open>!\<close> or
-  \<^verbatim>\<open>ALL\<close> directly in the text.
+  Isabelle sources that use ASCII replacement notation like \<^verbatim>\<open>!\<close> or \<^verbatim>\<open>ALL\<close>
+  directly in the text.
 
   On the other hand, coincidence of symbol abbreviations with ASCII
-  replacement syntax syntax helps to update old theory sources via
-  explicit completion (see also \<^verbatim>\<open>C+b\<close> explained in
-  \secref{sec:completion}).
+  replacement syntax helps to update old theory sources via explicit
+  completion (see also \<^verbatim>\<open>C+b\<close> explained in \secref{sec:completion}).
 \<close>
 
 paragraph \<open>Control symbols.\<close>
@@ -596,8 +584,7 @@
 
   Isabelle/jEdit provides SideKick parsers for its main mode for theory files,
   as well as some minor modes for the \<^verbatim>\<open>NEWS\<close> file (see
-  \figref{fig:sidekick}), session \<^verbatim>\<open>ROOT\<close> files, and system
-  \<^verbatim>\<open>options\<close>.
+  \figref{fig:sidekick}), session \<^verbatim>\<open>ROOT\<close> files, and system \<^verbatim>\<open>options\<close>.
 
   \begin{figure}[htb]
   \begin{center}
@@ -607,35 +594,33 @@
   \label{fig:sidekick}
   \end{figure}
 
-  Moreover, the special SideKick parser \<^verbatim>\<open>isabelle-markup\<close>
-  provides access to the full (uninterpreted) markup tree of the PIDE
-  document model of the current buffer.  This is occasionally useful
-  for informative purposes, but the amount of displayed information
-  might cause problems for large buffers, both for the human and the
-  machine.
+  Moreover, the special SideKick parser \<^verbatim>\<open>isabelle-markup\<close> provides access to
+  the full (uninterpreted) markup tree of the PIDE document model of the
+  current buffer. This is occasionally useful for informative purposes, but
+  the amount of displayed information might cause problems for large buffers,
+  both for the human and the machine.
 \<close>
 
 
 section \<open>Scala console \label{sec:scala-console}\<close>
 
 text \<open>
-  The \<^emph>\<open>Console\<close> plugin manages various shells (command interpreters),
-  e.g.\ \<^emph>\<open>BeanShell\<close>, which is the official jEdit scripting language, and
-  the cross-platform \<^emph>\<open>System\<close> shell. Thus the console provides similar
-  functionality than the Emacs buffers \<^verbatim>\<open>*scratch*\<close> and
-  \<^verbatim>\<open>*shell*\<close>.
+  The \<^emph>\<open>Console\<close> plugin manages various shells (command interpreters), e.g.\
+  \<^emph>\<open>BeanShell\<close>, which is the official jEdit scripting language, and the
+  cross-platform \<^emph>\<open>System\<close> shell. Thus the console provides similar
+  functionality to the Emacs buffers \<^verbatim>\<open>*scratch*\<close> and \<^verbatim>\<open>*shell*\<close>.
 
-  Isabelle/jEdit extends the repertoire of the console by \<^emph>\<open>Scala\<close>, which
-  is the regular Scala toplevel loop running inside the same JVM process as
+  Isabelle/jEdit extends the repertoire of the console by \<^emph>\<open>Scala\<close>, which is
+  the regular Scala toplevel loop running inside the same JVM process as
   Isabelle/jEdit itself. This means the Scala command interpreter has access
   to the JVM name space and state of the running Prover IDE application. The
   default environment imports the full content of packages \<^verbatim>\<open>isabelle\<close> and
   \<^verbatim>\<open>isabelle.jedit\<close>.
 
-  For example, \<^verbatim>\<open>PIDE\<close> refers to the Isabelle/jEdit plugin object,
-  and \<^verbatim>\<open>view\<close> to the current editor view of jEdit. The Scala
-  expression \<^verbatim>\<open>PIDE.snapshot(view)\<close> makes a PIDE document snapshot
-  of the current buffer within the current editor view.
+  For example, \<^verbatim>\<open>PIDE\<close> refers to the Isabelle/jEdit plugin object, and \<^verbatim>\<open>view\<close>
+  to the current editor view of jEdit. The Scala expression
+  \<^verbatim>\<open>PIDE.snapshot(view)\<close> makes a PIDE document snapshot of the current buffer
+  within the current editor view.
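+
+  A short interaction in the \<^emph>\<open>Scala\<close> console might thus look like this,
+  assuming the usual \<^verbatim>\<open>is_outdated\<close> field of document snapshots and the
+  standard jEdit buffer operations:
+
+  @{verbatim [display]\<open>val snapshot = PIDE.snapshot(view)
+snapshot.is_outdated
+view.getBuffer.getPath\<close>}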
 
   This helps to explore Isabelle/Scala functionality interactively. Some care
   is required to avoid interference with the internals of the running
@@ -649,10 +634,10 @@
   File specifications in jEdit follow various formats and conventions
   according to \<^emph>\<open>Virtual File Systems\<close>, which may be also provided by
   additional plugins. This allows to access remote files via the \<^verbatim>\<open>http:\<close>
-  protocol prefix, for example. Isabelle/jEdit attempts to work with
-  the file-system model of jEdit as far as possible. In particular, theory
-  sources are passed directly from the editor to the prover, without
-  indirection via physical files.
+  protocol prefix, for example. Isabelle/jEdit attempts to work with the
+  file-system model of jEdit as far as possible. In particular, theory sources
+  are passed directly from the editor to the prover, without indirection via
+  physical files.
 
   Despite the flexibility of URLs in jEdit, local files are particularly
   important and are accessible without protocol prefix. Here the path notation
@@ -663,12 +648,11 @@
 
   The Java notation for files needs to be distinguished from the one of
   Isabelle, which uses POSIX notation with forward slashes on \<^emph>\<open>all\<close>
-  platforms.\footnote{Isabelle/ML on Windows uses Cygwin file-system access
-  and Unix-style path notation.} Moreover, environment variables from the
-  Isabelle process may be used freely, e.g.\ @{file
-  "$ISABELLE_HOME/etc/symbols"} or @{file_unchecked "$POLYML_HOME/README"}.
-  There are special shortcuts: @{file "~"} for @{file "$USER_HOME"} and @{file
-  "~~"} for @{file "$ISABELLE_HOME"}.
+  platforms.\<^footnote>\<open>Isabelle/ML on Windows uses Cygwin file-system access and
+  Unix-style path notation.\<close> Moreover, environment variables from the Isabelle
+  process may be used freely, e.g.\ @{file "$ISABELLE_HOME/etc/symbols"} or
+  @{file_unchecked "$POLYML_HOME/README"}. There are special shortcuts: @{file
+  "~"} for @{file "$USER_HOME"} and @{file "~~"} for @{file "$ISABELLE_HOME"}.
 
   \<^medskip>
   Since jEdit happens to support environment variables within file
@@ -681,20 +665,19 @@
   (\secref{sec:command-line}).
 
   Isabelle/jEdit imitates \<^verbatim>\<open>$ISABELLE_HOME\<close> and \<^verbatim>\<open>$ISABELLE_HOME_USER\<close> within
-  the Java process environment, in order to
-  allow easy access to these important places from the editor. The file
-  browser of jEdit also includes \<^emph>\<open>Favorites\<close> for these two important
-  locations.
+  the Java process environment, in order to allow easy access to these
+  important places from the editor. The file browser of jEdit also includes
+  \<^emph>\<open>Favorites\<close> for these two important locations.
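+
+  For instance, a file specification like the following may be entered
+  directly in jEdit dialogs and is resolved via that imitated environment:
+
+  @{verbatim [display]\<open>$ISABELLE_HOME_USER/etc/symbols\<close>}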
 
   \<^medskip>
-  Path specifications in prover input or output usually include
-  formal markup that turns it into a hyperlink (see also
-  \secref{sec:tooltips-hyperlinks}). This allows to open the corresponding
-  file in the text editor, independently of the path notation.
+  Path specifications in prover input or output usually include formal markup
+  that turns them into hyperlinks (see also \secref{sec:tooltips-hyperlinks}).
+  This allows to open the corresponding file in the text editor, independently
+  of the path notation.
 
   Formally checked paths in prover input are subject to completion
-  (\secref{sec:completion}): partial specifications are resolved via
-  directory content and possible completions are offered in a popup.
+  (\secref{sec:completion}): partial specifications are resolved via directory
+  content and possible completions are offered in a popup.
 \<close>
 
 
@@ -720,9 +703,9 @@
 text \<open>
   As a regular text editor, jEdit maintains a collection of \<^emph>\<open>buffers\<close> to
   store text files; each buffer may be associated with any number of visible
-  \<^emph>\<open>text areas\<close>. Buffers are subject to an \<^emph>\<open>edit mode\<close> that is
-  determined from the file name extension. The following modes are treated
-  specifically in Isabelle/jEdit:
+  \<^emph>\<open>text areas\<close>. Buffers are subject to an \<^emph>\<open>edit mode\<close> that is determined
+  from the file name extension. The following modes are treated specifically
+  in Isabelle/jEdit:
 
   \<^medskip>
   \begin{tabular}{lll}
@@ -734,17 +717,16 @@
   \<^medskip>
 
   All jEdit buffers are automatically added to the PIDE document-model as
-  \<^emph>\<open>document nodes\<close>. The overall document structure is defined by the
-  theory nodes in two dimensions:
+  \<^emph>\<open>document nodes\<close>. The overall document structure is defined by the theory
+  nodes in two dimensions:
 
-  \<^enum> via \<^bold>\<open>theory imports\<close> that are specified in the \<^emph>\<open>theory
-  header\<close> using concrete syntax of the @{command_ref theory} command
-  @{cite "isabelle-isar-ref"};
+    \<^enum> via \<^bold>\<open>theory imports\<close> that are specified in the \<^emph>\<open>theory header\<close> using
+    concrete syntax of the @{command_ref theory} command @{cite
+    "isabelle-isar-ref"};
 
-  \<^enum> via \<^bold>\<open>auxiliary files\<close> that are loaded into a theory by special
-  \<^emph>\<open>load commands\<close>, notably @{command_ref ML_file} and @{command_ref
-  SML_file} @{cite "isabelle-isar-ref"}.
-
+    \<^enum> via \<^bold>\<open>auxiliary files\<close> that are loaded into a theory by special \<^emph>\<open>load
+    commands\<close>, notably @{command_ref ML_file} and @{command_ref SML_file}
+    @{cite "isabelle-isar-ref"}.
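+
+  For example, a minimal theory that exhibits both dimensions might look like
+  this (the ML file name is hypothetical):
+
+  @{verbatim [display]\<open>theory Scratch
+  imports Main
+begin
+
+ML_file "util.ML"
+
+end\<close>}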
 
   In any case, source files are managed by the PIDE infrastructure: the
   physical file-system only plays a subordinate role. The relevant version of
@@ -756,12 +738,12 @@
 subsection \<open>Theories \label{sec:theories}\<close>
 
 text \<open>
-  The \<^emph>\<open>Theories\<close> panel (see also \figref{fig:theories}) provides an
-  overview of the status of continuous checking of theory nodes within the
-  document model. Unlike batch sessions of @{tool build} @{cite
-  "isabelle-system"}, theory nodes are identified by full path names; this allows
-  to work with multiple (disjoint) Isabelle sessions simultaneously within the
-  same editor session.
+  The \<^emph>\<open>Theories\<close> panel (see also \figref{fig:theories}) provides an overview
+  of the status of continuous checking of theory nodes within the document
+  model. Unlike batch sessions of @{tool build} @{cite "isabelle-system"},
+  theory nodes are identified by full path names; this allows to work with
+  multiple (disjoint) Isabelle sessions simultaneously within the same editor
+  session.
 
   \begin{figure}[htb]
   \begin{center}
@@ -780,38 +762,36 @@
   @{system_option jedit_auto_load}.
 
   \<^medskip>
-  The visible \<^emph>\<open>perspective\<close> of Isabelle/jEdit is defined by the
-  collective view on theory buffers via open text areas. The perspective is
-  taken as a hint for document processing: the prover ensures that those parts
-  of a theory where the user is looking are checked, while other parts that
-  are presently not required are ignored. The perspective is changed by
-  opening or closing text area windows, or scrolling within a window.
+  The visible \<^emph>\<open>perspective\<close> of Isabelle/jEdit is defined by the collective
+  view on theory buffers via open text areas. The perspective is taken as a
+  hint for document processing: the prover ensures that those parts of a
+  theory where the user is looking are checked, while other parts that are
+  presently not required are ignored. The perspective is changed by opening or
+  closing text area windows, or scrolling within a window.
 
-  The \<^emph>\<open>Theories\<close> panel provides some further options to influence
-  the process of continuous checking: it may be switched off globally
-  to restrict the prover to superficial processing of command syntax.
-  It is also possible to indicate theory nodes as \<^emph>\<open>required\<close> for
-  continuous checking: this means such nodes and all their imports are
-  always processed independently of the visibility status (if
-  continuous checking is enabled).  Big theory libraries that are
-  marked as required can have significant impact on performance,
+  The \<^emph>\<open>Theories\<close> panel provides some further options to influence the process
+  of continuous checking: it may be switched off globally to restrict the
+  prover to superficial processing of command syntax. It is also possible to
+  indicate theory nodes as \<^emph>\<open>required\<close> for continuous checking: this means
+  such nodes and all their imports are always processed independently of the
+  visibility status (if continuous checking is enabled). Big theory libraries
+  that are marked as required can have significant impact on performance,
   though.
 
   \<^medskip>
-  Formal markup of checked theory content is turned into GUI
-  rendering, based on a standard repertoire known from IDEs for programming
-  languages: colors, icons, highlighting, squiggly underlines, tooltips,
-  hyperlinks etc. For outer syntax of Isabelle/Isar there is some traditional
-  syntax-highlighting via static keywords and tokenization within the editor;
-  this buffer syntax is determined from theory imports. In contrast, the
-  painting of inner syntax (term language etc.)\ uses semantic information
-  that is reported dynamically from the logical context. Thus the prover can
-  provide additional markup to help the user to understand the meaning of
-  formal text, and to produce more text with some add-on tools (e.g.\
-  information messages with \<^emph>\<open>sendback\<close> markup by automated provers or
-  disprovers in the background).
+  Formal markup of checked theory content is turned into GUI rendering, based
+  on a standard repertoire known from IDEs for programming languages: colors,
+  icons, highlighting, squiggly underlines, tooltips, hyperlinks etc. For
+  outer syntax of Isabelle/Isar there is some traditional syntax-highlighting
+  via static keywords and tokenization within the editor; this buffer syntax
+  is determined from theory imports. In contrast, the painting of inner syntax
+  (term language etc.)\ uses semantic information that is reported dynamically
+  from the logical context. Thus the prover can provide additional markup to
+  help the user to understand the meaning of formal text, and to produce more
+  text with some add-on tools (e.g.\ information messages with \<^emph>\<open>sendback\<close>
+  markup by automated provers or disprovers in the background).
+\<close>
 
-\<close>
 
 subsection \<open>Auxiliary files \label{sec:aux-files}\<close>
 
@@ -826,14 +806,13 @@
   treated as changes of the corresponding load command.
 
   \<^medskip>
-  As a concession to the massive amount of ML files in Isabelle/HOL
-  itself, the content of auxiliary files is only added to the PIDE
-  document-model on demand, the first time when opened explicitly in the
-  editor. There are further tricks to manage markup of ML files, such that
-  Isabelle/HOL may be edited conveniently in the Prover IDE on small machines
-  with only 8\,GB of main memory. Using \<^verbatim>\<open>Pure\<close> as logic session
-  image, the exploration may start at the top @{file
-  "$ISABELLE_HOME/src/HOL/Main.thy"} or the bottom @{file
+  As a concession to the massive amount of ML files in Isabelle/HOL itself,
+  the content of auxiliary files is only added to the PIDE document-model on
+  demand, when first opened explicitly in the editor. There are
+  further tricks to manage markup of ML files, such that Isabelle/HOL may be
+  edited conveniently in the Prover IDE on small machines with only 8\,GB of
+  main memory. Using \<^verbatim>\<open>Pure\<close> as logic session image, the exploration may start
+  at the top @{file "$ISABELLE_HOME/src/HOL/Main.thy"} or the bottom @{file
   "$ISABELLE_HOME/src/HOL/HOL.thy"}, for example.
 
   Initially, before an auxiliary file is opened in the editor, the prover
@@ -853,30 +832,30 @@
   morally unsupported and might lead to confusion.
 
   \<^medskip>
-  Output that refers to an auxiliary file is combined with that of
-  the corresponding load command, and shown whenever the file or the command
-  are active (see also \secref{sec:output}).
+  Output that refers to an auxiliary file is combined with that of the
+  corresponding load command, and shown whenever the file or the command is
+  active (see also \secref{sec:output}).
 
   Warnings, errors, and other useful markup is attached directly to the
   positions in the auxiliary file buffer, in the manner of other well-known
   IDEs. By using the load command @{command SML_file} as explained in @{file
   "$ISABELLE_HOME/src/Tools/SML/Examples.thy"}, Isabelle/jEdit may be used as
   fully-featured IDE for Standard ML, independently of theory or proof
-  development: the required theory merely serves as some kind of project
-  file for a collection of SML source modules.
+  development: the required theory merely serves as some kind of project file
+  for a collection of SML source modules.
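+
+  A sketch of such a project-file theory, in the manner of the example file
+  above, might look as follows (the SML file names are hypothetical):
+
+  @{verbatim [display]\<open>theory SML_Project
+  imports Pure
+begin
+
+SML_file "library.sml"
+SML_file "main.sml"
+
+end\<close>}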
 \<close>
 
 
 section \<open>Output \label{sec:output}\<close>
 
 text \<open>
-  Prover output consists of \<^emph>\<open>markup\<close> and \<^emph>\<open>messages\<close>. Both are
-  directly attached to the corresponding positions in the original source
-  text, and visualized in the text area, e.g.\ as text colours for free and
-  bound variables, or as squiggly underlines for warnings, errors etc.\ (see
-  also \figref{fig:output}). In the latter case, the corresponding messages
-  are shown by hovering with the mouse over the highlighted text --- although
-  in many situations the user should already get some clue by looking at the
+  Prover output consists of \<^emph>\<open>markup\<close> and \<^emph>\<open>messages\<close>. Both are directly
+  attached to the corresponding positions in the original source text, and
+  visualized in the text area, e.g.\ as text colours for free and bound
+  variables, or as squiggly underlines for warnings, errors etc.\ (see also
+  \figref{fig:output}). In the latter case, the corresponding messages are
+  shown by hovering with the mouse over the highlighted text --- although in
+  many situations the user should already get some clue by looking at the
   position of the text highlighting, without the text itself.
 
   \begin{figure}[htb]
@@ -888,11 +867,10 @@
   \label{fig:output}
   \end{figure}
 
-  The ``gutter area'' on the left-hand-side of the text area uses
-  icons to provide a summary of the messages within the adjacent
-  line of text.  Message priorities are used to prefer errors over
-  warnings, warnings over information messages, but plain output is
-  ignored.
+  The ``gutter area'' on the left-hand-side of the text area uses icons to
+  provide a summary of the messages within the adjacent line of text. Message
+  priorities are used to prefer errors over warnings, warnings over
+  information messages, but plain output is ignored.
 
   The ``overview area'' on the right-hand-side of the text area uses similar
   information to paint small rectangles for the overall status of the whole
@@ -900,16 +878,14 @@
   the given window height. Mouse clicks on the overview area position the
   cursor approximately to the corresponding line of text in the buffer.
 
-  Another course-grained overview is provided by the \<^emph>\<open>Theories\<close>
-  panel, but without direct correspondence to text positions.  A
-  double-click on one of the theory entries with their status overview
-  opens the corresponding text buffer, without changing the cursor
-  position.
+  Another coarse-grained overview is provided by the \<^emph>\<open>Theories\<close> panel, but
+  without direct correspondence to text positions. A double-click on one of
+  the theory entries with their status overview opens the corresponding text
+  buffer, without changing the cursor position.
 
   \<^medskip>
-  In addition, the \<^emph>\<open>Output\<close> panel displays prover
-  messages that correspond to a given command, within a separate
-  window.
+  In addition, the \<^emph>\<open>Output\<close> panel displays prover messages that correspond to
+  a given command, within a separate window.
 
   The cursor position in the presently active text area determines the prover
   command whose cumulative message output is appended and shown in that window
@@ -925,36 +901,34 @@
   possible to do meaningful proof editing within the primary text area and its
   markup, while using secondary output windows only rarely.
 
-  The main purpose of the output window is to ``debug'' unclear
-  situations by inspecting internal state of the prover.\footnote{In
-  that sense, unstructured tactic scripts depend on continuous
-  debugging with internal state inspection.} Consequently, some
-  special messages for \<^emph>\<open>tracing\<close> or \<^emph>\<open>proof state\<close> only
+  The main purpose of the output window is to ``debug'' unclear situations by
+  inspecting internal state of the prover.\<^footnote>\<open>In that sense, unstructured tactic
+  scripts depend on continuous debugging with internal state inspection.\<close>
+  Consequently, some special messages for \<^emph>\<open>tracing\<close> or \<^emph>\<open>proof state\<close> only
   appear here, and are not attached to the original source.
 
   \<^medskip>
-  In any case, prover messages also contain markup that may
-  be explored recursively via tooltips or hyperlinks (see
-  \secref{sec:tooltips-hyperlinks}), or clicked directly to initiate
-  certain actions (see \secref{sec:auto-tools} and
-  \secref{sec:sledgehammer}).\<close>
+  In any case, prover messages also contain markup that may be explored
+  recursively via tooltips or hyperlinks (see
+  \secref{sec:tooltips-hyperlinks}), or clicked directly to initiate certain
+  actions (see \secref{sec:auto-tools} and \secref{sec:sledgehammer}).
+\<close>
 
 
 section \<open>Query \label{sec:query}\<close>
 
 text \<open>
-  The \<^emph>\<open>Query\<close> panel provides various GUI forms to request extra
-  information from the prover. In old times the user would have issued some
-  diagnostic command like @{command find_theorems} and inspected its output,
-  but this is now integrated into the Prover IDE.
+  The \<^emph>\<open>Query\<close> panel provides various GUI forms to request extra information
+  from the prover. In old times the user would have issued some diagnostic
+  command like @{command find_theorems} and inspected its output, but this is
+  now integrated into the Prover IDE.
 
-  A \<^emph>\<open>Query\<close> window provides some input fields and buttons for a
-  particular query command, with output in a dedicated text area. There are
-  various query modes: \<^emph>\<open>Find Theorems\<close>, \<^emph>\<open>Find Constants\<close>,
-  \<^emph>\<open>Print Context\<close>, e.g.\ see \figref{fig:query}. As usual in jEdit,
-  multiple \<^emph>\<open>Query\<close> windows may be active at the same time: any number of
-  floating instances, but at most one docked instance (which is used by
-  default).
+  A \<^emph>\<open>Query\<close> window provides some input fields and buttons for a particular
+  query command, with output in a dedicated text area. There are various query
+  modes: \<^emph>\<open>Find Theorems\<close>, \<^emph>\<open>Find Constants\<close>, \<^emph>\<open>Print Context\<close>, e.g.\ see
+  \figref{fig:query}. As usual in jEdit, multiple \<^emph>\<open>Query\<close> windows may be
+  active at the same time: any number of floating instances, but at most one
+  docked instance (which is used by default).
 
   \begin{figure}[htb]
   \begin{center}
@@ -967,20 +941,19 @@
   \<^medskip>
   The following GUI elements are common to all query modes:
 
-  \<^item> The spinning wheel provides feedback about the status of a pending
-  query wrt.\ the evaluation of its context and its own operation.
+    \<^item> The spinning wheel provides feedback about the status of a pending query
+    wrt.\ the evaluation of its context and its own operation.
 
-  \<^item> The \<^emph>\<open>Apply\<close> button attaches a fresh query invocation to the
-  current context of the command where the cursor is pointing in the text.
+    \<^item> The \<^emph>\<open>Apply\<close> button attaches a fresh query invocation to the current
+    context of the command where the cursor is pointing in the text.
 
-  \<^item> The \<^emph>\<open>Search\<close> field allows to highlight query output according to
-  some regular expression, in the notation that is commonly used on the Java
-  platform.\footnote{@{url
-  "http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html"}}
-  This may serve as an additional visual filter of the result.
+    \<^item> The \<^emph>\<open>Search\<close> field allows to highlight query output according to some
+    regular expression, in the notation that is commonly used on the Java
+    platform.\<^footnote>\<open>@{url
+    "https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html"}\<close>
+    This may serve as an additional visual filter of the result.
 
-  \<^item> The \<^emph>\<open>Zoom\<close> box controls the font size of the output area.
-
+    \<^item> The \<^emph>\<open>Zoom\<close> box controls the font size of the output area.
 
   All query operations are asynchronous: there is no need to wait for the
   evaluation of the document for the query context, nor for the query
@@ -994,26 +967,26 @@
 subsection \<open>Find theorems\<close>
 
 text \<open>
-  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Find Theorems\<close> mode retrieves facts from the
-  theory or proof context matching all of given criteria in the \<^emph>\<open>Find\<close>
-  text field. A single criterium has the following syntax:
+  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Find Theorems\<close> mode retrieves facts from the theory
+  or proof context matching all of the given criteria in the \<^emph>\<open>Find\<close> text
+  field. A single criterion has the following syntax:
 
   @{rail \<open>
     ('-'?) ('name' ':' @{syntax nameref} | 'intro' | 'elim' | 'dest' |
       'solves' | 'simp' ':' @{syntax term} | @{syntax term})
   \<close>}
 
-  See also the Isar command @{command_ref find_theorems} in
-  @{cite "isabelle-isar-ref"}.
+  See also the Isar command @{command_ref find_theorems} in @{cite
+  "isabelle-isar-ref"}.
 \<close>
 
 
 subsection \<open>Find constants\<close>
 
 text \<open>
-  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Find Constants\<close> mode prints all constants
-  whose type meets all of the given criteria in the \<^emph>\<open>Find\<close> text field.
-  A single criterium has the following syntax:
+  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Find Constants\<close> mode prints all constants whose type
+  meets all of the given criteria in the \<^emph>\<open>Find\<close> text field. A single
+  criterion has the following syntax:
 
   @{rail \<open>
     ('-'?)
@@ -1028,8 +1001,8 @@
 subsection \<open>Print context\<close>
 
 text \<open>
-  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Print Context\<close> mode prints information from
-  the theory or proof context, or proof state. See also the Isar commands
+  The \<^emph>\<open>Query\<close> panel in \<^emph>\<open>Print Context\<close> mode prints information from the
+  theory or proof context, or proof state. See also the Isar commands
   @{command_ref print_context}, @{command_ref print_cases}, @{command_ref
   print_term_bindings}, @{command_ref print_theorems}, @{command_ref
   print_state} described in @{cite "isabelle-isar-ref"}.
@@ -1040,12 +1013,11 @@
 
 text \<open>
   Formally processed text (prover input or output) contains rich markup
-  information that can be explored further by using the \<^verbatim>\<open>CONTROL\<close>
-  modifier key on Linux and Windows, or \<^verbatim>\<open>COMMAND\<close> on Mac OS X.
-  Hovering with the mouse while the modifier is pressed reveals a
-  \<^emph>\<open>tooltip\<close> (grey box over the text with a yellow popup) and/or a
-  \<^emph>\<open>hyperlink\<close> (black rectangle over the text with change of mouse
-  pointer); see also \figref{fig:tooltip}.
+  information that can be explored further by using the \<^verbatim>\<open>CONTROL\<close> modifier
+  key on Linux and Windows, or \<^verbatim>\<open>COMMAND\<close> on Mac OS X. Hovering with the mouse
+  while the modifier is pressed reveals a \<^emph>\<open>tooltip\<close> (grey box over the text
+  with a yellow popup) and/or a \<^emph>\<open>hyperlink\<close> (black rectangle over the text
+  with change of mouse pointer); see also \figref{fig:tooltip}.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1055,9 +1027,9 @@
   \label{fig:tooltip}
   \end{figure}
 
-  Tooltip popups use the same rendering mechanisms as the main text
-  area, and further tooltips and/or hyperlinks may be exposed
-  recursively by the same mechanism; see \figref{fig:nested-tooltips}.
+  Tooltip popups use the same rendering mechanisms as the main text area, and
+  further tooltips and/or hyperlinks may be exposed recursively by the same
+  mechanism; see \figref{fig:nested-tooltips}.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1067,25 +1039,24 @@
   \label{fig:nested-tooltips}
   \end{figure}
 
-  The tooltip popup window provides some controls to \<^emph>\<open>close\<close> or
-  \<^emph>\<open>detach\<close> the window, turning it into a separate \<^emph>\<open>Info\<close>
-  window managed by jEdit.  The \<^verbatim>\<open>ESCAPE\<close> key closes
-  \<^emph>\<open>all\<close> popups, which is particularly relevant when nested
-  tooltips are stacking up.
+  The tooltip popup window provides some controls to \<^emph>\<open>close\<close> or \<^emph>\<open>detach\<close> the
+  window, turning it into a separate \<^emph>\<open>Info\<close> window managed by jEdit. The
+  \<^verbatim>\<open>ESCAPE\<close> key closes \<^emph>\<open>all\<close> popups, which is particularly relevant when
+  nested tooltips are stacking up.
 
   \<^medskip>
-  A black rectangle in the text indicates a hyperlink that may be
-  followed by a mouse click (while the \<^verbatim>\<open>CONTROL\<close> or \<^verbatim>\<open>COMMAND\<close> modifier
-  key is still pressed). Such jumps to other text locations
-  are recorded by the \<^emph>\<open>Navigator\<close> plugin, which is bundled with
-  Isabelle/jEdit and enabled by default, including navigation arrows in the
-  main jEdit toolbar.
+  A black rectangle in the text indicates a hyperlink that may be followed by
+  a mouse click (while the \<^verbatim>\<open>CONTROL\<close> or \<^verbatim>\<open>COMMAND\<close> modifier key is still
+  pressed). Such jumps to other text locations are recorded by the
+  \<^emph>\<open>Navigator\<close> plugin, which is bundled with Isabelle/jEdit and enabled by
+  default, including navigation arrows in the main jEdit toolbar.
 
-  Also note that the link target may be a file that is itself not
-  subject to formal document processing of the editor session and thus
-  prevents further exploration: the chain of hyperlinks may end in
-  some source file of the underlying logic image, or within the
-  ML bootstrap sources of Isabelle/Pure.\<close>
+  Also note that the link target may be a file that is itself not subject to
+  formal document processing of the editor session and thus prevents further
+  exploration: the chain of hyperlinks may end in some source file of the
+  underlying logic image, or within the ML bootstrap sources of
+  Isabelle/Pure.
+\<close>
 
 
 section \<open>Completion \label{sec:completion}\<close>
@@ -1100,16 +1071,16 @@
 
   \<^medskip>
   \<^emph>\<open>Explicit completion\<close> is triggered by the action @{action_ref
-  "isabelle.complete"}, which is bound to the keyboard shortcut \<^verbatim>\<open>C+b\<close>,
-  and thus overrides the jEdit default for @{action_ref "complete-word"}.
+  "isabelle.complete"}, which is bound to the keyboard shortcut \<^verbatim>\<open>C+b\<close>, and
+  thus overrides the jEdit default for @{action_ref "complete-word"}.
 
-  \<^emph>\<open>Implicit completion\<close> hooks into the regular keyboard input stream of
-  the editor, with some event filtering and optional delays.
+  \<^emph>\<open>Implicit completion\<close> hooks into the regular keyboard input stream of the
+  editor, with some event filtering and optional delays.
 
   \<^medskip>
-  Completion options may be configured in \<^emph>\<open>Plugin Options~/
-  Isabelle~/ General~/ Completion\<close>. These are explained in further detail
-  below, whenever relevant. There is also a summary of options in
+  Completion options may be configured in \<^emph>\<open>Plugin Options~/ Isabelle~/
+  General~/ Completion\<close>. These are explained in further detail below, whenever
+  relevant. There is also a summary of options in
   \secref{sec:completion-options}.
 
   The asynchronous nature of PIDE interaction means that information from the
@@ -1132,19 +1103,17 @@
   kinds and purposes. The completion mechanism supports this by the following
   built-in templates:
 
-  \<^descr> \<^verbatim>\<open>`\<close> (single ASCII back-quote) supports \<^emph>\<open>quotations\<close>
-  via text cartouches. There are three selections, which are always presented
-  in the same order and do not depend on any context information. The default
-  choice produces a template ``\<open>\<open>\<box>\<close>\<close>'', where the box indicates the
-  cursor position after insertion; the other choices help to repair the block
-  structure of unbalanced text cartouches.
+    \<^descr> \<^verbatim>\<open>`\<close> (single ASCII back-quote) supports \<^emph>\<open>quotations\<close> via text
+    cartouches. There are three selections, which are always presented in the
+    same order and do not depend on any context information. The default
+    choice produces a template ``\<open>\<open>\<box>\<close>\<close>'', where the box indicates the cursor
+    position after insertion; the other choices help to repair the block
+    structure of unbalanced text cartouches.
 
-  \<^descr> \<^verbatim>\<open>@{\<close> is completed to the template ``\<open>@{\<box>}\<close>'',
-  where the box indicates the cursor position after insertion. Here it is
-  convenient to use the wildcard ``\<^verbatim>\<open>__\<close>'' or a more specific name
-  prefix to let semantic completion of name-space entries propose
-  antiquotation names.
-
+    \<^descr> \<^verbatim>\<open>@{\<close> is completed to the template ``\<open>@{\<box>}\<close>'', where the box indicates
+    the cursor position after insertion. Here it is convenient to use the
+    wildcard ``\<^verbatim>\<open>__\<close>'' or a more specific name prefix to let semantic
+    completion of name-space entries propose antiquotation names.
 
   With some practice, input of quoted sub-languages and antiquotations of
   embedded languages should work fluently. Note that national keyboard layouts
@@ -1193,10 +1162,9 @@
   When inserted into the text, the above examples all produce the same Unicode
   rendering \<open>\<forall>\<close> of the underlying symbol \<^verbatim>\<open>\<forall>\<close>.
 
-  A symbol abbreviation that is a plain word, like \<^verbatim>\<open>ALL\<close>, is
-  treated like a syntax keyword. Non-word abbreviations like \<^verbatim>\<open>-->\<close>
-  are inserted more aggressively, except for single-character abbreviations
-  like \<^verbatim>\<open>!\<close> above.
+  A symbol abbreviation that is a plain word, like \<^verbatim>\<open>ALL\<close>, is treated like a
+  syntax keyword. Non-word abbreviations like \<^verbatim>\<open>-->\<close> are inserted more
+  aggressively, except for single-character abbreviations like \<^verbatim>\<open>!\<close> above.
 
   \<^medskip>
   Symbol completion depends on the semantic language context
@@ -1217,17 +1185,17 @@
   @{system_option_ref completion_limit}. The completion mechanism takes this
   into account when collecting information on the prover side.
 
-  Already recognized names are \<^emph>\<open>not\<close> completed further, but completion
-  may be extended by appending a suffix of underscores. This provokes a failed
+  Already recognized names are \<^emph>\<open>not\<close> completed further, but completion may be
+  extended by appending a suffix of underscores. This provokes a failed
   lookup, and another completion attempt while ignoring the underscores. For
-  example, in a name space where \<^verbatim>\<open>foo\<close> and \<^verbatim>\<open>foobar\<close>
-  are known, the input \<^verbatim>\<open>foo\<close> remains unchanged, but \<^verbatim>\<open>foo_\<close> may be completed
-  to \<^verbatim>\<open>foo\<close> or \<^verbatim>\<open>foobar\<close>.
+  example, in a name space where \<^verbatim>\<open>foo\<close> and \<^verbatim>\<open>foobar\<close> are known, the input
+  \<^verbatim>\<open>foo\<close> remains unchanged, but \<^verbatim>\<open>foo_\<close> may be completed to \<^verbatim>\<open>foo\<close> or
+  \<^verbatim>\<open>foobar\<close>.
 
-  The special identifier ``\<^verbatim>\<open>__\<close>'' serves as a wild-card for
-  arbitrary completion: it exposes the name-space content to the completion
-  mechanism (truncated according to @{system_option completion_limit}). This
-  is occasionally useful to explore an unknown name-space, e.g.\ in some
+  The special identifier ``\<^verbatim>\<open>__\<close>'' serves as a wild-card for arbitrary
+  completion: it exposes the name-space content to the completion mechanism
+  (truncated according to @{system_option completion_limit}). This is
+  occasionally useful to explore an unknown name-space, e.g.\ in some
   template.
 \<close>
 
@@ -1239,9 +1207,9 @@
   source text, e.g.\ for the argument of a load command
   (\secref{sec:aux-files}), the completion mechanism explores the directory
   content and offers the result as completion popup. Relative path
-  specifications are understood wrt.\ the \<^emph>\<open>master directory\<close> of the
-  document node (\secref{sec:buffer-node}) of the enclosing editor buffer;
-  this requires a proper theory, not an auxiliary file.
+  specifications are understood wrt.\ the \<^emph>\<open>master directory\<close> of the document
+  node (\secref{sec:buffer-node}) of the enclosing editor buffer; this
+  requires a proper theory, not an auxiliary file.
 
   A suffix of slashes may be used to continue the exploration of an already
   recognized directory name.
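As a concrete example of such a load-command argument, consider the following theory fragment; the file name is hypothetical and only marks the place where path completion applies:

  theory Scratch
    imports Main
  begin

  ML_file "Tools/my_tool.ML"  (* path understood wrt. the master directory *)

  end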
@@ -1274,10 +1242,10 @@
   default keyboard shortcut \<^verbatim>\<open>C+b\<close>.
 
   \<^medskip>
-  Dictionary lookup uses some educated guesses about lower-case,
-  upper-case, and capitalized words. This is oriented on common use in
-  English, where this aspect is not decisive for proper spelling, in contrast
-  to German, for example.
+  Dictionary lookup uses some educated guesses about lower-case, upper-case,
+  and capitalized words. This is oriented on common use in English, where this
+  aspect is not decisive for proper spelling, in contrast to German, for
+  example.
 \<close>
 
 
@@ -1296,14 +1264,14 @@
   symbol completion for ML source, but within ML strings, comments,
   antiquotations.
 
-  The prover may produce \<^emph>\<open>no completion\<close> markup in exceptional
-  situations, to tell that some language keywords should be excluded from
-  further completion attempts. For example, \<^verbatim>\<open>:\<close> within accepted
-  Isar syntax looses its meaning as abbreviation for symbol \<open>\<in>\<close>.
+  The prover may produce \<^emph>\<open>no completion\<close> markup in exceptional situations, to
+  indicate that some language keywords should be excluded from further
+  completion attempts. For example, \<^verbatim>\<open>:\<close> within accepted Isar syntax loses its
+  meaning as an abbreviation for the symbol \<open>\<in>\<close>.
 
   \<^medskip>
-  The completion context is \<^emph>\<open>ignored\<close> for built-in templates and
-  symbols in their explicit form ``\<^verbatim>\<open>\<foobar>\<close>''; see also
+  The completion context is \<^emph>\<open>ignored\<close> for built-in templates and symbols in
+  their explicit form ``\<^verbatim>\<open>\<foobar>\<close>''; see also
   \secref{sec:completion-varieties}. This allows to complete within broken
   input that escapes its normal semantic context, e.g.\ antiquotations or
   string literals in ML, which do not allow arbitrary backslash sequences.
@@ -1317,56 +1285,55 @@
   optional delay after keyboard input according to @{system_option
   jedit_completion_delay}.
 
-  \<^descr>[Explicit completion] works via action @{action_ref
-  "isabelle.complete"} with keyboard shortcut \<^verbatim>\<open>C+b\<close>. This
-  overrides the shortcut for @{action_ref "complete-word"} in jEdit, but it is
-  possible to restore the original jEdit keyboard mapping of @{action
-  "complete-word"} via \<^emph>\<open>Global Options~/ Shortcuts\<close> and invent a
-  different one for @{action "isabelle.complete"}.
+  \<^descr>[Explicit completion] works via action @{action_ref "isabelle.complete"}
+  with keyboard shortcut \<^verbatim>\<open>C+b\<close>. This overrides the shortcut for @{action_ref
+  "complete-word"} in jEdit, but it is possible to restore the original jEdit
+  keyboard mapping of @{action "complete-word"} via \<^emph>\<open>Global Options~/
+  Shortcuts\<close> and invent a different one for @{action "isabelle.complete"}.
 
   \<^descr>[Explicit spell-checker completion] works via @{action_ref
   "isabelle.complete-word"}, which is exposed in the jEdit context menu, if
   the mouse points to a word that the spell-checker can complete.
 
-  \<^descr>[Implicit completion] works via regular keyboard input of the editor.
-  It depends on further side-conditions:
+  \<^descr>[Implicit completion] works via regular keyboard input of the editor. It
+  depends on further side-conditions:
 
-    \<^enum> The system option @{system_option_ref jedit_completion} needs to
-    be enabled (default).
+    \<^enum> The system option @{system_option_ref jedit_completion} needs to be
+    enabled (default).
 
-    \<^enum> Completion of syntax keywords requires at least 3 relevant
-    characters in the text.
+    \<^enum> Completion of syntax keywords requires at least 3 relevant characters in
+    the text.
 
-    \<^enum> The system option @{system_option_ref jedit_completion_delay}
-    determines an additional delay (0.5 by default), before opening a completion
-    popup.  The delay gives the prover a chance to provide semantic completion
+    \<^enum> The system option @{system_option_ref jedit_completion_delay} determines
+    an additional delay (0.5 by default) before opening a completion popup.
+    The delay gives the prover a chance to provide semantic completion
     information, notably the context (\secref{sec:completion-context}).
 
     \<^enum> The system option @{system_option_ref jedit_completion_immediate}
     (enabled by default) controls whether replacement text should be inserted
     immediately without popup, regardless of @{system_option
-    jedit_completion_delay}. This aggressive mode of completion is restricted to
-    Isabelle symbols and their abbreviations (\secref{sec:symbols}).
+    jedit_completion_delay}. This aggressive mode of completion is restricted
+    to Isabelle symbols and their abbreviations (\secref{sec:symbols}).
 
-    \<^enum> Completion of symbol abbreviations with only one relevant
-    character in the text always enforces an explicit popup,
-    regardless of @{system_option_ref jedit_completion_immediate}.
+    \<^enum> Completion of symbol abbreviations with only one relevant character in
+    the text always enforces an explicit popup, regardless of
+    @{system_option_ref jedit_completion_immediate}.
 \<close>
 
 
 subsection \<open>Completion popup \label{sec:completion-popup}\<close>
 
 text \<open>
-  A \<^emph>\<open>completion popup\<close> is a minimally invasive GUI component over the
-  text area that offers a selection of completion items to be inserted into
-  the text, e.g.\ by mouse clicks. Items are sorted dynamically, according to
-  the frequency of selection, with persistent history. The popup may interpret
-  special keys \<^verbatim>\<open>ENTER\<close>, \<^verbatim>\<open>TAB\<close>, \<^verbatim>\<open>ESCAPE\<close>,
-  \<^verbatim>\<open>UP\<close>, \<^verbatim>\<open>DOWN\<close>, \<^verbatim>\<open>PAGE_UP\<close>, \<^verbatim>\<open>PAGE_DOWN\<close>, but all other key events are
-  passed to the underlying text area.
-  This allows to ignore unwanted completions most of the time and continue
-  typing quickly. Thus the popup serves as a mechanism of confirmation of
-  proposed items, but the default is to continue without completion.
+  A \<^emph>\<open>completion popup\<close> is a minimally invasive GUI component over the text
+  area that offers a selection of completion items to be inserted into the
+  text, e.g.\ by mouse clicks. Items are sorted dynamically, according to the
+  frequency of selection, with persistent history. The popup may interpret
+  special keys \<^verbatim>\<open>ENTER\<close>, \<^verbatim>\<open>TAB\<close>, \<^verbatim>\<open>ESCAPE\<close>, \<^verbatim>\<open>UP\<close>, \<^verbatim>\<open>DOWN\<close>, \<^verbatim>\<open>PAGE_UP\<close>,
+  \<^verbatim>\<open>PAGE_DOWN\<close>, but all other key events are passed to the underlying text
+  area. This makes it possible to ignore unwanted completions most of the time
+  and continue typing quickly. Thus the popup serves as a mechanism for
+  confirming proposed items, but the default is to continue without
+  completion.
 
   The meaning of special keys is as follows:
 
@@ -1383,9 +1350,9 @@
   \end{tabular}
   \<^medskip>
 
-  Movement within the popup is only active for multiple items.
-  Otherwise the corresponding key event retains its standard meaning
-  within the underlying text area.
+  Movement within the popup is only active for multiple items. Otherwise the
+  corresponding key event retains its standard meaning within the underlying
+  text area.
 \<close>
 
 
@@ -1401,32 +1368,31 @@
   all combinations make sense. At least the following important cases are
   well-defined:
 
-  \<^descr>[No selection.] The original is removed and the replacement inserted,
-  depending on the caret position.
+    \<^descr>[No selection.] The original is removed and the replacement inserted,
+    depending on the caret position.
 
-  \<^descr>[Rectangular selection of zero width.] This special case is treated by
-  jEdit as ``tall caret'' and insertion of completion imitates its normal
-  behaviour: separate copies of the replacement are inserted for each line of
-  the selection.
+    \<^descr>[Rectangular selection of zero width.] This special case is treated by
+    jEdit as ``tall caret'' and insertion of completion imitates its normal
+    behaviour: separate copies of the replacement are inserted for each line
+    of the selection.
 
-  \<^descr>[Other rectangular selection or multiple selections.] Here the original
-  is removed and the replacement is inserted for each line (or segment) of the
-  selection.
-
+    \<^descr>[Other rectangular selection or multiple selections.] Here the original
+    is removed and the replacement is inserted for each line (or segment) of
+    the selection.
 
-  Support for multiple selections is particularly useful for
-  \<^emph>\<open>HyperSearch\<close>: clicking on one of the items in the \<^emph>\<open>HyperSearch
-  Results\<close> window makes jEdit select all its occurrences in the corresponding
-  line of text. Then explicit completion can be invoked via \<^verbatim>\<open>C+b\<close>,
-  e.g.\ to replace occurrences of \<^verbatim>\<open>-->\<close> by \<open>\<longrightarrow>\<close>.
+  Support for multiple selections is particularly useful for \<^emph>\<open>HyperSearch\<close>:
+  clicking on one of the items in the \<^emph>\<open>HyperSearch Results\<close> window makes
+  jEdit select all its occurrences in the corresponding line of text. Then
+  explicit completion can be invoked via \<^verbatim>\<open>C+b\<close>, e.g.\ to replace occurrences
+  of \<^verbatim>\<open>-->\<close> by \<open>\<longrightarrow>\<close>.
 
   \<^medskip>
-  Insertion works by removing and inserting pieces of text from the
-  buffer. This counts as one atomic operation on the jEdit history. Thus
-  unintended completions may be reverted by the regular @{action undo} action
-  of jEdit. According to normal jEdit policies, the recovered text after
-  @{action undo} is selected: \<^verbatim>\<open>ESCAPE\<close> is required to reset the
-  selection and to continue typing more text.
+  Insertion works by removing and inserting pieces of text from the buffer.
+  This counts as one atomic operation on the jEdit history. Thus unintended
+  completions may be reverted by the regular @{action undo} action of jEdit.
+  According to normal jEdit policies, the recovered text after @{action undo}
+  is selected: \<^verbatim>\<open>ESCAPE\<close> is required to reset the selection and to continue
+  typing more text.
 \<close>
 
 
@@ -1434,8 +1400,8 @@
 
 text \<open>
   This is a summary of Isabelle/Scala system options that are relevant for
-  completion. They may be configured in \<^emph>\<open>Plugin Options~/ Isabelle~/
-  General\<close> as usual.
+  completion. They may be configured in \<^emph>\<open>Plugin Options~/ Isabelle~/ General\<close>
+  as usual.
 
   \<^item> @{system_option_def completion_limit} specifies the maximum number of
   items for various semantic completion operations (name-space entries etc.)
@@ -1444,10 +1410,10 @@
   regular jEdit key events (\secref{sec:completion-input}): it allows to
   disable implicit completion altogether.
 
-  \<^item> @{system_option_def jedit_completion_select_enter} and
-  @{system_option_def jedit_completion_select_tab} enable keys to select a
-  completion item from the popup (\secref{sec:completion-popup}). Note that a
-  regular mouse click on the list of items is always possible.
+  \<^item> @{system_option_def jedit_completion_select_enter} and @{system_option_def
+  jedit_completion_select_tab} enable keys to select a completion item from
+  the popup (\secref{sec:completion-popup}). Note that a regular mouse click
+  on the list of items is always possible.
 
   \<^item> @{system_option_def jedit_completion_context} specifies whether the
   language context provided by the prover should be used at all. Disabling
@@ -1459,17 +1425,17 @@
   jedit_completion_immediate} determine the handling of keyboard events for
   implicit completion (\secref{sec:completion-input}).
 
-  A @{system_option jedit_completion_delay}~\<^verbatim>\<open>> 0\<close> postpones the
-  processing of key events, until after the user has stopped typing for the
-  given time span, but @{system_option jedit_completion_immediate}~\<^verbatim>\<open>"= true\<close>
-  means that abbreviations of Isabelle symbols are handled nonetheless.
+  A @{system_option jedit_completion_delay}~\<^verbatim>\<open>> 0\<close> postpones the processing of
+  key events, until after the user has stopped typing for the given time span,
+  but @{system_option jedit_completion_immediate}~\<^verbatim>\<open>= true\<close> means that
+  abbreviations of Isabelle symbols are handled nonetheless.
 
   \<^item> @{system_option_def jedit_completion_path_ignore} specifies ``glob''
   patterns to ignore in file-system path completion (separated by colons),
   e.g.\ backup files ending with tilde.
 
-  \<^item> @{system_option_def spell_checker} is a global guard for all
-  spell-checker operations: it allows to disable that mechanism altogether.
+  \<^item> @{system_option_def spell_checker} is a global guard for all spell-checker
+  operations: it makes it possible to disable that mechanism altogether.
 
   \<^item> @{system_option_def spell_checker_dictionary} determines the current
   dictionary, taken from the colon-separated list in the settings variable
@@ -1478,9 +1444,9 @@
   permanent dictionary updates is stored in the directory @{file_unchecked
   "$ISABELLE_HOME_USER/dictionaries"}, in a separate file for each dictionary.
 
-  \<^item> @{system_option_def spell_checker_elements} specifies a
-  comma-separated list of markup elements that delimit words in the source
-  that is subject to spell-checking, including various forms of comments.
+  \<^item> @{system_option_def spell_checker_elements} specifies a comma-separated
+  list of markup elements that delimit words in the source that is subject to
+  spell-checking, including various forms of comments.
 \<close>
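Options changed in this dialog are normally persisted in $ISABELLE_HOME_USER/etc/preferences. The lines below are a rough sketch of such entries, assuming the usual one-option-per-line format with quoted values; the concrete settings are invented for illustration:

  jedit_completion_delay = "0.3"
  jedit_completion_immediate = "true"
  spell_checker_dictionary = "en"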
 
 
@@ -1489,22 +1455,21 @@
 text \<open>
   Continuous document processing works asynchronously in the background.
   Visible document source that has been evaluated may get augmented by
-  additional results of \<^emph>\<open>asynchronous print functions\<close>. The canonical
-  example is proof state output, which is always enabled. More heavy-weight
-  print functions may be applied, in order to prove or disprove parts of the
-  formal text by other means.
+  additional results of \<^emph>\<open>asynchronous print functions\<close>. The canonical example
+  is proof state output, which is always enabled. More heavy-weight print
+  functions may be applied, in order to prove or disprove parts of the formal
+  text by other means.
 
-  Isabelle/HOL provides various automatically tried tools that operate
-  on outermost goal statements (e.g.\ @{command lemma}, @{command
-  theorem}), independently of the state of the current proof attempt.
-  They work implicitly without any arguments.  Results are output as
-  \<^emph>\<open>information messages\<close>, which are indicated in the text area by
-  blue squiggles and a blue information sign in the gutter (see
-  \figref{fig:auto-tools}).  The message content may be shown as for
-  other output (see also \secref{sec:output}).  Some tools
-  produce output with \<^emph>\<open>sendback\<close> markup, which means that
-  clicking on certain parts of the output inserts that text into the
-  source in the proper place.
+  Isabelle/HOL provides various automatically tried tools that operate on
+  outermost goal statements (e.g.\ @{command lemma}, @{command theorem}),
+  independently of the state of the current proof attempt. They work
+  implicitly without any arguments. Results are output as \<^emph>\<open>information
+  messages\<close>, which are indicated in the text area by blue squiggles and a blue
+  information sign in the gutter (see \figref{fig:auto-tools}). The message
+  content may be shown as for other output (see also \secref{sec:output}).
+  Some tools produce output with \<^emph>\<open>sendback\<close> markup, which means that clicking
+  on certain parts of the output inserts that text into the source in the
+  proper place.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1515,85 +1480,81 @@
   \end{figure}
 
   \<^medskip>
-  The following Isabelle system options control the behavior
-  of automatically tried tools (see also the jEdit dialog window
-  \<^emph>\<open>Plugin Options~/ Isabelle~/ General~/ Automatically tried
-  tools\<close>):
+  The following Isabelle system options control the behavior of automatically
+  tried tools (see also the jEdit dialog window \<^emph>\<open>Plugin Options~/ Isabelle~/
+  General~/ Automatically tried tools\<close>):
 
-  \<^item> @{system_option_ref auto_methods} controls automatic use of a
-  combination of standard proof methods (@{method auto}, @{method
-  simp}, @{method blast}, etc.).  This corresponds to the Isar command
-  @{command_ref "try0"} @{cite "isabelle-isar-ref"}.
+  \<^item> @{system_option_ref auto_methods} controls automatic use of a combination
+  of standard proof methods (@{method auto}, @{method simp}, @{method blast},
+  etc.). This corresponds to the Isar command @{command_ref "try0"} @{cite
+  "isabelle-isar-ref"}.
 
   The tool is disabled by default, since unparameterized invocation of
-  standard proof methods often consumes substantial CPU resources
-  without leading to success.
+  standard proof methods often consumes substantial CPU resources without
+  leading to success.
 
-  \<^item> @{system_option_ref auto_nitpick} controls a slightly reduced
-  version of @{command_ref nitpick}, which tests for counterexamples using
-  first-order relational logic. See also the Nitpick manual
-  @{cite "isabelle-nitpick"}.
+  \<^item> @{system_option_ref auto_nitpick} controls a slightly reduced version of
+  @{command_ref nitpick}, which tests for counterexamples using first-order
+  relational logic. See also the Nitpick manual @{cite "isabelle-nitpick"}.
 
-  This tool is disabled by default, due to the extra overhead of
-  invoking an external Java process for each attempt to disprove a
-  subgoal.
+  This tool is disabled by default, due to the extra overhead of invoking an
+  external Java process for each attempt to disprove a subgoal.
 
   \<^item> @{system_option_ref auto_quickcheck} controls automatic use of
-  @{command_ref quickcheck}, which tests for counterexamples using a
-  series of assignments for free variables of a subgoal.
+  @{command_ref quickcheck}, which tests for counterexamples using a series of
+  assignments for free variables of a subgoal.
 
-  This tool is \<^emph>\<open>enabled\<close> by default.  It requires little
-  overhead, but is a bit weaker than @{command nitpick}.
+  This tool is \<^emph>\<open>enabled\<close> by default. It requires little overhead, but is a
+  bit weaker than @{command nitpick}.
 
-  \<^item> @{system_option_ref auto_sledgehammer} controls a significantly
-  reduced version of @{command_ref sledgehammer}, which attempts to prove
-  a subgoal using external automatic provers. See also the
-  Sledgehammer manual @{cite "isabelle-sledgehammer"}.
+  \<^item> @{system_option_ref auto_sledgehammer} controls a significantly reduced
+  version of @{command_ref sledgehammer}, which attempts to prove a subgoal
+  using external automatic provers. See also the Sledgehammer manual @{cite
+  "isabelle-sledgehammer"}.
 
-  This tool is disabled by default, due to the relatively heavy nature
-  of Sledgehammer.
+  This tool is disabled by default, due to the relatively heavy nature of
+  Sledgehammer.
 
   \<^item> @{system_option_ref auto_solve_direct} controls automatic use of
-  @{command_ref solve_direct}, which checks whether the current subgoals
-  can be solved directly by an existing theorem.  This also helps to
-  detect duplicate lemmas.
+  @{command_ref solve_direct}, which checks whether the current subgoals can
+  be solved directly by an existing theorem. This also helps to detect
+  duplicate lemmas.
 
   This tool is \<^emph>\<open>enabled\<close> by default.
 
 
-  Invocation of automatically tried tools is subject to some global
-  policies of parallel execution, which may be configured as follows:
+  Invocation of automatically tried tools is subject to some global policies
+  of parallel execution, which may be configured as follows:
 
-  \<^item> @{system_option_ref auto_time_limit} (default 2.0) determines the
-  timeout (in seconds) for each tool execution.
+  \<^item> @{system_option_ref auto_time_limit} (default 2.0) determines the timeout
+  (in seconds) for each tool execution.
 
-  \<^item> @{system_option_ref auto_time_start} (default 1.0) determines the
-  start delay (in seconds) for automatically tried tools, after the
-  main command evaluation is finished.
+  \<^item> @{system_option_ref auto_time_start} (default 1.0) determines the start
+  delay (in seconds) for automatically tried tools, after the main command
+  evaluation is finished.
 
 
-  Each tool is submitted independently to the pool of parallel
-  execution tasks in Isabelle/ML, using hardwired priorities according
-  to its relative ``heaviness''.  The main stages of evaluation and
-  printing of proof states take precedence, but an already running
-  tool is not canceled and may thus reduce reactivity of proof
-  document processing.
+  Each tool is submitted independently to the pool of parallel execution tasks
+  in Isabelle/ML, using hardwired priorities according to its relative
+  ``heaviness''. The main stages of evaluation and printing of proof states
+  take precedence, but an already running tool is not canceled and may thus
+  reduce reactivity of proof document processing.
 
-  Users should experiment how the available CPU resources (number of
-  cores) are best invested to get additional feedback from prover in
-  the background, by using a selection of weaker or stronger tools.
+  Users should experiment to see how the available CPU resources (number of
+  cores) are best invested to get additional feedback from the prover in the
+  background, by using a selection of weaker or stronger tools.
 \<close>
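As a small, self-contained example of the kind of goal these tools react to, the deliberately false statement below typically provokes an information message from the automatic counterexample finders; the lemma itself is of no further significance:

  lemma "rev xs = xs"
    (* auto quickcheck/nitpick typically report a counterexample,
       e.g. a list with two distinct elements *)
    oops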
 
 
 section \<open>Sledgehammer \label{sec:sledgehammer}\<close>
 
-text \<open>The \<^emph>\<open>Sledgehammer\<close> panel (\figref{fig:sledgehammer})
-  provides a view on some independent execution of the Isar command
-  @{command_ref sledgehammer}, with process indicator (spinning wheel) and
-  GUI elements for important Sledgehammer arguments and options.  Any
-  number of Sledgehammer panels may be active, according to the
-  standard policies of Dockable Window Management in jEdit.  Closing
-  such windows also cancels the corresponding prover tasks.
+text \<open>
+  The \<^emph>\<open>Sledgehammer\<close> panel (\figref{fig:sledgehammer}) provides a view on
+  some independent execution of the Isar command @{command_ref sledgehammer},
+  with process indicator (spinning wheel) and GUI elements for important
+  Sledgehammer arguments and options. Any number of Sledgehammer panels may be
+  active, according to the standard policies of Dockable Window Management in
+  jEdit. Closing such windows also cancels the corresponding prover tasks.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1603,34 +1564,37 @@
   \label{fig:sledgehammer}
   \end{figure}
 
-  The \<^emph>\<open>Apply\<close> button attaches a fresh invocation of @{command
-  sledgehammer} to the command where the cursor is pointing in the
-  text --- this should be some pending proof problem.  Further buttons
-  like \<^emph>\<open>Cancel\<close> and \<^emph>\<open>Locate\<close> help to manage the running
-  process.
+  The \<^emph>\<open>Apply\<close> button attaches a fresh invocation of @{command sledgehammer}
+  to the command where the cursor is pointing in the text --- this should be
+  some pending proof problem. Further buttons like \<^emph>\<open>Cancel\<close> and \<^emph>\<open>Locate\<close>
+  help to manage the running process.
 
-  Results appear incrementally in the output window of the panel.
-  Proposed proof snippets are marked-up as \<^emph>\<open>sendback\<close>, which
-  means a single mouse click inserts the text into a suitable place of
-  the original source.  Some manual editing may be required
-  nonetheless, say to remove earlier proof attempts.\<close>
+  Results appear incrementally in the output window of the panel. Proposed
+  proof snippets are marked up as \<^emph>\<open>sendback\<close>, which means a single mouse
+  click inserts the text into a suitable place of the original source. Some
+  manual editing may be required nonetheless, say to remove earlier proof
+  attempts.
+\<close>
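For orientation, the underlying Isar command may also be written directly into the proof text; the goal below is merely an example that Sledgehammer can usually discharge:

  lemma "length (xs @ ys) = length xs + length ys"
    sledgehammer
    (* clicking the sendback part of the output inserts a proposed proof,
       e.g. "by simp", at this position *)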
 
 
 chapter \<open>Isabelle document preparation\<close>
 
-text \<open>The ultimate purpose of Isabelle is to produce nicely rendered documents
+text \<open>
+  The ultimate purpose of Isabelle is to produce nicely rendered documents
   with the Isabelle document preparation system, which is based on {\LaTeX};
   see also @{cite "isabelle-system" and "isabelle-isar-ref"}. Isabelle/jEdit
-  provides some additional support for document editing.\<close>
+  provides some additional support for document editing.
+\<close>
 
 
 section \<open>Document outline\<close>
 
-text \<open>Theory sources may contain document markup commands, such as
-  @{command_ref chapter}, @{command_ref section}, @{command subsection}. The
-  Isabelle SideKick parser (\secref{sec:sidekick}) represents this document
-  outline as structured tree view, with formal statements and proofs nested
-  inside; see \figref{fig:sidekick-document}.
+text \<open>
+  Theory sources may contain document markup commands, such as @{command_ref
+  chapter}, @{command_ref section}, @{command subsection}. The Isabelle
+  SideKick parser (\secref{sec:sidekick}) represents this document outline as a
+  structured tree view, with formal statements and proofs nested inside; see
+  \figref{fig:sidekick-document}.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1641,25 +1605,27 @@
   \end{figure}
 
   It is also possible to use text folding according to this structure, by
-  adjusting \<^emph>\<open>Utilities / Buffer Options / Folding mode\<close> of jEdit. The
-  default mode \<^verbatim>\<open>isabelle\<close> uses the structure of formal definitions,
-  statements, and proofs. The alternative mode \<^verbatim>\<open>sidekick\<close> uses the
-  document structure of the SideKick parser, as explained above.\<close>
+  adjusting \<^emph>\<open>Utilities / Buffer Options / Folding mode\<close> of jEdit. The default
+  mode \<^verbatim>\<open>isabelle\<close> uses the structure of formal definitions, statements, and
+  proofs. The alternative mode \<^verbatim>\<open>sidekick\<close> uses the document structure of the
+  SideKick parser, as explained above.
+\<close>
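For reference, a theory that yields such an outline simply interleaves these markup commands with formal content, roughly as follows (all names are placeholders):

  chapter \<open>Introduction\<close>

  section \<open>Basic facts\<close>

  text \<open>Some informal explanation.\<close>

  lemma trivial: "True" ..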
 
 
 section \<open>Citations and Bib{\TeX} entries\<close>
 
-text \<open>Citations are managed by {\LaTeX} and Bib{\TeX} in \<^verbatim>\<open>.bib\<close>
-  files. The Isabelle session build process and the @{tool latex} tool @{cite
+text \<open>
+  Citations are managed by {\LaTeX} and Bib{\TeX} in \<^verbatim>\<open>.bib\<close> files. The
+  Isabelle session build process and the @{tool latex} tool @{cite
   "isabelle-system"} are smart enough to assemble the result, based on the
   session directory layout.
 
   The document antiquotation \<open>@{cite}\<close> is described in @{cite
   "isabelle-isar-ref"}. Within the Prover IDE it provides semantic markup for
   tooltips, hyperlinks, and completion for Bib{\TeX} database entries.
-  Isabelle/jEdit does \<^emph>\<open>not\<close> know about the actual Bib{\TeX} environment
-  used in {\LaTeX} batch-mode, but it can take citations from those \<^verbatim>\<open>.bib\<close>
-  files that happen to be open in the editor; see \figref{fig:cite-completion}.
+  Isabelle/jEdit does \<^emph>\<open>not\<close> know about the actual Bib{\TeX} environment used
+  in {\LaTeX} batch-mode, but it can take citations from those \<^verbatim>\<open>.bib\<close> files
+  that happen to be open in the editor; see \figref{fig:cite-completion}.
 
   \begin{figure}[htb]
   \begin{center}
@@ -1669,9 +1635,9 @@
   \label{fig:cite-completion}
   \end{figure}
 
-  Isabelle/jEdit also provides some support for editing \<^verbatim>\<open>.bib\<close>
-  files themselves. There is syntax highlighting based on entry types
-  (according to standard Bib{\TeX} styles), a context-menu to compose entries
+  Isabelle/jEdit also provides some support for editing \<^verbatim>\<open>.bib\<close> files
+  themselves. There is syntax highlighting based on entry types (according to
+  standard Bib{\TeX} styles), a context-menu to compose entries
   systematically, and a SideKick tree view of the overall content; see
   \figref{fig:bibtex-mode}.
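A typical use within the theory text looks as follows; the citation key is hypothetical and stands for an entry of some .bib file that is open in the editor:

  text \<open>This construction follows the standard approach @{cite "example-key"}.\<close>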
 
@@ -1689,35 +1655,33 @@
 
 section \<open>Timing\<close>
 
-text \<open>Managed evaluation of commands within PIDE documents includes
-  timing information, which consists of elapsed (wall-clock) time, CPU
-  time, and GC (garbage collection) time.  Note that in a
-  multithreaded system it is difficult to measure execution time
-  precisely: elapsed time is closer to the real requirements of
-  runtime resources than CPU or GC time, which are both subject to
-  influences from the parallel environment that are outside the scope
-  of the current command transaction.
+text \<open>
+  Managed evaluation of commands within PIDE documents includes timing
+  information, which consists of elapsed (wall-clock) time, CPU time, and GC
+  (garbage collection) time. Note that in a multithreaded system it is
+  difficult to measure execution time precisely: elapsed time is closer to the
+  real requirements of runtime resources than CPU or GC time, which are both
+  subject to influences from the parallel environment that are outside the
+  scope of the current command transaction.
 
-  The \<^emph>\<open>Timing\<close> panel provides an overview of cumulative command
-  timings for each document node.  Commands with elapsed time below
-  the given threshold are ignored in the grand total.  Nodes are
-  sorted according to their overall timing.  For the document node
-  that corresponds to the current buffer, individual command timings
-  are shown as well.  A double-click on a theory node or command moves
-  the editor focus to that particular source position.
+  The \<^emph>\<open>Timing\<close> panel provides an overview of cumulative command timings for
+  each document node. Commands with elapsed time below the given threshold are
+  ignored in the grand total. Nodes are sorted according to their overall
+  timing. For the document node that corresponds to the current buffer,
+  individual command timings are shown as well. A double-click on a theory
+  node or command moves the editor focus to that particular source position.
 
-  It is also possible to reveal individual timing information via some
-  tooltip for the corresponding command keyword, using the technique
-  of mouse hovering with \<^verbatim>\<open>CONTROL\<close>/\<^verbatim>\<open>COMMAND\<close>
-  modifier key as explained in \secref{sec:tooltips-hyperlinks}.
-  Actual display of timing depends on the global option
-  @{system_option_ref jedit_timing_threshold}, which can be configured in
-  \<^emph>\<open>Plugin Options~/ Isabelle~/ General\<close>.
+  It is also possible to reveal individual timing information via some tooltip
+  for the corresponding command keyword, using the technique of mouse hovering
+  with the \<^verbatim>\<open>CONTROL\<close>~/ \<^verbatim>\<open>COMMAND\<close> modifier key as explained in
+  \secref{sec:tooltips-hyperlinks}. Actual display of timing depends on the
+  global option @{system_option_ref jedit_timing_threshold}, which can be
+  configured in \<^emph>\<open>Plugin Options~/ Isabelle~/ General\<close>.
 
   \<^medskip>
-  The \<^emph>\<open>Monitor\<close> panel visualizes various data collections about
-  recent activity of the Isabelle/ML task farm and the underlying ML runtime
-  system. The display is continuously updated according to @{system_option_ref
+  The \<^emph>\<open>Monitor\<close> panel visualizes various data collections about recent
+  activity of the Isabelle/ML task farm and the underlying ML runtime system.
+  The display is continuously updated according to @{system_option_ref
   editor_chart_delay}. Note that the painting of the chart takes considerable
   runtime itself --- on the Java Virtual Machine that runs Isabelle/Scala, not
   Isabelle/ML. Internally, the Isabelle/Scala module \<^verbatim>\<open>isabelle.ML_Statistics\<close>
@@ -1727,32 +1691,30 @@
 
 section \<open>Low-level output\<close>
 
-text \<open>Prover output is normally shown directly in the main text area
-  or secondary \<^emph>\<open>Output\<close> panels, as explained in
-  \secref{sec:output}.
+text \<open>
+  Prover output is normally shown directly in the main text area or secondary
+  \<^emph>\<open>Output\<close> panels, as explained in \secref{sec:output}.
 
-  Beyond this, it is occasionally useful to inspect low-level output
-  channels via some of the following additional panels:
+  Beyond this, it is occasionally useful to inspect low-level output channels
+  via some of the following additional panels:
 
-  \<^item> \<^emph>\<open>Protocol\<close> shows internal messages between the
-  Isabelle/Scala and Isabelle/ML side of the PIDE document editing protocol.
-  Recording of messages starts with the first activation of the
-  corresponding dockable window; earlier messages are lost.
+  \<^item> \<^emph>\<open>Protocol\<close> shows internal messages between the Isabelle/Scala and
+  Isabelle/ML side of the PIDE document editing protocol. Recording of
+  messages starts with the first activation of the corresponding dockable
+  window; earlier messages are lost.
 
-  Actual display of protocol messages causes considerable slowdown, so
-  it is important to undock all \<^emph>\<open>Protocol\<close> panels for production
-  work.
+  Actual display of protocol messages causes considerable slowdown, so it is
+  important to undock all \<^emph>\<open>Protocol\<close> panels for production work.
 
   \<^item> \<^emph>\<open>Raw Output\<close> shows chunks of text from the \<^verbatim>\<open>stdout\<close> and \<^verbatim>\<open>stderr\<close>
-  channels of the prover process.
-  Recording of output starts with the first activation of the
-  corresponding dockable window; earlier output is lost.
+  channels of the prover process. Recording of output starts with the first
+  activation of the corresponding dockable window; earlier output is lost.
 
-  The implicit stateful nature of physical I/O channels makes it
-  difficult to relate raw output to the actual command from where it
-  was originating.  Parallel execution may add to the confusion.
-  Peeking at physical process I/O is only the last resort to diagnose
-  problems with tools that are not PIDE compliant.
+  The implicit stateful nature of physical I/O channels makes it difficult to
+  relate raw output to the actual command from which it originated.
+  Parallel execution may add to the confusion. Peeking at physical process I/O
+  is only the last resort to diagnose problems with tools that are not PIDE
+  compliant.
 
   Under normal circumstances, prover output always works via managed message
   channels (corresponding to @{ML writeln}, @{ML warning}, @{ML
@@ -1762,50 +1724,45 @@
 
   \<^item> \<^emph>\<open>Syslog\<close> shows system messages that might be relevant to diagnose
   problems with the startup or shutdown phase of the prover process; this also
-  includes raw output on \<^verbatim>\<open>stderr\<close>. Isabelle/ML also provides an
-  explicit @{ML Output.system_message} operation, which is occasionally useful
-  for diagnostic purposes within the system infrastructure itself.
+  includes raw output on \<^verbatim>\<open>stderr\<close>. Isabelle/ML also provides an explicit @{ML
+  Output.system_message} operation, which is occasionally useful for
+  diagnostic purposes within the system infrastructure itself.
 
-  A limited amount of syslog messages are buffered, independently of
-  the docking state of the \<^emph>\<open>Syslog\<close> panel.  This allows to
-  diagnose serious problems with Isabelle/PIDE process management,
-  outside of the actual protocol layer.
+  A limited number of syslog messages is buffered, independently of the
+  docking state of the \<^emph>\<open>Syslog\<close> panel. This makes it possible to diagnose
+  serious problems with Isabelle/PIDE process management, outside of the actual
+  protocol layer.
 
-  Under normal situations, such low-level system output can be
-  ignored.
+  Under normal circumstances, such low-level system output can be ignored.
 \<close>
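To observe the difference between managed channels and raw output in these panels, a snippet like the following can be evaluated in some theory; it relies only on the Isabelle/ML operations mentioned above plus plain SML Basis I/O:

  ML \<open>
    (writeln "managed output";                         (* regular PIDE output *)
     warning "managed warning";                        (* output with warning markup *)
     Output.system_message "system-level diagnostic";  (* intended for the Syslog channel *)
     TextIO.output (TextIO.stdOut, "raw stdout\n"))    (* only visible in Raw Output *)
  \<close>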
 
 
 chapter \<open>Known problems and workarounds \label{sec:problems}\<close>
 
 text \<open>
-  \<^item> \<^bold>\<open>Problem:\<close> Odd behavior of some diagnostic commands with
-  global side-effects, like writing a physical file.
+  \<^item> \<^bold>\<open>Problem:\<close> Odd behavior of some diagnostic commands with global
+  side-effects, like writing a physical file.
 
-  \<^bold>\<open>Workaround:\<close> Copy/paste complete command text from
-  elsewhere, or disable continuous checking temporarily.
-
-  \<^item> \<^bold>\<open>Problem:\<close> No direct support to remove document nodes from the
-  collection of theories.
+  \<^bold>\<open>Workaround:\<close> Copy/paste complete command text from elsewhere, or disable
+  continuous checking temporarily.
 
-  \<^bold>\<open>Workaround:\<close> Clear the buffer content of unused files and close
-  \<^emph>\<open>without\<close> saving changes.
+  \<^item> \<^bold>\<open>Problem:\<close> No direct support to remove document nodes from the collection
+  of theories.
 
-  \<^item> \<^bold>\<open>Problem:\<close> Keyboard shortcuts \<^verbatim>\<open>C+PLUS\<close> and
-  \<^verbatim>\<open>C+MINUS\<close> for adjusting the editor font size depend on
-  platform details and national keyboards.
+  \<^bold>\<open>Workaround:\<close> Clear the buffer content of unused files and close \<^emph>\<open>without\<close>
+  saving changes.
 
-  \<^bold>\<open>Workaround:\<close> Rebind keys via \<^emph>\<open>Global Options~/
-  Shortcuts\<close>.
+  \<^item> \<^bold>\<open>Problem:\<close> Keyboard shortcuts \<^verbatim>\<open>C+PLUS\<close> and \<^verbatim>\<open>C+MINUS\<close> for adjusting the
+  editor font size depend on platform details and national keyboards.
+
+  \<^bold>\<open>Workaround:\<close> Rebind keys via \<^emph>\<open>Global Options~/ Shortcuts\<close>.
 
   \<^item> \<^bold>\<open>Problem:\<close> The Mac OS X key sequence \<^verbatim>\<open>COMMAND+COMMA\<close> for application
-  \<^emph>\<open>Preferences\<close> is in conflict with the
-  jEdit default keyboard shortcut for \<^emph>\<open>Incremental Search Bar\<close> (action
-  @{action_ref "quick-search"}).
+  \<^emph>\<open>Preferences\<close> is in conflict with the jEdit default keyboard shortcut for
+  \<^emph>\<open>Incremental Search Bar\<close> (action @{action_ref "quick-search"}).
 
-  \<^bold>\<open>Workaround:\<close> Rebind key via \<^emph>\<open>Global Options~/
-  Shortcuts\<close> according to national keyboard, e.g.\ \<^verbatim>\<open>COMMAND+SLASH\<close>
-  on English ones.
+  \<^bold>\<open>Workaround:\<close> Rebind key via \<^emph>\<open>Global Options~/ Shortcuts\<close> according to
+  national keyboard, e.g.\ \<^verbatim>\<open>COMMAND+SLASH\<close> on English ones.
 
   \<^item> \<^bold>\<open>Problem:\<close> On Mac OS X with native Apple look-and-feel, some exotic
   national keyboards may cause a conflict of menu accelerator keys with
@@ -1815,44 +1772,42 @@
   \<^bold>\<open>Workaround:\<close> Disable the native Apple menu bar via Java runtime option
   \<^verbatim>\<open>-Dapple.laf.useScreenMenuBar=false\<close>.
 
-  \<^item> \<^bold>\<open>Problem:\<close> Mac OS X system fonts sometimes lead to
-  character drop-outs in the main text area.
+  \<^item> \<^bold>\<open>Problem:\<close> Mac OS X system fonts sometimes lead to character drop-outs in
+  the main text area.
 
-  \<^bold>\<open>Workaround:\<close> Use the default \<^verbatim>\<open>IsabelleText\<close> font.
-  (Do not install that font on the system.)
-
-  \<^item> \<^bold>\<open>Problem:\<close> Some Linux/X11 input methods such as IBus
-  tend to disrupt key event handling of Java/AWT/Swing.
+  \<^bold>\<open>Workaround:\<close> Use the default \<^verbatim>\<open>IsabelleText\<close> font. (Do not install that
+  font on the system.)
 
-  \<^bold>\<open>Workaround:\<close> Do not use X11 input methods. Note that environment
-  variable \<^verbatim>\<open>XMODIFIERS\<close> is reset by default within Isabelle
-  settings.
+  \<^item> \<^bold>\<open>Problem:\<close> Some Linux/X11 input methods such as IBus tend to disrupt key
+  event handling of Java/AWT/Swing.
 
-  \<^item> \<^bold>\<open>Problem:\<close> Some Linux/X11 window managers that are
-  not ``re-parenting'' cause problems with additional windows opened
-  by Java. This affects either historic or neo-minimalistic window
-  managers like \<^verbatim>\<open>awesome\<close> or \<^verbatim>\<open>xmonad\<close>.
+  \<^bold>\<open>Workaround:\<close> Do not use X11 input methods. Note that environment variable
+  \<^verbatim>\<open>XMODIFIERS\<close> is reset by default within Isabelle settings.
+
+  \<^item> \<^bold>\<open>Problem:\<close> Some Linux/X11 window managers that are not ``re-parenting''
+  cause problems with additional windows opened by Java. This affects either
+  historic or neo-minimalistic window managers like \<^verbatim>\<open>awesome\<close> or \<^verbatim>\<open>xmonad\<close>.
 
   \<^bold>\<open>Workaround:\<close> Use a regular re-parenting X11 window manager.
 
-  \<^item> \<^bold>\<open>Problem:\<close> Various forks of Linux/X11 window managers and
-  desktop environments (like Gnome) disrupt the handling of menu popups and
-  mouse positions of Java/AWT/Swing.
+  \<^item> \<^bold>\<open>Problem:\<close> Various forks of Linux/X11 window managers and desktop
+  environments (like Gnome) disrupt the handling of menu popups and mouse
+  positions of Java/AWT/Swing.
 
   \<^bold>\<open>Workaround:\<close> Use mainstream versions of Linux desktops.
 
-  \<^item> \<^bold>\<open>Problem:\<close> Native Windows look-and-feel with global font
-  scaling leads to bad GUI rendering of various tree views.
+  \<^item> \<^bold>\<open>Problem:\<close> Native Windows look-and-feel with global font scaling leads to
+  bad GUI rendering of various tree views.
 
-  \<^bold>\<open>Workaround:\<close> Use \<^emph>\<open>Metal\<close> look-and-feel and re-adjust its
-  primary and secondary font as explained in \secref{sec:hdpi}.
+  \<^bold>\<open>Workaround:\<close> Use \<^emph>\<open>Metal\<close> look-and-feel and re-adjust its primary and
+  secondary font as explained in \secref{sec:hdpi}.
 
   \<^item> \<^bold>\<open>Problem:\<close> Full-screen mode via jEdit action @{action_ref
-  "toggle-full-screen"} (default keyboard shortcut \<^verbatim>\<open>F11\<close>) works on
-  Windows, but not on Mac OS X or various Linux/X11 window managers.
+  "toggle-full-screen"} (default keyboard shortcut \<^verbatim>\<open>F11\<close>) works on Windows,
+  but not on Mac OS X or various Linux/X11 window managers.
 
-  \<^bold>\<open>Workaround:\<close> Use native full-screen control of the window
-  manager (notably on Mac OS X).
+  \<^bold>\<open>Workaround:\<close> Use native full-screen control of the window manager (notably
+  on Mac OS X).
 \<close>
 
 end
\ No newline at end of file
--- a/src/Doc/Locales/Examples1.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Locales/Examples1.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -84,6 +84,6 @@
   In order to allow for the desired replacement, interpretation
   accepts \emph{equations} in addition to the parameter instantiation.
   These follow the locale expression and are indicated with the
-  keyword \isakeyword{where}.  This is the revised interpretation:
+  keyword \isakeyword{rewrites}.  This is the revised interpretation:
 \<close>
 end
--- a/src/Doc/Locales/Examples2.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Locales/Examples2.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -2,7 +2,7 @@
 imports Examples
 begin
   interpretation %visible int: partial_order "op \<le> :: [int, int] \<Rightarrow> bool"
-    where "int.less x y = (x < y)"
+    rewrites "int.less x y = (x < y)"
   proof -
     txt \<open>\normalsize The goals are now:
       @{subgoals [display]}
--- a/src/Doc/Locales/Examples3.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Locales/Examples3.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -17,7 +17,7 @@
   repeat the example from the previous section to illustrate this.\<close>
 
   interpretation %visible int: partial_order "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
-    where "int.less x y = (x < y)"
+    rewrites "int.less x y = (x < y)"
   proof -
     show "partial_order (op \<le> :: int \<Rightarrow> int \<Rightarrow> bool)"
       by unfold_locales auto
@@ -47,7 +47,7 @@
   so they can be used in a later example.\<close>
 
   interpretation %visible int: lattice "op \<le> :: int \<Rightarrow> int \<Rightarrow> bool"
-    where int_min_eq: "int.meet x y = min x y"
+    rewrites int_min_eq: "int.meet x y = min x y"
       and int_max_eq: "int.join x y = max x y"
   proof -
     show "lattice (op \<le> :: int \<Rightarrow> int \<Rightarrow> bool)"
@@ -486,13 +486,13 @@
   proof unfold_locales
     fix f g
     have "partial_order.is_inf (\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x) f g (\<lambda>x. f x \<sqinter> g x)"
-      apply (rule is_infI) apply rule+ apply (drule spec, assumption)+ done
+      apply (rule f.is_infI) apply rule+ apply (drule spec, assumption)+ done
     then show "\<exists>inf. partial_order.is_inf (\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x) f g inf"
       by fast
   next
     fix f g
     have "partial_order.is_sup (\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x) f g (\<lambda>x. f x \<squnion> g x)"
-      apply (rule is_supI) apply rule+ apply (drule spec, assumption)+ done
+      apply (rule f.is_supI) apply rule+ apply (drule spec, assumption)+ done
     then show "\<exists>sup. partial_order.is_sup (\<lambda>f g. \<forall>x. f x \<sqsubseteq> g x) f g sup"
       by fast
   qed
@@ -611,7 +611,7 @@
 
   \textit{equation} & ::= & [ \textit{attr-name} ``\textbf{:}'' ]
     \textit{prop} \\
-  \textit{equations} & ::= &  \textbf{where} \textit{equation} ( \textbf{and}
+  \textit{equations} & ::= &  \textbf{rewrites} \textit{equation} ( \textbf{and}
     \textit{equation} )$^*$  \\
   \textit{toplevel} & ::=
   & \textbf{sublocale} \textit{name} ( ``$<$'' $|$
--- a/src/Doc/Nitpick/document/root.tex	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Nitpick/document/root.tex	Tue Nov 10 14:43:29 2015 +0000
@@ -342,8 +342,7 @@
 
 \prew
 \textbf{nitpick} [\textit{card} $'a$~= 1--50]\footnote{The symbol `--'
-can be entered as \texttt{-} (hyphen) or
-\texttt{\char`\\\char`\<emdash\char`\>}.} \\[2\smallskipamount]
+is entered as \texttt{-} (hyphen).} \\[2\smallskipamount]
 \slshape Nitpick found no counterexample.
 \postw
 
@@ -1769,7 +1768,7 @@
 \item[\labelitemi] \qtybf{int\/}: An integer. Negative integers are prefixed with a hyphen.
 \item[\labelitemi] \qtybf{smart\_int\/}: An integer or \textit{smart}.
 \item[\labelitemi] \qtybf{int\_range}: An integer (e.g., 3) or a range
-of nonnegative integers (e.g., $1$--$4$). The range symbol `--' can be entered as \texttt{-} (hyphen) or \texttt{\char`\\\char`\<emdash\char`\>}.
+of nonnegative integers (e.g., $1$--$4$). The range symbol `--' is entered as \texttt{-} (hyphen).
 \item[\labelitemi] \qtybf{int\_seq}: A comma-separated sequence of ranges of integers (e.g.,~1{,}3{,}\allowbreak6--8).
 \item[\labelitemi] \qtybf{float}: A floating-point number (e.g., 0.5 or 60)
 expressing a number of seconds.
--- a/src/Doc/Sugar/Sugar.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Sugar/Sugar.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -137,7 +137,7 @@
 suppresses question marks; variables that end in digits,
 e.g. @{text"x1"}, are still printed with a trailing @{text".0"},
 e.g. @{text"x1.0"}, their internal index. This can be avoided by
-turning the last digit into a subscript: write \verb!x\<^sub>1! and
+turning the last digit into a subscript: write \<^verbatim>\<open>x\<^sub>1\<close> and
 obtain the much nicer @{text"x\<^sub>1"}. *}
 
 (*<*)declare [[show_question_marks = false]](*>*)
--- a/src/Doc/System/Basics.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/System/Basics.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,35 +1,34 @@
+(*:wrap=hard:maxLineLen=78:*)
+
 theory Basics
 imports Base
 begin
 
 chapter \<open>The Isabelle system environment\<close>
 
-text \<open>This manual describes Isabelle together with related tools and
-  user interfaces as seen from a system oriented view.  See also the
-  \<^emph>\<open>Isabelle/Isar Reference Manual\<close> @{cite "isabelle-isar-ref"} for
-  the actual Isabelle input language and related concepts, and
-  \<^emph>\<open>The Isabelle/Isar Implementation
+text \<open>
+  This manual describes Isabelle together with related tools and user
+  interfaces as seen from a system oriented view. See also the \<^emph>\<open>Isabelle/Isar
+  Reference Manual\<close> @{cite "isabelle-isar-ref"} for the actual Isabelle input
+  language and related concepts, and \<^emph>\<open>The Isabelle/Isar Implementation
   Manual\<close> @{cite "isabelle-implementation"} for the main concepts of the
   underlying implementation in Isabelle/ML.
 
   \<^medskip>
-  The Isabelle system environment provides the following
-  basic infrastructure to integrate tools smoothly.
+  The Isabelle system environment provides the following basic infrastructure
+  to integrate tools smoothly.
 
-  \<^enum> The \<^emph>\<open>Isabelle settings\<close> mechanism provides process
-  environment variables to all Isabelle executables (including tools
-  and user interfaces).
+  \<^enum> The \<^emph>\<open>Isabelle settings\<close> mechanism provides process environment variables
+  to all Isabelle executables (including tools and user interfaces).
 
-  \<^enum> The raw \<^emph>\<open>Isabelle process\<close> (@{executable_ref
-  "isabelle_process"}) runs logic sessions either interactively or in
-  batch mode.  In particular, this view abstracts over the invocation
-  of the actual ML system to be used.  Regular users rarely need to
-  care about the low-level process.
+  \<^enum> The raw \<^emph>\<open>Isabelle process\<close> (@{executable_ref "isabelle_process"}) runs
+  logic sessions either interactively or in batch mode. In particular, this
+  view abstracts over the invocation of the actual ML system to be used.
+  Regular users rarely need to care about the low-level process.
 
-  \<^enum> The main \<^emph>\<open>Isabelle tool wrapper\<close> (@{executable_ref
-  isabelle}) provides a generic startup environment Isabelle related
-  utilities, user interfaces etc.  Such tools automatically benefit
-  from the settings mechanism.
+  \<^enum> The main \<^emph>\<open>Isabelle tool wrapper\<close> (@{executable_ref isabelle}) provides a
+  generic startup environment for Isabelle related utilities, user interfaces
+  etc. Such tools automatically benefit from the settings mechanism.
 \<close>
 
 
@@ -37,299 +36,270 @@
 
 text \<open>
   The Isabelle system heavily depends on the \<^emph>\<open>settings
-  mechanism\<close>\indexbold{settings}.  Essentially, this is a statically
-  scoped collection of environment variables, such as @{setting
-  ISABELLE_HOME}, @{setting ML_SYSTEM}, @{setting ML_HOME}.  These
-  variables are \<^emph>\<open>not\<close> intended to be set directly from the shell,
-  though.  Isabelle employs a somewhat more sophisticated scheme of
-  \<^emph>\<open>settings files\<close> --- one for site-wide defaults, another for
-  additional user-specific modifications.  With all configuration
-  variables in clearly defined places, this scheme is more
-  maintainable and user-friendly than global shell environment
-  variables.
+  mechanism\<close>\indexbold{settings}. Essentially, this is a statically scoped
+  collection of environment variables, such as @{setting ISABELLE_HOME},
+  @{setting ML_SYSTEM}, @{setting ML_HOME}. These variables are \<^emph>\<open>not\<close>
+  intended to be set directly from the shell, though. Isabelle employs a
+  somewhat more sophisticated scheme of \<^emph>\<open>settings files\<close> --- one for
+  site-wide defaults, another for additional user-specific modifications. With
+  all configuration variables in clearly defined places, this scheme is more
+  maintainable and user-friendly than global shell environment variables.
 
-  In particular, we avoid the typical situation where prospective
-  users of a software package are told to put several things into
-  their shell startup scripts, before being able to actually run the
-  program. Isabelle requires none such administrative chores of its
-  end-users --- the executables can be invoked straight away.
-  Occasionally, users would still want to put the @{file
-  "$ISABELLE_HOME/bin"} directory into their shell's search path, but
+  In particular, we avoid the typical situation where prospective users of a
+  software package are told to put several things into their shell startup
+  scripts, before being able to actually run the program. Isabelle requires
+  no such administrative chores of its end-users --- the executables can be
+  invoked straight away. Occasionally, users would still want to put the
+  @{file "$ISABELLE_HOME/bin"} directory into their shell's search path, but
   this is not required.
 \<close>
 
 
 subsection \<open>Bootstrapping the environment \label{sec:boot}\<close>
 
-text \<open>Isabelle executables need to be run within a proper settings
-  environment.  This is bootstrapped as described below, on the first
-  invocation of one of the outer wrapper scripts (such as
-  @{executable_ref isabelle}).  This happens only once for each
-  process tree, i.e.\ the environment is passed to subprocesses
-  according to regular Unix conventions.
+text \<open>
+  Isabelle executables need to be run within a proper settings environment.
+  This is bootstrapped as described below, on the first invocation of one of
+  the outer wrapper scripts (such as @{executable_ref isabelle}). This happens
+  only once for each process tree, i.e.\ the environment is passed to
+  subprocesses according to regular Unix conventions.
+
+    \<^enum> The special variable @{setting_def ISABELLE_HOME} is determined
+    automatically from the location of the binary that has been run.
 
-  \<^enum> The special variable @{setting_def ISABELLE_HOME} is
-  determined automatically from the location of the binary that has
-  been run.
-  
-  You should not try to set @{setting ISABELLE_HOME} manually. Also
-  note that the Isabelle executables either have to be run from their
-  original location in the distribution directory, or via the
-  executable objects created by the @{tool install} tool.  Symbolic
-  links are admissible, but a plain copy of the @{file
-  "$ISABELLE_HOME/bin"} files will not work!
+    You should not try to set @{setting ISABELLE_HOME} manually. Also note
+    that the Isabelle executables either have to be run from their original
+    location in the distribution directory, or via the executable objects
+    created by the @{tool install} tool. Symbolic links are admissible, but a
+    plain copy of the @{file "$ISABELLE_HOME/bin"} files will not work!
+
+    \<^enum> The file @{file "$ISABELLE_HOME/etc/settings"} is run as a
+    @{executable_ref bash} shell script with the auto-export option for
+    variables enabled.
 
-  \<^enum> The file @{file "$ISABELLE_HOME/etc/settings"} is run as a
-  @{executable_ref bash} shell script with the auto-export option for
-  variables enabled.
-  
-  This file holds a rather long list of shell variable assigments,
-  thus providing the site-wide default settings.  The Isabelle
-  distribution already contains a global settings file with sensible
-  defaults for most variables.  When installing the system, only a few
-  of these may have to be adapted (probably @{setting ML_SYSTEM}
-  etc.).
-  
-  \<^enum> The file @{file_unchecked "$ISABELLE_HOME_USER/etc/settings"} (if it
-  exists) is run in the same way as the site default settings. Note
-  that the variable @{setting ISABELLE_HOME_USER} has already been set
-  before --- usually to something like \<^verbatim>\<open>$USER_HOME/.isabelle/IsabelleXXXX\<close>.
-  
-  Thus individual users may override the site-wide defaults.
-  Typically, a user settings file contains only a few lines, with some
-  assignments that are actually changed.  Never copy the central
-  @{file "$ISABELLE_HOME/etc/settings"} file!
+    This file holds a rather long list of shell variable assignments, thus
+    providing the site-wide default settings. The Isabelle distribution
+    already contains a global settings file with sensible defaults for most
+    variables. When installing the system, only a few of these may have to be
+    adapted (probably @{setting ML_SYSTEM} etc.).
+
+    \<^enum> The file @{file_unchecked "$ISABELLE_HOME_USER/etc/settings"} (if it
+    exists) is run in the same way as the site default settings. Note that the
+    variable @{setting ISABELLE_HOME_USER} has already been set before ---
+    usually to something like \<^verbatim>\<open>$USER_HOME/.isabelle/IsabelleXXXX\<close>.
 
+    Thus individual users may override the site-wide defaults. Typically, a
+    user settings file contains only a few lines, with some assignments that
+    are actually changed. Never copy the central @{file
+    "$ISABELLE_HOME/etc/settings"} file!
 
-  Since settings files are regular GNU @{executable_def bash} scripts,
-  one may use complex shell commands, such as \<^verbatim>\<open>if\<close> or
-  \<^verbatim>\<open>case\<close> statements to set variables depending on the
-  system architecture or other environment variables.  Such advanced
-  features should be added only with great care, though. In
-  particular, external environment references should be kept at a
+  Since settings files are regular GNU @{executable_def bash} scripts, one may
+  use complex shell commands, such as \<^verbatim>\<open>if\<close> or \<^verbatim>\<open>case\<close> statements to set
+  variables depending on the system architecture or other environment
+  variables. Such advanced features should be added only with great care,
+  though. In particular, external environment references should be kept at a
   minimum.
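+
+  For illustration, such a conditional setting might look like this; the
+  variable \<^verbatim>\<open>MY_PROVER_HOME\<close> is merely a made-up example, not part of Isabelle:
+  @{verbatim [display]
+\<open>case "$ISABELLE_PLATFORM_FAMILY" in
+  linux) MY_PROVER_HOME="$USER_HOME/opt/my_prover/linux" ;;
+  macos) MY_PROVER_HOME="$USER_HOME/opt/my_prover/macos" ;;
+esac\<close>}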
 
   \<^medskip>
   A few variables are somewhat special:
 
-  \<^item> @{setting_def ISABELLE_PROCESS} and @{setting_def ISABELLE_TOOL} are set
-  automatically to the absolute path names of the @{executable
-  "isabelle_process"} and @{executable isabelle} executables,
-  respectively.
-  
-  \<^item> @{setting_ref ISABELLE_OUTPUT} will have the identifiers of
-  the Isabelle distribution (cf.\ @{setting ISABELLE_IDENTIFIER}) and
-  the ML system (cf.\ @{setting ML_IDENTIFIER}) appended automatically
-  to its value.
+    \<^item> @{setting_def ISABELLE_PROCESS} and @{setting_def ISABELLE_TOOL} are set
+    automatically to the absolute path names of the @{executable
+    "isabelle_process"} and @{executable isabelle} executables, respectively.
 
+    \<^item> @{setting_ref ISABELLE_OUTPUT} will have the identifiers of the Isabelle
+    distribution (cf.\ @{setting ISABELLE_IDENTIFIER}) and the ML system (cf.\
+    @{setting ML_IDENTIFIER}) appended automatically to its value.
 
   \<^medskip>
-  Note that the settings environment may be inspected with
-  the @{tool getenv} tool.  This might help to figure out the effect
-  of complex settings scripts.\<close>
+  Note that the settings environment may be inspected with the @{tool getenv}
+  tool. This might help to figure out the effect of complex settings scripts.
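+
+  For example, a quick check of some variables discussed above could be done
+  as follows (the actual output depends on the installation):
+  @{verbatim [display] \<open>isabelle getenv ISABELLE_HOME ML_IDENTIFIER\<close>}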
+\<close>
 
 
 subsection \<open>Common variables\<close>
 
 text \<open>
-  This is a reference of common Isabelle settings variables. Note that
-  the list is somewhat open-ended. Third-party utilities or interfaces
-  may add their own selection. Variables that are special in some
-  sense are marked with \<open>\<^sup>*\<close>.
+  This is a reference of common Isabelle settings variables. Note that the
+  list is somewhat open-ended. Third-party utilities or interfaces may add
+  their own selection. Variables that are special in some sense are marked
+  with \<open>\<^sup>*\<close>.
 
-  \<^descr>[@{setting_def USER_HOME}\<open>\<^sup>*\<close>] Is the cross-platform
-  user home directory.  On Unix systems this is usually the same as
-  @{setting HOME}, but on Windows it is the regular home directory of
-  the user, not the one of within the Cygwin root
-  file-system.\footnote{Cygwin itself offers another choice whether
-  its HOME should point to the @{file_unchecked "/home"} directory tree or the
-  Windows user home.}
+  \<^descr>[@{setting_def USER_HOME}\<open>\<^sup>*\<close>] Is the cross-platform user home directory.
+  On Unix systems this is usually the same as @{setting HOME}, but on Windows
+  it is the regular home directory of the user, not the one within the
+  Cygwin root file-system.\<^footnote>\<open>Cygwin itself offers another choice whether its
+  HOME should point to the @{file_unchecked "/home"} directory tree or the
+  Windows user home.\<close>
 
-  \<^descr>[@{setting_def ISABELLE_HOME}\<open>\<^sup>*\<close>] is the location of the
-  top-level Isabelle distribution directory. This is automatically
-  determined from the Isabelle executable that has been invoked.  Do
-  not attempt to set @{setting ISABELLE_HOME} yourself from the shell!
-  
-  \<^descr>[@{setting_def ISABELLE_HOME_USER}] is the user-specific
-  counterpart of @{setting ISABELLE_HOME}. The default value is
-  relative to @{file_unchecked "$USER_HOME/.isabelle"}, under rare
-  circumstances this may be changed in the global setting file.
-  Typically, the @{setting ISABELLE_HOME_USER} directory mimics
-  @{setting ISABELLE_HOME} to some extend. In particular, site-wide
-  defaults may be overridden by a private
-  \<^verbatim>\<open>$ISABELLE_HOME_USER/etc/settings\<close>.
+  \<^descr>[@{setting_def ISABELLE_HOME}\<open>\<^sup>*\<close>] is the location of the top-level
+  Isabelle distribution directory. This is automatically determined from the
+  Isabelle executable that has been invoked. Do not attempt to set @{setting
+  ISABELLE_HOME} yourself from the shell!
 
-  \<^descr>[@{setting_def ISABELLE_PLATFORM_FAMILY}\<open>\<^sup>*\<close>] is
-  automatically set to the general platform family: \<^verbatim>\<open>linux\<close>,
-  \<^verbatim>\<open>macos\<close>, \<^verbatim>\<open>windows\<close>.  Note that
+  \<^descr>[@{setting_def ISABELLE_HOME_USER}] is the user-specific counterpart of
+  @{setting ISABELLE_HOME}. The default value is relative to @{file_unchecked
+  "$USER_HOME/.isabelle"}, under rare circumstances this may be changed in the
+  global setting file. Typically, the @{setting ISABELLE_HOME_USER} directory
+  mimics @{setting ISABELLE_HOME} to some extent. In particular, site-wide
+  defaults may be overridden by a private \<^verbatim>\<open>$ISABELLE_HOME_USER/etc/settings\<close>.
+
+  \<^descr>[@{setting_def ISABELLE_PLATFORM_FAMILY}\<open>\<^sup>*\<close>] is automatically set to the
+  general platform family: \<^verbatim>\<open>linux\<close>, \<^verbatim>\<open>macos\<close>, \<^verbatim>\<open>windows\<close>. Note that
   platform-dependent tools usually need to refer to the more specific
   identification according to @{setting ISABELLE_PLATFORM}, @{setting
   ISABELLE_PLATFORM32}, @{setting ISABELLE_PLATFORM64}.
 
-  \<^descr>[@{setting_def ISABELLE_PLATFORM}\<open>\<^sup>*\<close>] is automatically
-  set to a symbolic identifier for the underlying hardware and
-  operating system.  The Isabelle platform identification always
-  refers to the 32 bit variant, even this is a 64 bit machine.  Note
-  that the ML or Java runtime may have a different idea, depending on
-  which binaries are actually run.
+  \<^descr>[@{setting_def ISABELLE_PLATFORM}\<open>\<^sup>*\<close>] is automatically set to a symbolic
+  identifier for the underlying hardware and operating system. The Isabelle
+  platform identification always refers to the 32 bit variant, even if this is
+  a 64 bit machine. Note that the ML or Java runtime may have a different idea,
+  depending on which binaries are actually run.
 
-  \<^descr>[@{setting_def ISABELLE_PLATFORM64}\<open>\<^sup>*\<close>] is similar to
-  @{setting ISABELLE_PLATFORM} but refers to the proper 64 bit variant
-  on a platform that supports this; the value is empty for 32 bit.
-  Note that the following bash expression (including the quotes)
-  prefers the 64 bit platform, if that is available:
+  \<^descr>[@{setting_def ISABELLE_PLATFORM64}\<open>\<^sup>*\<close>] is similar to @{setting
+  ISABELLE_PLATFORM} but refers to the proper 64 bit variant on a platform
+  that supports this; the value is empty for 32 bit. Note that the following
+  bash expression (including the quotes) prefers the 64 bit platform, if that
+  is available:
 
   @{verbatim [display] \<open>"${ISABELLE_PLATFORM64:-$ISABELLE_PLATFORM}"\<close>}
 
-  \<^descr>[@{setting_def ISABELLE_PROCESS}\<open>\<^sup>*\<close>, @{setting
-  ISABELLE_TOOL}\<open>\<^sup>*\<close>] are automatically set to the full path
-  names of the @{executable "isabelle_process"} and @{executable
-  isabelle} executables, respectively.  Thus other tools and scripts
-  need not assume that the @{file "$ISABELLE_HOME/bin"} directory is
-  on the current search path of the shell.
-  
-  \<^descr>[@{setting_def ISABELLE_IDENTIFIER}\<open>\<^sup>*\<close>] refers
-  to the name of this Isabelle distribution, e.g.\ ``\<^verbatim>\<open>Isabelle2012\<close>''.
+  \<^descr>[@{setting_def ISABELLE_PROCESS}\<open>\<^sup>*\<close>, @{setting ISABELLE_TOOL}\<open>\<^sup>*\<close>] are
+  automatically set to the full path names of the @{executable
+  "isabelle_process"} and @{executable isabelle} executables, respectively.
+  Thus other tools and scripts need not assume that the @{file
+  "$ISABELLE_HOME/bin"} directory is on the current search path of the shell.
 
-  \<^descr>[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME},
-  @{setting_def ML_OPTIONS}, @{setting_def ML_PLATFORM}, @{setting_def
-  ML_IDENTIFIER}\<open>\<^sup>*\<close>] specify the underlying ML system
-  to be used for Isabelle.  There is only a fixed set of admissable
-  @{setting ML_SYSTEM} names (see the @{file
+  \<^descr>[@{setting_def ISABELLE_IDENTIFIER}\<open>\<^sup>*\<close>] refers to the name of this
+  Isabelle distribution, e.g.\ ``\<^verbatim>\<open>Isabelle2012\<close>''.
+
+  \<^descr>[@{setting_def ML_SYSTEM}, @{setting_def ML_HOME}, @{setting_def
+  ML_OPTIONS}, @{setting_def ML_PLATFORM}, @{setting_def ML_IDENTIFIER}\<open>\<^sup>*\<close>]
+  specify the underlying ML system to be used for Isabelle. There is only a
+  fixed set of admissible @{setting ML_SYSTEM} names (see the @{file
   "$ISABELLE_HOME/etc/settings"} file of the distribution).
-  
+
   The actual compiler binary will be run from the directory @{setting
-  ML_HOME}, with @{setting ML_OPTIONS} as first arguments on the
-  command line.  The optional @{setting ML_PLATFORM} may specify the
-  binary format of ML heap images, which is useful for cross-platform
-  installations.  The value of @{setting ML_IDENTIFIER} is
-  automatically obtained by composing the values of @{setting
-  ML_SYSTEM}, @{setting ML_PLATFORM} and the Isabelle version values.
+  ML_HOME}, with @{setting ML_OPTIONS} as first arguments on the command line.
+  The optional @{setting ML_PLATFORM} may specify the binary format of ML heap
+  images, which is useful for cross-platform installations. The value of
+  @{setting ML_IDENTIFIER} is automatically obtained by composing the values
+  of @{setting ML_SYSTEM}, @{setting ML_PLATFORM} and the Isabelle version
+  values.
 
-  \<^descr>[@{setting_def ML_SYSTEM_POLYML}\<open>\<^sup>*\<close>] is \<^verbatim>\<open>true\<close>
-  for @{setting ML_SYSTEM} values derived from Poly/ML, as opposed to
-  SML/NJ where it is empty.  This is particularly useful with the
-  build option @{system_option condition}
-  (\secref{sec:system-options}) to restrict big sessions to something
-  that SML/NJ can still handle.
+  \<^descr>[@{setting_def ML_SYSTEM_POLYML}\<open>\<^sup>*\<close>] is \<^verbatim>\<open>true\<close> for @{setting ML_SYSTEM}
+  values derived from Poly/ML, as opposed to SML/NJ where it is empty. This is
+  particularly useful with the build option @{system_option condition}
+  (\secref{sec:system-options}) to restrict big sessions to something that
+  SML/NJ can still handle.
 
-  \<^descr>[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK
-  (Java Development Kit) installation with \<^verbatim>\<open>javac\<close> and
-  \<^verbatim>\<open>jar\<close> executables.  This is essential for Isabelle/Scala
-  and other JVM-based tools to work properly.  Note that conventional
-  \<^verbatim>\<open>JAVA_HOME\<close> usually points to the JRE (Java Runtime
+  \<^descr>[@{setting_def ISABELLE_JDK_HOME}] needs to point to a full JDK (Java
+  Development Kit) installation with \<^verbatim>\<open>javac\<close> and \<^verbatim>\<open>jar\<close> executables. This is
+  essential for Isabelle/Scala and other JVM-based tools to work properly.
+  Note that conventional \<^verbatim>\<open>JAVA_HOME\<close> usually points to the JRE (Java Runtime
   Environment), not JDK.
-  
-  \<^descr>[@{setting_def ISABELLE_PATH}] is a list of directories
-  (separated by colons) where Isabelle logic images may reside.  When
-  looking up heaps files, the value of @{setting ML_IDENTIFIER} is
-  appended to each component internally.
-  
-  \<^descr>[@{setting_def ISABELLE_OUTPUT}\<open>\<^sup>*\<close>] is a
-  directory where output heap files should be stored by default. The
-  ML system and Isabelle version identifier is appended here, too.
-  
-  \<^descr>[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where
-  theory browser information (HTML text, graph data, and printable
-  documents) is stored (see also \secref{sec:info}).  The default
-  value is @{file_unchecked "$ISABELLE_HOME_USER/browser_info"}.
-  
-  \<^descr>[@{setting_def ISABELLE_LOGIC}] specifies the default logic to
-  load if none is given explicitely by the user.  The default value is
-  \<^verbatim>\<open>HOL\<close>.
-  
-  \<^descr>[@{setting_def ISABELLE_LINE_EDITOR}] specifies the
-  line editor for the @{tool_ref console} interface.
+
+  \<^descr>[@{setting_def ISABELLE_PATH}] is a list of directories (separated by
+  colons) where Isabelle logic images may reside. When looking up heap files,
+  the value of @{setting ML_IDENTIFIER} is appended to each component
+  internally.
+
+  \<^descr>[@{setting_def ISABELLE_OUTPUT}\<open>\<^sup>*\<close>] is a directory where output heap files
+  should be stored by default. The ML system and Isabelle version identifier
+  is appended here, too.
+
+  \<^descr>[@{setting_def ISABELLE_BROWSER_INFO}] is the directory where theory
+  browser information (HTML text, graph data, and printable documents) is
+  stored (see also \secref{sec:info}). The default value is @{file_unchecked
+  "$ISABELLE_HOME_USER/browser_info"}.
+
+  \<^descr>[@{setting_def ISABELLE_LOGIC}] specifies the default logic to load if none
+  is given explicitly by the user. The default value is \<^verbatim>\<open>HOL\<close>.
+
+  \<^descr>[@{setting_def ISABELLE_LINE_EDITOR}] specifies the line editor for the
+  @{tool_ref console} interface.
 
-  \<^descr>[@{setting_def ISABELLE_LATEX}, @{setting_def
-  ISABELLE_PDFLATEX}, @{setting_def ISABELLE_BIBTEX}] refer to {\LaTeX}
-  related tools for Isabelle document preparation (see also
-  \secref{sec:tool-latex}).
-  
-  \<^descr>[@{setting_def ISABELLE_TOOLS}] is a colon separated list of
-  directories that are scanned by @{executable isabelle} for external
-  utility programs (see also \secref{sec:isabelle-tool}).
-  
-  \<^descr>[@{setting_def ISABELLE_DOCS}] is a colon separated list of
-  directories with documentation files.
+  \<^descr>[@{setting_def ISABELLE_LATEX}, @{setting_def ISABELLE_PDFLATEX},
+  @{setting_def ISABELLE_BIBTEX}] refer to {\LaTeX} related tools for Isabelle
+  document preparation (see also \secref{sec:tool-latex}).
+
+  \<^descr>[@{setting_def ISABELLE_TOOLS}] is a colon separated list of directories
+  that are scanned by @{executable isabelle} for external utility programs
+  (see also \secref{sec:isabelle-tool}).
 
-  \<^descr>[@{setting_def PDF_VIEWER}] specifies the program to be used
-  for displaying \<^verbatim>\<open>pdf\<close> files.
+  \<^descr>[@{setting_def ISABELLE_DOCS}] is a colon separated list of directories
+  with documentation files.
+
+  \<^descr>[@{setting_def PDF_VIEWER}] specifies the program to be used for displaying
+  \<^verbatim>\<open>pdf\<close> files.
 
-  \<^descr>[@{setting_def DVI_VIEWER}] specifies the program to be used
-  for displaying \<^verbatim>\<open>dvi\<close> files.
-  
-  \<^descr>[@{setting_def ISABELLE_TMP_PREFIX}\<open>\<^sup>*\<close>] is the
-  prefix from which any running @{executable "isabelle_process"}
-  derives an individual directory for temporary files.
+  \<^descr>[@{setting_def DVI_VIEWER}] specifies the program to be used for displaying
+  \<^verbatim>\<open>dvi\<close> files.
+
+  \<^descr>[@{setting_def ISABELLE_TMP_PREFIX}\<open>\<^sup>*\<close>] is the prefix from which any
+  running @{executable "isabelle_process"} derives an individual directory for
+  temporary files.
 \<close>
 
 
 subsection \<open>Additional components \label{sec:components}\<close>
 
-text \<open>Any directory may be registered as an explicit \<^emph>\<open>Isabelle
-  component\<close>.  The general layout conventions are that of the main
-  Isabelle distribution itself, and the following two files (both
-  optional) have a special meaning:
+text \<open>
+  Any directory may be registered as an explicit \<^emph>\<open>Isabelle component\<close>. The
+  general layout conventions are that of the main Isabelle distribution
+  itself, and the following two files (both optional) have a special meaning:
 
-  \<^item> \<^verbatim>\<open>etc/settings\<close> holds additional settings that are
-  initialized when bootstrapping the overall Isabelle environment,
-  cf.\ \secref{sec:boot}.  As usual, the content is interpreted as a
-  \<^verbatim>\<open>bash\<close> script.  It may refer to the component's enclosing
-  directory via the \<^verbatim>\<open>COMPONENT\<close> shell variable.
+    \<^item> \<^verbatim>\<open>etc/settings\<close> holds additional settings that are initialized when
+    bootstrapping the overall Isabelle environment, cf.\ \secref{sec:boot}. As
+    usual, the content is interpreted as a \<^verbatim>\<open>bash\<close> script. It may refer to the
+    component's enclosing directory via the \<^verbatim>\<open>COMPONENT\<close> shell variable.
 
-  For example, the following setting allows to refer to files within
-  the component later on, without having to hardwire absolute paths:
-  @{verbatim [display] \<open>MY_COMPONENT_HOME="$COMPONENT"\<close>}
+    For example, the following setting allows referring to files within the
+    component later on, without having to hardwire absolute paths:
+    @{verbatim [display] \<open>MY_COMPONENT_HOME="$COMPONENT"\<close>}
 
-  Components can also add to existing Isabelle settings such as
-  @{setting_def ISABELLE_TOOLS}, in order to provide
-  component-specific tools that can be invoked by end-users.  For
-  example:
-  @{verbatim [display] \<open>ISABELLE_TOOLS="$ISABELLE_TOOLS:$COMPONENT/lib/Tools"\<close>}
+    Components can also add to existing Isabelle settings such as
+    @{setting_def ISABELLE_TOOLS}, in order to provide component-specific
+    tools that can be invoked by end-users. For example:
+    @{verbatim [display] \<open>ISABELLE_TOOLS="$ISABELLE_TOOLS:$COMPONENT/lib/Tools"\<close>}
 
-  \<^item> \<^verbatim>\<open>etc/components\<close> holds a list of further
-  sub-components of the same structure.  The directory specifications
-  given here can be either absolute (with leading \<^verbatim>\<open>/\<close>) or
-  relative to the component's main directory.
+    \<^item> \<^verbatim>\<open>etc/components\<close> holds a list of further sub-components of the same
+    structure. The directory specifications given here can be either absolute
+    (with leading \<^verbatim>\<open>/\<close>) or relative to the component's main directory.
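+
+    A hypothetical \<^verbatim>\<open>etc/components\<close> file could thus consist of a few
+    directory names, one per line (all names below are invented for
+    illustration):
+    @{verbatim [display]
+\<open>sub_component1
+contrib/sub_component2
+/opt/shared_component\<close>}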
 
-
-  The root of component initialization is @{setting ISABELLE_HOME}
-  itself.  After initializing all of its sub-components recursively,
-  @{setting ISABELLE_HOME_USER} is included in the same manner (if
-  that directory exists).  This allows to install private components
-  via @{file_unchecked "$ISABELLE_HOME_USER/etc/components"}, although it is
-  often more convenient to do that programmatically via the
-  \<^verbatim>\<open>init_component\<close> shell function in the \<^verbatim>\<open>etc/settings\<close>
-  script of \<^verbatim>\<open>$ISABELLE_HOME_USER\<close> (or any other component
-  directory).  For example:
+  The root of component initialization is @{setting ISABELLE_HOME} itself.
+  After initializing all of its sub-components recursively, @{setting
+  ISABELLE_HOME_USER} is included in the same manner (if that directory
+  exists). This allows installing private components via @{file_unchecked
+  "$ISABELLE_HOME_USER/etc/components"}, although it is often more convenient
+  to do that programmatically via the \<^verbatim>\<open>init_component\<close> shell function in the
+  \<^verbatim>\<open>etc/settings\<close> script of \<^verbatim>\<open>$ISABELLE_HOME_USER\<close> (or any other component
+  directory). For example:
   @{verbatim [display] \<open>init_component "$HOME/screwdriver-2.0"\<close>}
 
-  This is tolerant wrt.\ missing component directories, but might
-  produce a warning.
+  This is tolerant wrt.\ missing component directories, but might produce a
+  warning.
 
   \<^medskip>
-  More complex situations may be addressed by initializing
-  components listed in a given catalog file, relatively to some base
-  directory:
+  More complex situations may be addressed by initializing components listed
+  in a given catalog file, relative to some base directory:
   @{verbatim [display] \<open>init_components "$HOME/my_component_store" "some_catalog_file"\<close>}
 
-  The component directories listed in the catalog file are treated as
-  relative to the given base directory.
+  The component directories listed in the catalog file are treated as relative
+  to the given base directory.
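+
+  Assuming the catalog file is a plain list of component directories, one per
+  line, it might look like this (names invented for illustration):
+  @{verbatim [display]
+\<open>screwdriver-2.0
+another_component-1.5\<close>}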
 
-  See also \secref{sec:tool-components} for some tool-support for
-  resolving components that are formally initialized but not installed
-  yet.
+  See also \secref{sec:tool-components} for some tool-support for resolving
+  components that are formally initialized but not installed yet.
 \<close>
 
 
 section \<open>The raw Isabelle process \label{sec:isabelle-process}\<close>
 
 text \<open>
-  The @{executable_def "isabelle_process"} executable runs bare-bones
-  Isabelle logic sessions --- either interactively or in batch mode.
-  It provides an abstraction over the underlying ML system, and over
-  the actual heap file locations.  Its usage is:
+  The @{executable_def "isabelle_process"} executable runs bare-bones Isabelle
+  logic sessions --- either interactively or in batch mode. It provides an
+  abstraction over the underlying ML system, and over the actual heap file
+  locations. Its usage is:
   @{verbatim [display]
 \<open>Usage: isabelle_process [OPTIONS] [INPUT] [OUTPUT]
 
@@ -349,123 +319,111 @@
   actual file names (containing at least one /).
   If INPUT is "RAW_ML_SYSTEM", just start the bare bones ML system.\<close>}
 
-  Input files without path specifications are looked up in the
-  @{setting ISABELLE_PATH} setting, which may consist of multiple
-  components separated by colons --- these are tried in the given
-  order with the value of @{setting ML_IDENTIFIER} appended
-  internally.  In a similar way, base names are relative to the
-  directory specified by @{setting ISABELLE_OUTPUT}.  In any case,
-  actual file locations may also be given by including at least one
-  slash (\<^verbatim>\<open>/\<close>) in the name (hint: use \<^verbatim>\<open>./\<close> to
-  refer to the current directory).
+  Input files without path specifications are looked up in the @{setting
+  ISABELLE_PATH} setting, which may consist of multiple components separated
+  by colons --- these are tried in the given order with the value of @{setting
+  ML_IDENTIFIER} appended internally. In a similar way, base names are
+  relative to the directory specified by @{setting ISABELLE_OUTPUT}. In any
+  case, actual file locations may also be given by including at least one
+  slash (\<^verbatim>\<open>/\<close>) in the name (hint: use \<^verbatim>\<open>./\<close> to refer to the current
+  directory).
 \<close>
 
 
 subsubsection \<open>Options\<close>
 
 text \<open>
-  If the input heap file does not have write permission bits set, or
-  the \<^verbatim>\<open>-r\<close> option is given explicitly, then the session
-  started will be read-only.  That is, the ML world cannot be
-  committed back into the image file.  Otherwise, a writable session
-  enables commits into either the input file, or into another output
-  heap file (if that is given as the second argument on the command
+  If the input heap file does not have write permission bits set, or the \<^verbatim>\<open>-r\<close>
+  option is given explicitly, then the session started will be read-only. That
+  is, the ML world cannot be committed back into the image file. Otherwise, a
+  writable session enables commits into either the input file, or into another
+  output heap file (if that is given as the second argument on the command
   line).
 
-  The read-write state of sessions is determined at startup only, it
-  cannot be changed intermediately. Also note that heap images may
-  require considerable amounts of disk space (hundreds of MB or some GB).
-  Users are responsible for themselves to dispose their heap files
-  when they are no longer needed.
+  The read-write state of sessions is determined at startup only; it cannot be
+  changed later on. Also note that heap images may require considerable
+  amounts of disk space (hundreds of MB or some GB). Users are responsible
+  for disposing of their heap files when they are no longer needed.
 
   \<^medskip>
-  The \<^verbatim>\<open>-w\<close> option makes the output heap file
-  read-only after terminating.  Thus subsequent invocations cause the
-  logic image to be read-only automatically.
+  The \<^verbatim>\<open>-w\<close> option makes the output heap file read-only after termination.
+  Thus subsequent invocations cause the logic image to be read-only
+  automatically.
 
   \<^medskip>
-  Using the \<^verbatim>\<open>-e\<close> option, arbitrary ML code may be
-  passed to the Isabelle session from the command line. Multiple
-  \<^verbatim>\<open>-e\<close> options are evaluated in the given order. Strange things
-  may happen when erroneous ML code is provided. Also make sure that
-  the ML commands are terminated properly by semicolon.
+  Using the \<^verbatim>\<open>-e\<close> option, arbitrary ML code may be passed to the Isabelle
+  session from the command line. Multiple \<^verbatim>\<open>-e\<close> options are evaluated in the
+  given order. Strange things may happen when erroneous ML code is provided.
+  Also make sure that the ML commands are terminated properly by semicolons.
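+
+  For example, a trivial piece of ML might be evaluated on startup like this
+  (purely illustrative):
+  @{verbatim [display] \<open>isabelle_process -e 'writeln "hello";' -q -r HOL\<close>}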
 
   \<^medskip>
-  The \<^verbatim>\<open>-m\<close> option adds identifiers of print modes
-  to be made active for this session. Typically, this is used by some
-  user interface, e.g.\ to enable output of proper mathematical
-  symbols.
+  The \<^verbatim>\<open>-m\<close> option adds identifiers of print modes to be made active for this
+  session. Typically, this is used by some user interface, e.g.\ to enable
+  output of proper mathematical symbols.
 
   \<^medskip>
-  Isabelle normally enters an interactive top-level loop
-  (after processing the \<^verbatim>\<open>-e\<close> texts). The \<^verbatim>\<open>-q\<close>
-  option inhibits interaction, thus providing a pure batch mode
-  facility.
+  Isabelle normally enters an interactive top-level loop (after processing the
+  \<^verbatim>\<open>-e\<close> texts). The \<^verbatim>\<open>-q\<close> option inhibits interaction, thus providing a pure
+  batch mode facility.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-o\<close> allows to override Isabelle system
-  options for this process, see also \secref{sec:system-options}.
-  Alternatively, option \<^verbatim>\<open>-O\<close> specifies the full environment of
-  system options as a file in YXML format (according to the Isabelle/Scala
-  function \<^verbatim>\<open>isabelle.Options.encode\<close>).
+  Option \<^verbatim>\<open>-o\<close> allows overriding Isabelle system options for this process,
+  see also \secref{sec:system-options}. Alternatively, option \<^verbatim>\<open>-O\<close> specifies
+  the full environment of system options as a file in YXML format (according
+  to the Isabelle/Scala function \<^verbatim>\<open>isabelle.Options.encode\<close>).
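+
+  For instance, a single system option might be overridden on the command
+  line like this (the option name \<^verbatim>\<open>threads\<close> is used here only as an
+  illustration):
+  @{verbatim [display] \<open>isabelle_process -o threads=2 -q -r HOL\<close>}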
 
   \<^medskip>
-  The \<^verbatim>\<open>-P\<close> option starts the Isabelle process wrapper
-  for Isabelle/Scala, with a private protocol running over the specified TCP
-  socket. Isabelle/ML and Isabelle/Scala provide various programming
-  interfaces to invoke protocol functions over untyped strings and XML
-  trees.
+  The \<^verbatim>\<open>-P\<close> option starts the Isabelle process wrapper for Isabelle/Scala,
+  with a private protocol running over the specified TCP socket. Isabelle/ML
+  and Isabelle/Scala provide various programming interfaces to invoke protocol
+  functions over untyped strings and XML trees.
 
   \<^medskip>
-  The \<^verbatim>\<open>-S\<close> option makes the Isabelle process more
-  secure by disabling some critical operations, notably runtime
-  compilation and evaluation of ML source code.
+  The \<^verbatim>\<open>-S\<close> option makes the Isabelle process more secure by disabling some
+  critical operations, notably runtime compilation and evaluation of ML source
+  code.
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
 text \<open>
-  Run an interactive session of the default object-logic (as specified
-  by the @{setting ISABELLE_LOGIC} setting) like this:
+  Run an interactive session of the default object-logic (as specified by the
+  @{setting ISABELLE_LOGIC} setting) like this:
   @{verbatim [display] \<open>isabelle_process\<close>}
 
-  Usually @{setting ISABELLE_LOGIC} refers to one of the standard
-  logic images, which are read-only by default.  A writable session
-  --- based on \<^verbatim>\<open>HOL\<close>, but output to \<^verbatim>\<open>Test\<close> (in the
-  directory specified by the @{setting ISABELLE_OUTPUT} setting) ---
-  may be invoked as follows:
+  Usually @{setting ISABELLE_LOGIC} refers to one of the standard logic
+  images, which are read-only by default. A writable session --- based on
+  \<^verbatim>\<open>HOL\<close>, but output to \<^verbatim>\<open>Test\<close> (in the directory specified by the @{setting
+  ISABELLE_OUTPUT} setting) --- may be invoked as follows:
   @{verbatim [display] \<open>isabelle_process HOL Test\<close>}
 
-  Ending this session normally (e.g.\ by typing control-D) dumps the
-  whole ML system state into \<^verbatim>\<open>Test\<close> (be prepared for more
-  than 100\,MB):
+  Ending this session normally (e.g.\ by typing control-D) dumps the whole ML
+  system state into \<^verbatim>\<open>Test\<close> (be prepared for more than 100\,MB).
 
-  The \<^verbatim>\<open>Test\<close> session may be continued later (still in
-  writable state) by: @{verbatim [display] \<open>isabelle_process Test\<close>}
+  The \<^verbatim>\<open>Test\<close> session may be continued later (still in writable state) by:
+  @{verbatim [display] \<open>isabelle_process Test\<close>}
 
   A read-only \<^verbatim>\<open>Test\<close> session may be started by:
   @{verbatim [display] \<open>isabelle_process -r Test\<close>}
 
   \<^bigskip>
-  The next example demonstrates batch execution of Isabelle.
-  We retrieve the \<^verbatim>\<open>Main\<close> theory value from the theory loader
-  within ML (observe the delicate quoting rules for the Bash shell
-  vs.\ ML):
+  The next example demonstrates batch execution of Isabelle. We retrieve the
+  \<^verbatim>\<open>Main\<close> theory value from the theory loader within ML (observe the delicate
+  quoting rules for the Bash shell vs.\ ML):
   @{verbatim [display] \<open>isabelle_process -e 'Thy_Info.get_theory "Main";' -q -r HOL\<close>}
 
-  Note that the output text will be interspersed with additional junk
-  messages by the ML runtime environment.  The \<^verbatim>\<open>-W\<close> option
-  allows to communicate with the Isabelle process via an external
-  program in a more robust fashion.
+  Note that the output text will be interspersed with additional junk messages
+  by the ML runtime environment. The \<^verbatim>\<open>-W\<close> option allows communication with
+  the Isabelle process via an external program in a more robust fashion.
 \<close>
 
 
 section \<open>The Isabelle tool wrapper \label{sec:isabelle-tool}\<close>
 
 text \<open>
-  All Isabelle related tools and interfaces are called via a common
-  wrapper --- @{executable isabelle}:
+  All Isabelle related tools and interfaces are called via a common wrapper
+  --- @{executable isabelle}:
   @{verbatim [display]
 \<open>Usage: isabelle TOOL [ARGS ...]
 
@@ -474,20 +432,19 @@
 Available tools:
   ...\<close>}
 
-  In principle, Isabelle tools are ordinary executable scripts that
-  are run within the Isabelle settings environment, see
-  \secref{sec:settings}.  The set of available tools is collected by
-  @{executable isabelle} from the directories listed in the @{setting
-  ISABELLE_TOOLS} setting.  Do not try to call the scripts directly
-  from the shell.  Neither should you add the tool directories to your
-  shell's search path!
+  In principle, Isabelle tools are ordinary executable scripts that are run
+  within the Isabelle settings environment, see \secref{sec:settings}. The set
+  of available tools is collected by @{executable isabelle} from the
+  directories listed in the @{setting ISABELLE_TOOLS} setting. Do not try to
+  call the scripts directly from the shell. Neither should you add the tool
+  directories to your shell's search path!
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
-text \<open>Show the list of available documentation of the Isabelle
-  distribution:
+text \<open>
+  Show the list of available documentation of the Isabelle distribution:
   @{verbatim [display] \<open>isabelle doc\<close>}
 
   View a certain document as follows:
--- a/src/Doc/System/Misc.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/System/Misc.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,3 +1,5 @@
+(*:wrap=hard:maxLineLen=78:*)
+
 theory Misc
 imports Base
 begin
@@ -5,25 +7,27 @@
 chapter \<open>Miscellaneous tools \label{ch:tools}\<close>
 
 text \<open>
-  Subsequently we describe various Isabelle related utilities, given
-  in alphabetical order.
+  Subsequently we describe various Isabelle related utilities, given in
+  alphabetical order.
 \<close>
 
 
 section \<open>Theory graph browser \label{sec:browse}\<close>
 
-text \<open>The Isabelle graph browser is a general tool for visualizing
-  dependency graphs.  Certain nodes of the graph (i.e.\ theories) can
-  be grouped together in ``directories'', whose contents may be
-  hidden, thus enabling the user to collapse irrelevant portions of
-  information.  The browser is written in Java, it can be used both as
-  a stand-alone application and as an applet.\<close>
+text \<open>
+  The Isabelle graph browser is a general tool for visualizing dependency
+  graphs. Certain nodes of the graph (i.e.\ theories) can be grouped together
+  in ``directories'', whose contents may be hidden, thus enabling the user to
+  collapse irrelevant portions of information. The browser is written in Java;
+  it can be used both as a stand-alone application and as an applet.
+\<close>
 
 
 subsection \<open>Invoking the graph browser\<close>
 
-text \<open>The stand-alone version of the graph browser is wrapped up as
-  @{tool_def browser}:
+text \<open>
+  The stand-alone version of the graph browser is wrapped up as @{tool_def
+  browser}:
   @{verbatim [display]
 \<open>Usage: isabelle browser [OPTIONS] [GRAPHFILE]
 
@@ -32,35 +36,31 @@
     -c           cleanup -- remove GRAPHFILE after use
     -o FILE      output to FILE (ps, eps, pdf)\<close>}
 
-  When no file name is specified, the browser automatically changes to
-  the directory @{setting ISABELLE_BROWSER_INFO}.
+  When no file name is specified, the browser automatically changes to the
+  directory @{setting ISABELLE_BROWSER_INFO}.
 
   \<^medskip>
-  The \<^verbatim>\<open>-b\<close> option indicates that this is for
-  administrative build only, i.e.\ no browser popup if no files are
-  given.
+  The \<^verbatim>\<open>-b\<close> option indicates that this is for administrative build only, i.e.\
+  no browser popup if no files are given.
 
-  The \<^verbatim>\<open>-c\<close> option causes the input file to be removed
-  after use.
+  The \<^verbatim>\<open>-c\<close> option causes the input file to be removed after use.
 
-  The \<^verbatim>\<open>-o\<close> option indicates batch-mode operation, with the
-  output written to the indicated file; note that \<^verbatim>\<open>pdf\<close>
-  produces an \<^verbatim>\<open>eps\<close> copy as well.
+  The \<^verbatim>\<open>-o\<close> option indicates batch-mode operation, with the output written to
+  the indicated file; note that \<^verbatim>\<open>pdf\<close> produces an \<^verbatim>\<open>eps\<close> copy as well.
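+
+  A typical batch-mode invocation might therefore look like this (the graph
+  file name is arbitrary):
+  @{verbatim [display] \<open>isabelle browser -c -o output.pdf session.graph\<close>}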
 
   \<^medskip>
-  The applet version of the browser is part of the standard
-  WWW theory presentation, see the link ``theory dependencies'' within
-  each session index.
+  The applet version of the browser is part of the standard WWW theory
+  presentation, see the link ``theory dependencies'' within each session
+  index.
 \<close>
 
 
 subsection \<open>Using the graph browser\<close>
 
 text \<open>
-  The browser's main window, which is shown in
-  \figref{fig:browserwindow}, consists of two sub-windows.  In the
-  left sub-window, the directory tree is displayed. The graph itself
-  is displayed in the right sub-window.
+  The browser's main window, which is shown in \figref{fig:browserwindow},
+  consists of two sub-windows. In the left sub-window, the directory tree is
+  displayed. The graph itself is displayed in the right sub-window.
 
   \begin{figure}[ht]
   \includegraphics[width=\textwidth]{browser_screenshot}
@@ -72,63 +72,57 @@
 subsubsection \<open>The directory tree window\<close>
 
 text \<open>
-  We describe the usage of the directory browser and the meaning of
-  the different items in the browser window.
+  We describe the usage of the directory browser and the meaning of the
+  different items in the browser window.
 
-  \<^item> A red arrow before a directory name indicates that the
-  directory is currently ``folded'', i.e.~the nodes in this directory
-  are collapsed to one single node. In the right sub-window, the names
-  of nodes corresponding to folded directories are enclosed in square
-  brackets and displayed in red color.
+  \<^item> A red arrow before a directory name indicates that the directory is
+  currently ``folded'', i.e.~the nodes in this directory are collapsed to one
+  single node. In the right sub-window, the names of nodes corresponding to
+  folded directories are enclosed in square brackets and displayed in red
+  color.
 
-  \<^item> A green downward arrow before a directory name indicates that
-  the directory is currently ``unfolded''. It can be folded by
-  clicking on the directory name.  Clicking on the name for a second
-  time unfolds the directory again.  Alternatively, a directory can
-  also be unfolded by clicking on the corresponding node in the right
-  sub-window.
+  \<^item> A green downward arrow before a directory name indicates that the
+  directory is currently ``unfolded''. It can be folded by clicking on the
+  directory name. Clicking on the name for a second time unfolds the directory
+  again. Alternatively, a directory can also be unfolded by clicking on the
+  corresponding node in the right sub-window.
 
-  \<^item> Blue arrows stand before ordinary node names. When clicking on
-  such a name (i.e.\ that of a theory), the graph display window
-  focuses to the corresponding node. Double clicking invokes a text
-  viewer window in which the contents of the theory file are
-  displayed.
+  \<^item> Blue arrows stand before ordinary node names. When clicking on such a name
+  (i.e.\ that of a theory), the graph display window focuses on the
+  corresponding node. Double clicking invokes a text viewer window in which
+  the contents of the theory file are displayed.
 \<close>
 
 
 subsubsection \<open>The graph display window\<close>
 
 text \<open>
-  When pointing on an ordinary node, an upward and a downward arrow is
-  shown.  Initially, both of these arrows are green. Clicking on the
-  upward or downward arrow collapses all predecessor or successor
-  nodes, respectively. The arrow's color then changes to red,
-  indicating that the predecessor or successor nodes are currently
-  collapsed. The node corresponding to the collapsed nodes has the
-  name ``\<^verbatim>\<open>[....]\<close>''. To uncollapse the nodes again, simply
-  click on the red arrow or on the node with the name ``\<^verbatim>\<open>[....]\<close>''.
-  Similar to the directory browser, the contents of
-  theory files can be displayed by double clicking on the
-  corresponding node.
+  When pointing at an ordinary node, an upward and a downward arrow are shown.
+  Initially, both of these arrows are green. Clicking on the upward or
+  downward arrow collapses all predecessor or successor nodes, respectively.
+  The arrow's color then changes to red, indicating that the predecessor or
+  successor nodes are currently collapsed. The node corresponding to the
+  collapsed nodes has the name ``\<^verbatim>\<open>[....]\<close>''. To uncollapse the nodes again,
+  simply click on the red arrow or on the node with the name ``\<^verbatim>\<open>[....]\<close>''.
+  Similar to the directory browser, the contents of theory files can be
+  displayed by double clicking on the corresponding node.
 \<close>
 
 
 subsubsection \<open>The ``File'' menu\<close>
 
 text \<open>
-  Due to Java Applet security restrictions this menu is only available
-  in the full application version. The meaning of the menu items is as
-  follows:
+  Due to Java Applet security restrictions this menu is only available in the
+  full application version. The meaning of the menu items is as follows:
 
   \<^descr>[Open \dots] Open a new graph file.
 
-  \<^descr>[Export to PostScript] Outputs the current graph in Postscript
-  format, appropriately scaled to fit on one single sheet of A4 paper.
-  The resulting file can be printed directly.
+  \<^descr>[Export to PostScript] Outputs the current graph in PostScript format,
+  appropriately scaled to fit on a single sheet of A4 paper. The resulting
+  file can be printed directly.
 
-  \<^descr>[Export to EPS] Outputs the current graph in Encapsulated
-  Postscript format. The resulting file can be included in other
-  documents.
+  \<^descr>[Export to EPS] Outputs the current graph in Encapsulated PostScript
+  format. The resulting file can be included in other documents.
 
   \<^descr>[Quit] Quit the graph browser.
 \<close>
@@ -150,22 +144,20 @@
 
   \<^descr>[\<open>vertex_name\<close>] The name of the vertex.
 
-  \<^descr>[\<open>vertex_ID\<close>] The vertex identifier. Note that there may
-  be several vertices with equal names, whereas identifiers must be
-  unique.
+  \<^descr>[\<open>vertex_ID\<close>] The vertex identifier. Note that there may be several
+  vertices with equal names, whereas identifiers must be unique.
 
-  \<^descr>[\<open>dir_name\<close>] The name of the ``directory'' the vertex
-  should be placed in.  A ``\<^verbatim>\<open>+\<close>'' sign after \<open>dir_name\<close> indicates that the nodes in the directory are initially
-  visible. Directories are initially invisible by default.
+  \<^descr>[\<open>dir_name\<close>] The name of the ``directory'' the vertex should be placed in.
+  A ``\<^verbatim>\<open>+\<close>'' sign after \<open>dir_name\<close> indicates that the nodes in the directory
+  are initially visible. Directories are initially invisible by default.
 
-  \<^descr>[\<open>path\<close>] The path of the corresponding theory file. This
-  is specified relatively to the path of the graph definition file.
+  \<^descr>[\<open>path\<close>] The path of the corresponding theory file. This is specified
+  relative to the path of the graph definition file.
 
-  \<^descr>[List of successor/predecessor nodes] A ``\<^verbatim>\<open><\<close>''
-  sign before the list means that successor nodes are listed, a
-  ``\<^verbatim>\<open>>\<close>'' sign means that predecessor nodes are listed. If
-  neither ``\<^verbatim>\<open><\<close>'' nor ``\<^verbatim>\<open>>\<close>'' is found, the
-  browser assumes that successor nodes are listed.
+  \<^descr>[List of successor/predecessor nodes] A ``\<^verbatim>\<open><\<close>'' sign before the list means
+  that successor nodes are listed, a ``\<^verbatim>\<open>>\<close>'' sign means that predecessor
+  nodes are listed. If neither ``\<^verbatim>\<open><\<close>'' nor ``\<^verbatim>\<open>>\<close>'' is found, the browser
+  assumes that successor nodes are listed.
 \<close>
 
 
@@ -188,33 +180,28 @@
 
   ISABELLE_COMPONENT_REPOSITORY="http://isabelle.in.tum.de/components"\<close>}
 
-  Components are initialized as described in \secref{sec:components}
-  in a permissive manner, which can mark components as ``missing''.
-  This state is amended by letting @{tool "components"} download and
-  unpack components that are published on the default component
-  repository @{url "http://isabelle.in.tum.de/components/"} in
-  particular.
+  Components are initialized as described in \secref{sec:components} in a
+  permissive manner, which can mark components as ``missing''. This state is
+  amended by letting @{tool "components"} download and unpack components that
+  are published on the default component repository @{url
+  "http://isabelle.in.tum.de/components/"} in particular.
 
-  Option \<^verbatim>\<open>-R\<close> specifies an alternative component
-  repository.  Note that \<^verbatim>\<open>file:///\<close> URLs can be used for
-  local directories.
+  Option \<^verbatim>\<open>-R\<close> specifies an alternative component repository. Note that
+  \<^verbatim>\<open>file:///\<close> URLs can be used for local directories.
 
-  Option \<^verbatim>\<open>-a\<close> selects all missing components to be
-  resolved.  Explicit components may be named as command
-  line-arguments as well.  Note that components are uniquely
-  identified by their base name, while the installation takes place in
-  the location that was specified in the attempt to initialize the
-  component before.
+  Option \<^verbatim>\<open>-a\<close> selects all missing components to be resolved. Explicit
+  components may be named as command-line arguments as well. Note that
+  components are uniquely identified by their base name, while the
+  installation takes place in the location that was specified in the attempt
+  to initialize the component before.
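+
+  For example, a typical invocation that downloads and unpacks all missing
+  components from the default repository might look like this:
+  @{verbatim [display] \<open>isabelle components -a\<close>}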
 
-  Option \<^verbatim>\<open>-l\<close> lists the current state of available and
-  missing components with their location (full name) within the
-  file-system.
+  Option \<^verbatim>\<open>-l\<close> lists the current state of available and missing components
+  with their location (full name) within the file-system.
 
-  Option \<^verbatim>\<open>-I\<close> initializes the user settings file to
-  subscribe to the standard components specified in the Isabelle
-  repository clone --- this does not make any sense for regular
-  Isabelle releases.  If the file already exists, it needs to be
-  edited manually according to the printed explanation.
+  Option \<^verbatim>\<open>-I\<close> initializes the user settings file to subscribe to the standard
+  components specified in the Isabelle repository clone --- this does not make
+  any sense for regular Isabelle releases. If the file already exists, it
+  needs to be edited manually according to the printed explanation.
 \<close>
 
 
@@ -236,14 +223,14 @@
   Run Isabelle process with raw ML console and line editor
   (default ISABELLE_LINE_EDITOR).\<close>}
 
-  The \<^verbatim>\<open>-l\<close> option specifies the logic session name. By default,
-  its heap image is checked and built on demand, but the option \<^verbatim>\<open>-n\<close> skips that.
+  The \<^verbatim>\<open>-l\<close> option specifies the logic session name. By default, its heap
+  image is checked and built on demand, but the option \<^verbatim>\<open>-n\<close> skips that.
 
-  Options \<^verbatim>\<open>-d\<close>, \<^verbatim>\<open>-o\<close>, \<^verbatim>\<open>-s\<close> are passed
-  directly to @{tool build} (\secref{sec:tool-build}).
+  Options \<^verbatim>\<open>-d\<close>, \<^verbatim>\<open>-o\<close>, \<^verbatim>\<open>-s\<close> are passed directly to @{tool build}
+  (\secref{sec:tool-build}).
 
-  Options \<^verbatim>\<open>-m\<close>, \<^verbatim>\<open>-o\<close> are passed directly to the
-  underlying Isabelle process (\secref{sec:isabelle-process}).
+  Options \<^verbatim>\<open>-m\<close>, \<^verbatim>\<open>-o\<close> are passed directly to the underlying Isabelle process
+  (\secref{sec:isabelle-process}).
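+
+  For example, a raw ML console for the \<open>HOL\<close> session might be started like
+  this (a sketch, assuming the \<open>HOL\<close> heap image is available or can be built):
+  @{verbatim [display] \<open>isabelle console -l HOL\<close>}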
 
   The Isabelle process is run through the line editor that is specified via
   the settings variable @{setting ISABELLE_LINE_EDITOR} (e.g.\
@@ -259,19 +246,18 @@
 
 section \<open>Displaying documents \label{sec:tool-display}\<close>
 
-text \<open>The @{tool_def display} tool displays documents in DVI or PDF
-  format:
+text \<open>
+  The @{tool_def display} tool displays documents in DVI or PDF format:
   @{verbatim [display]
 \<open>Usage: isabelle display DOCUMENT
 
   Display DOCUMENT (in DVI or PDF format).\<close>}
 
   \<^medskip>
-  The settings @{setting DVI_VIEWER} and @{setting
-  PDF_VIEWER} determine the programs for viewing the corresponding
-  file formats.  Normally this opens the document via the desktop
-  environment, potentially in an asynchronous manner with re-use of
-  previews views.
+  The settings @{setting DVI_VIEWER} and @{setting PDF_VIEWER} determine the
+  programs for viewing the corresponding file formats. Normally this opens the
+  document via the desktop environment, potentially in an asynchronous manner
+  with re-use of previous views.
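+
+  For example, a hypothetical PDF file could be shown like this (the file
+  name is arbitrary):
+  @{verbatim [display] \<open>isabelle display document.pdf\<close>}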
 \<close>
 
 
@@ -284,27 +270,27 @@
 
   View Isabelle documentation.\<close>}
 
-  If called without arguments, it lists all available documents. Each
-  line starts with an identifier, followed by a short description. Any
-  of these identifiers may be specified as arguments, in order to
-  display the corresponding document (see also
-  \secref{sec:tool-display}).
+  If called without arguments, it lists all available documents. Each line
+  starts with an identifier, followed by a short description. Any of these
+  identifiers may be specified as arguments, in order to display the
+  corresponding document (see also \secref{sec:tool-display}).
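+
+  For example, the following lists the available documentation and then opens
+  one entry, assuming an identifier such as \<^verbatim>\<open>system\<close> appears in that list:
+  @{verbatim [display]
+\<open>isabelle doc
+isabelle doc system\<close>}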
 
   \<^medskip>
-  The @{setting ISABELLE_DOCS} setting specifies the list of
-  directories (separated by colons) to be scanned for documentations.
+  The @{setting ISABELLE_DOCS} setting specifies the list of directories
+  (separated by colons) to be scanned for documentation.
 \<close>
 
 
 section \<open>Shell commands within the settings environment \label{sec:tool-env}\<close>
 
-text \<open>The @{tool_def env} tool is a direct wrapper for the standard
-  \<^verbatim>\<open>/usr/bin/env\<close> command on POSIX systems, running within
-  the Isabelle settings environment (\secref{sec:settings}).
+text \<open>
+  The @{tool_def env} tool is a direct wrapper for the standard
+  \<^verbatim>\<open>/usr/bin/env\<close> command on POSIX systems, running within the Isabelle
+  settings environment (\secref{sec:settings}).
 
-  The command-line arguments are that of the underlying version of
-  \<^verbatim>\<open>env\<close>.  For example, the following invokes an instance of
-  the GNU Bash shell within the Isabelle environment:
+  The command-line arguments are those of the underlying version of \<^verbatim>\<open>env\<close>. For
+  example, the following invokes an instance of the GNU Bash shell within the
+  Isabelle environment:
   @{verbatim [display] \<open>isabelle env bash\<close>}
 \<close>
 
@@ -325,38 +311,39 @@
 
   Get value of VARNAMES from the Isabelle settings.\<close>}
 
-  With the \<^verbatim>\<open>-a\<close> option, one may inspect the full process
-  environment that Isabelle related programs are run in. This usually
-  contains much more variables than are actually Isabelle settings.
-  Normally, output is a list of lines of the form \<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close>. The \<^verbatim>\<open>-b\<close> option
-  causes only the values to be printed.
+  With the \<^verbatim>\<open>-a\<close> option, one may inspect the full process environment that
+  Isabelle-related programs are run in. This usually contains many more
+  variables than are actually Isabelle settings. Normally, output is a list of
+  lines of the form \<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close>. The \<^verbatim>\<open>-b\<close> option causes only the values
+  to be printed.
 
-  Option \<^verbatim>\<open>-d\<close> produces a dump of the complete environment
-  to the specified file.  Entries are terminated by the ASCII null
-  character, i.e.\ the C string terminator.
+  Option \<^verbatim>\<open>-d\<close> produces a dump of the complete environment to the specified
+  file. Entries are terminated by the ASCII null character, i.e.\ the C string
+  terminator.
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
-text \<open>Get the location of @{setting ISABELLE_HOME_USER} where
-  user-specific information is stored:
+text \<open>
+  Get the location of @{setting ISABELLE_HOME_USER} where user-specific
+  information is stored:
   @{verbatim [display] \<open>isabelle getenv ISABELLE_HOME_USER\<close>}
 
   \<^medskip>
-  Get the value only of the same settings variable, which is
-  particularly useful in shell scripts:
+  Get the value only of the same settings variable, which is particularly
+  useful in shell scripts:
   @{verbatim [display] \<open>isabelle getenv -b ISABELLE_OUTPUT\<close>}
 \<close>
 
 
 section \<open>Installing standalone Isabelle executables \label{sec:tool-install}\<close>
 
-text \<open>By default, the main Isabelle binaries (@{executable
-  "isabelle"} etc.)  are just run from their location within the
-  distribution directory, probably indirectly by the shell through its
-  @{setting PATH}.  Other schemes of installation are supported by the
-  @{tool_def install} tool:
+text \<open>
+  By default, the main Isabelle binaries (@{executable "isabelle"} etc.) are
+  just run from their location within the distribution directory, probably
+  indirectly by the shell through its @{setting PATH}. Other schemes of
+  installation are supported by the @{tool_def install} tool:
   @{verbatim [display]
 \<open>Usage: isabelle install [OPTIONS] BINDIR
 
@@ -367,24 +354,26 @@
   Install Isabelle executables with absolute references to the
   distribution directory.\<close>}
 
-  The \<^verbatim>\<open>-d\<close> option overrides the current Isabelle
-  distribution directory as determined by @{setting ISABELLE_HOME}.
+  The \<^verbatim>\<open>-d\<close> option overrides the current Isabelle distribution directory as
+  determined by @{setting ISABELLE_HOME}.
 
-  The \<open>BINDIR\<close> argument tells where executable wrapper scripts
-  for @{executable "isabelle_process"} and @{executable isabelle}
-  should be placed, which is typically a directory in the shell's
-  @{setting PATH}, such as \<^verbatim>\<open>$HOME/bin\<close>.
+  The \<open>BINDIR\<close> argument tells where executable wrapper scripts for
+  @{executable "isabelle_process"} and @{executable isabelle} should be
+  placed, which is typically a directory in the shell's @{setting PATH}, such
+  as \<^verbatim>\<open>$HOME/bin\<close>.
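+
+  For example, wrapper scripts could be installed into \<^verbatim>\<open>$HOME/bin\<close> like this:
+  @{verbatim [display] \<open>isabelle install "$HOME/bin"\<close>}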
 
   \<^medskip>
-  It is also possible to make symbolic links of the main
-  Isabelle executables manually, but making separate copies outside
-  the Isabelle distribution directory will not work!\<close>
+  It is also possible to make symbolic links of the main Isabelle executables
+  manually, but making separate copies outside the Isabelle distribution
+  directory will not work!
+\<close>
 
 
 section \<open>Creating instances of the Isabelle logo\<close>
 
-text \<open>The @{tool_def logo} tool creates instances of the generic
-  Isabelle logo as EPS and PDF, for inclusion in {\LaTeX} documents.
+text \<open>
+  The @{tool_def logo} tool creates instances of the generic Isabelle logo as
+  EPS and PDF, for inclusion in {\LaTeX} documents.
   @{verbatim [display]
 \<open>Usage: isabelle logo [OPTIONS] XYZ
 
@@ -394,16 +383,15 @@
     -n NAME      alternative output base name (default "isabelle_xyx")
     -q           quiet mode\<close>}
 
-  Option \<^verbatim>\<open>-n\<close> specifies an alternative (base) name for the
-  generated files.  The default is \<^verbatim>\<open>isabelle_\<close>\<open>xyz\<close>
-  in lower-case.
+  Option \<^verbatim>\<open>-n\<close> specifies an alternative (base) name for the generated files.
+  The default is \<^verbatim>\<open>isabelle_\<close>\<open>xyz\<close> in lower-case.
 
   Option \<^verbatim>\<open>-q\<close> omits printing of the result file name.
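+
+  For example, the following sketch produces logo files with base name
+  \<^verbatim>\<open>isabelle_xyz\<close> for a hypothetical instance \<^verbatim>\<open>XYZ\<close>:
+  @{verbatim [display] \<open>isabelle logo XYZ\<close>}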
 
   \<^medskip>
-  Implementors of Isabelle tools and applications are
-  encouraged to make derived Isabelle logos for their own projects
-  using this template.\<close>
+  Implementors of Isabelle tools and applications are encouraged to make
+  derived Isabelle logos for their own projects using this template.
+\<close>
 
 
 section \<open>Output the version identifier of the Isabelle distribution\<close>
@@ -419,53 +407,49 @@
   Display Isabelle version information.\<close>}
 
   \<^medskip>
-  The default is to output the full version string of the
-  Isabelle distribution, e.g.\ ``\<^verbatim>\<open>Isabelle2012: May 2012\<close>.
+  The default is to output the full version string of the Isabelle
+  distribution, e.g.\ ``\<^verbatim>\<open>Isabelle2012: May 2012\<close>''.
 
-  The \<^verbatim>\<open>-i\<close> option produces a short identification derived
-  from the Mercurial id of the @{setting ISABELLE_HOME} directory.
+  The \<^verbatim>\<open>-i\<close> option produces a short identification derived from the Mercurial
+  id of the @{setting ISABELLE_HOME} directory.
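+
+  For example, the short identification might be obtained like this:
+  @{verbatim [display] \<open>isabelle version -i\<close>}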
 \<close>
 
 
 section \<open>Convert XML to YXML\<close>
 
 text \<open>
-  The @{tool_def yxml} tool converts a standard XML document (stdin)
-  to the much simpler and more efficient YXML format of Isabelle
-  (stdout).  The YXML format is defined as follows.
-
-  \<^enum> The encoding is always UTF-8.
+  The @{tool_def yxml} tool converts a standard XML document (stdin) to the
+  much simpler and more efficient YXML format of Isabelle (stdout). The YXML
+  format is defined as follows.
 
-  \<^enum> Body text is represented verbatim (no escaping, no special
-  treatment of white space, no named entities, no CDATA chunks, no
-  comments).
+    \<^enum> The encoding is always UTF-8.
 
-  \<^enum> Markup elements are represented via ASCII control characters
-  \<open>\<^bold>X = 5\<close> and \<open>\<^bold>Y = 6\<close> as follows:
+    \<^enum> Body text is represented verbatim (no escaping, no special treatment of
+    white space, no named entities, no CDATA chunks, no comments).
 
-  \begin{tabular}{ll}
-    XML & YXML \\\hline
-    \<^verbatim>\<open><\<close>\<open>name attribute\<close>\<^verbatim>\<open>=\<close>\<open>value \<dots>\<close>\<^verbatim>\<open>>\<close> &
-    \<open>\<^bold>X\<^bold>Yname\<^bold>Yattribute\<close>\<^verbatim>\<open>=\<close>\<open>value\<dots>\<^bold>X\<close> \\
-    \<^verbatim>\<open></\<close>\<open>name\<close>\<^verbatim>\<open>>\<close> & \<open>\<^bold>X\<^bold>Y\<^bold>X\<close> \\
-  \end{tabular}
+    \<^enum> Markup elements are represented via ASCII control characters \<open>\<^bold>X = 5\<close>
+    and \<open>\<^bold>Y = 6\<close> as follows:
 
-  There is no special case for empty body text, i.e.\ \<^verbatim>\<open><foo/>\<close>
-  is treated like \<^verbatim>\<open><foo></foo>\<close>.  Also note that
-  \<open>\<^bold>X\<close> and \<open>\<^bold>Y\<close> may never occur in
-  well-formed XML documents.
+    \begin{tabular}{ll}
+      XML & YXML \\\hline
+      \<^verbatim>\<open><\<close>\<open>name attribute\<close>\<^verbatim>\<open>=\<close>\<open>value \<dots>\<close>\<^verbatim>\<open>>\<close> &
+      \<open>\<^bold>X\<^bold>Yname\<^bold>Yattribute\<close>\<^verbatim>\<open>=\<close>\<open>value\<dots>\<^bold>X\<close> \\
+      \<^verbatim>\<open></\<close>\<open>name\<close>\<^verbatim>\<open>>\<close> & \<open>\<^bold>X\<^bold>Y\<^bold>X\<close> \\
+    \end{tabular}
 
+    There is no special case for empty body text, i.e.\ \<^verbatim>\<open><foo/>\<close> is treated
+    like \<^verbatim>\<open><foo></foo>\<close>. Also note that \<open>\<^bold>X\<close> and \<open>\<^bold>Y\<close> may never occur in
+    well-formed XML documents.
 
   Parsing YXML is pretty straight-forward: split the text into chunks
-  separated by \<open>\<^bold>X\<close>, then split each chunk into
-  sub-chunks separated by \<open>\<^bold>Y\<close>.  Markup chunks start
-  with an empty sub-chunk, and a second empty sub-chunk indicates
-  close of an element.  Any other non-empty chunk consists of plain
-  text.  For example, see @{file "~~/src/Pure/PIDE/yxml.ML"} or
-  @{file "~~/src/Pure/PIDE/yxml.scala"}.
+  separated by \<open>\<^bold>X\<close>, then split each chunk into sub-chunks separated by \<open>\<^bold>Y\<close>.
+  Markup chunks start with an empty sub-chunk, and a second empty sub-chunk
+  indicates close of an element. Any other non-empty chunk consists of plain
+  text. For example, see @{file "~~/src/Pure/PIDE/yxml.ML"} or @{file
+  "~~/src/Pure/PIDE/yxml.scala"}.
 
-  YXML documents may be detected quickly by checking that the first
-  two characters are \<open>\<^bold>X\<^bold>Y\<close>.
+  YXML documents may be detected quickly by checking that the first two
+  characters are \<open>\<^bold>X\<^bold>Y\<close>.
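+
+  For example, a conversion via stdin/stdout might look like this (file names
+  are arbitrary):
+  @{verbatim [display] \<open>isabelle yxml < document.xml > document.yxml\<close>}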
 \<close>
 
 end
\ No newline at end of file
--- a/src/Doc/System/Presentation.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/System/Presentation.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,24 +1,27 @@
+(*:wrap=hard:maxLineLen=78:*)
+
 theory Presentation
 imports Base
 begin
 
 chapter \<open>Presenting theories \label{ch:present}\<close>
 
-text \<open>Isabelle provides several ways to present the outcome of
-  formal developments, including WWW-based browsable libraries or
-  actual printable documents.  Presentation is centered around the
-  concept of \<^emph>\<open>sessions\<close> (\chref{ch:session}).  The global session
-  structure is that of a tree, with Isabelle Pure at its root, further
-  object-logics derived (e.g.\ HOLCF from HOL, and HOL from Pure), and
-  application sessions further on in the hierarchy.
+text \<open>
+  Isabelle provides several ways to present the outcome of formal
+  developments, including WWW-based browsable libraries or actual printable
+  documents. Presentation is centered around the concept of \<^emph>\<open>sessions\<close>
+  (\chref{ch:session}). The global session structure is that of a tree, with
+  Isabelle Pure at its root, further object-logics derived (e.g.\ HOLCF from
+  HOL, and HOL from Pure), and application sessions further on in the
+  hierarchy.
 
-  The tools @{tool_ref mkroot} and @{tool_ref build} provide the
-  primary means for managing Isabelle sessions, including proper setup
-  for presentation; @{tool build} takes care to have @{executable_ref
-  "isabelle_process"} run any additional stages required for document
-  preparation, notably the @{tool_ref document} and @{tool_ref latex}.
-  The complete tool chain for managing batch-mode Isabelle sessions is
-  illustrated in \figref{fig:session-tools}.
+  The tools @{tool_ref mkroot} and @{tool_ref build} provide the primary means
+  for managing Isabelle sessions, including proper setup for presentation;
+  @{tool build} takes care to have @{executable_ref "isabelle_process"} run
+  any additional stages required for document preparation, notably the
+  @{tool_ref document} and @{tool_ref latex}. The complete tool chain for
+  managing batch-mode Isabelle sessions is illustrated in
+  \figref{fig:session-tools}.
 
   \begin{figure}[htbp]
   \begin{center}
@@ -53,58 +56,55 @@
 text \<open>
   \index{theory browsing information|bold}
 
-  As a side-effect of building sessions, Isabelle is able to generate
-  theory browsing information, including HTML documents that show the
-  theory sources and the relationship with its ancestors and
-  descendants.  Besides the HTML file that is generated for every
-  theory, Isabelle stores links to all theories of a session in an
-  index file.  As a second hierarchy, groups of sessions are organized
-  as \<^emph>\<open>chapters\<close>, with a separate index.  Note that the implicit
-  tree structure of the session build hierarchy is \<^emph>\<open>not\<close> relevant
+  As a side-effect of building sessions, Isabelle is able to generate theory
+  browsing information, including HTML documents that show the theory sources
+  and the relationship with its ancestors and descendants. Besides the HTML
+  file that is generated for every theory, Isabelle stores links to all
+  theories of a session in an index file. As a second hierarchy, groups of
+  sessions are organized as \<^emph>\<open>chapters\<close>, with a separate index. Note that the
+  implicit tree structure of the session build hierarchy is \<^emph>\<open>not\<close> relevant
   for the presentation.
 
-  Isabelle also generates graph files that represent the theory
-  dependencies within a session.  There is a graph browser Java applet
-  embedded in the generated HTML pages, and also a stand-alone
-  application that allows browsing theory graphs without having to
-  start a WWW client first.  The latter version also includes features
-  such as generating Postscript files, which are not available in the
-  applet version.  See \secref{sec:browse} for further information.
+  Isabelle also generates graph files that represent the theory dependencies
+  within a session. There is a graph browser Java applet embedded in the
+  generated HTML pages, and also a stand-alone application that allows
+  browsing theory graphs without having to start a WWW client first. The
+  latter version also includes features such as generating Postscript files,
+  which are not available in the applet version. See \secref{sec:browse} for
+  further information.
 
   \<^medskip>
-  The easiest way to let Isabelle generate theory browsing information
-  for existing sessions is to invoke @{tool build} with suitable
-  options:
+  The easiest way to let Isabelle generate theory browsing information for
+  existing sessions is to invoke @{tool build} with suitable options:
   @{verbatim [display] \<open>isabelle build -o browser_info -v -c FOL\<close>}
 
-  The presentation output will appear in \<^verbatim>\<open>$ISABELLE_BROWSER_INFO/FOL/FOL\<close>
-  as reported by the above verbose invocation of the build process.
+  The presentation output will appear in \<^verbatim>\<open>$ISABELLE_BROWSER_INFO/FOL/FOL\<close> as
+  reported by the above verbose invocation of the build process.
 
   Many Isabelle sessions (such as \<^verbatim>\<open>HOL-Library\<close> in @{file
-  "~~/src/HOL/Library"}) also provide actual printable documents.
-  These are prepared automatically as well if enabled like this:
+  "~~/src/HOL/Library"}) also provide actual printable documents. These are
+  prepared automatically as well if enabled like this:
   @{verbatim [display] \<open>isabelle build -o browser_info -o document=pdf -v -c HOL-Library\<close>}
 
-  Enabling both browser info and document preparation simultaneously
-  causes an appropriate ``document'' link to be included in the HTML
-  index.  Documents may be generated independently of browser
-  information as well, see \secref{sec:tool-document} for further
-  details.
+  Enabling both browser info and document preparation simultaneously causes an
+  appropriate ``document'' link to be included in the HTML index. Documents
+  may be generated independently of browser information as well, see
+  \secref{sec:tool-document} for further details.
 
   \<^bigskip>
-  The theory browsing information is stored in a
-  sub-directory directory determined by the @{setting_ref
-  ISABELLE_BROWSER_INFO} setting plus a prefix corresponding to the
-  session chapter and identifier.  In order to present Isabelle
-  applications on the web, the corresponding subdirectory from
-  @{setting ISABELLE_BROWSER_INFO} can be put on a WWW server.\<close>
+  The theory browsing information is stored in a sub-directory determined by
+  the @{setting_ref ISABELLE_BROWSER_INFO} setting plus a prefix
+  corresponding to the session chapter and identifier. In order to present
+  Isabelle applications on the web, the corresponding subdirectory from
+  @{setting ISABELLE_BROWSER_INFO} can be put on a WWW server.
+\<close>
 
 
 section \<open>Preparing session root directories \label{sec:tool-mkroot}\<close>
 
-text \<open>The @{tool_def mkroot} tool configures a given directory as
-  session root, with some \<^verbatim>\<open>ROOT\<close> file and optional document
-  source directory.  Its usage is:
+text \<open>
+  The @{tool_def mkroot} tool configures a given directory as session root,
+  with some \<^verbatim>\<open>ROOT\<close> file and optional document source directory. Its usage is:
   @{verbatim [display]
 \<open>Usage: isabelle mkroot [OPTIONS] [DIR]
 
@@ -114,46 +114,45 @@
 
   Prepare session root DIR (default: current directory).\<close>}
 
-  The results are placed in the given directory \<open>dir\<close>, which
-  refers to the current directory by default.  The @{tool mkroot} tool
-  is conservative in the sense that it does not overwrite existing
-  files or directories.  Earlier attempts to generate a session root
-  need to be deleted manually.
+  The results are placed in the given directory \<open>dir\<close>, which refers to the
+  current directory by default. The @{tool mkroot} tool is conservative in the
+  sense that it does not overwrite existing files or directories. Earlier
+  attempts to generate a session root need to be deleted manually.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-d\<close> indicates that the session shall be
-  accompanied by a formal document, with \<open>DIR\<close>\<^verbatim>\<open>/document/root.tex\<close>
-  as its {\LaTeX} entry point (see also \chref{ch:present}).
+  Option \<^verbatim>\<open>-d\<close> indicates that the session shall be accompanied by a formal
+  document, with \<open>DIR\<close>\<^verbatim>\<open>/document/root.tex\<close> as its {\LaTeX} entry point (see
+  also \chref{ch:present}).
 
-  Option \<^verbatim>\<open>-n\<close> allows to specify an alternative session
-  name; otherwise the base name of the given directory is used.
+  Option \<^verbatim>\<open>-n\<close> allows to specify an alternative session name; otherwise the
+  base name of the given directory is used.
 
   \<^medskip>
-  The implicit Isabelle settings variable @{setting
-  ISABELLE_LOGIC} specifies the parent session, and @{setting
-  ISABELLE_DOCUMENT_FORMAT} the document format to be filled filled
-  into the generated \<^verbatim>\<open>ROOT\<close> file.
+  The implicit Isabelle settings variable @{setting ISABELLE_LOGIC} specifies
+  the parent session, and @{setting ISABELLE_DOCUMENT_FORMAT} the document
+  format to be filled into the generated \<^verbatim>\<open>ROOT\<close> file.
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
-text \<open>Produce session \<^verbatim>\<open>Test\<close> (with document preparation)
-  within a separate directory of the same name:
+text \<open>
+  Produce session \<^verbatim>\<open>Test\<close> (with document preparation) within a separate
+  directory of the same name:
   @{verbatim [display] \<open>isabelle mkroot -d Test && isabelle build -D Test\<close>}
 
   \<^medskip>
-  Upgrade the current directory into a session ROOT with
-  document preparation, and build it:
+  Upgrade the current directory into a session ROOT with document preparation,
+  and build it:
   @{verbatim [display] \<open>isabelle mkroot -d && isabelle build -D .\<close>}
 \<close>
 
 
 section \<open>Preparing Isabelle session documents \label{sec:tool-document}\<close>
 
-text \<open>The @{tool_def document} tool prepares logic session
-  documents, processing the sources as provided by the user and
-  generated by Isabelle.  Its usage is:
+text \<open>
+  The @{tool_def document} tool prepares logic session documents, processing
+  the sources as provided by the user and generated by Isabelle. Its usage is:
   @{verbatim [display]
 \<open>Usage: isabelle document [OPTIONS] [DIR]
 
@@ -168,90 +167,83 @@
 
   This tool is usually run automatically as part of the Isabelle build
   process, provided document preparation has been enabled via suitable
-  options.  It may be manually invoked on the generated browser
-  information document output as well, e.g.\ in case of errors
-  encountered in the batch run.
+  options. It may be manually invoked on the generated browser information
+  document output as well, e.g.\ in case of errors encountered in the batch
+  run.
 
   \<^medskip>
-  The \<^verbatim>\<open>-c\<close> option tells @{tool document} to
-  dispose the document sources after successful operation!  This is
-  the right thing to do for sources generated by an Isabelle process,
-  but take care of your files in manual document preparation!
+  The \<^verbatim>\<open>-c\<close> option tells @{tool document} to dispose of the document
+  sources after successful operation! This is the right thing to do for
+  sources generated by an Isabelle process, but take care of your files in
+  manual document preparation!
 
   \<^medskip>
-  The \<^verbatim>\<open>-n\<close> and \<^verbatim>\<open>-o\<close> option specify
-  the final output file name and format, the default is ``\<^verbatim>\<open>document.dvi\<close>''.
-  Note that the result will appear in the parent of the target \<^verbatim>\<open>DIR\<close>.
+  The \<^verbatim>\<open>-n\<close> and \<^verbatim>\<open>-o\<close> options specify the final output file name and format;
+  the default is ``\<^verbatim>\<open>document.dvi\<close>''. Note that the result will appear in the
+  parent of the target \<^verbatim>\<open>DIR\<close>.
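+
+  For example, a PDF could be produced manually from the generated document
+  sources of a hypothetical session \<^verbatim>\<open>Test\<close> (cf.\ the example in
+  \secref{sec:tool-latex}) like this:
+  @{verbatim [display]
+\<open>isabelle document -o pdf \
+  "$(isabelle getenv -b ISABELLE_BROWSER_INFO)/Unsorted/Test/document"\<close>}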
 
   \<^medskip>
-  The \<^verbatim>\<open>-t\<close> option tells {\LaTeX} how to interpret
-  tagged Isabelle command regions.  Tags are specified as a comma
-  separated list of modifier/name pairs: ``\<^verbatim>\<open>+\<close>\<open>foo\<close>'' (or just ``\<open>foo\<close>'')
-  means to keep, ``\<^verbatim>\<open>-\<close>\<open>foo\<close>'' to drop, and ``\<^verbatim>\<open>/\<close>\<open>foo\<close>'' to
-  fold text tagged as \<open>foo\<close>.  The builtin default is equivalent
-  to the tag specification ``\<^verbatim>\<open>+theory,+proof,+ML,+visible,-invisible\<close>'';
-  see also the {\LaTeX} macros \<^verbatim>\<open>\isakeeptag\<close>, \<^verbatim>\<open>\isadroptag\<close>, and
-  \<^verbatim>\<open>\isafoldtag\<close>, in @{file "~~/lib/texinputs/isabelle.sty"}.
+  The \<^verbatim>\<open>-t\<close> option tells {\LaTeX} how to interpret tagged Isabelle command
+  regions. Tags are specified as a comma separated list of modifier/name
+  pairs: ``\<^verbatim>\<open>+\<close>\<open>foo\<close>'' (or just ``\<open>foo\<close>'') means to keep, ``\<^verbatim>\<open>-\<close>\<open>foo\<close>'' to
+  drop, and ``\<^verbatim>\<open>/\<close>\<open>foo\<close>'' to fold text tagged as \<open>foo\<close>. The builtin default is
+  equivalent to the tag specification
+  ``\<^verbatim>\<open>+theory,+proof,+ML,+visible,-invisible\<close>''; see also the {\LaTeX} macros
+  \<^verbatim>\<open>\isakeeptag\<close>, \<^verbatim>\<open>\isadroptag\<close>, and \<^verbatim>\<open>\isafoldtag\<close>, in @{file
+  "~~/lib/texinputs/isabelle.sty"}.
 
   \<^medskip>
-  Document preparation requires a \<^verbatim>\<open>document\<close>
-  directory within the session sources.  This directory is supposed to
-  contain all the files needed to produce the final document --- apart
-  from the actual theories which are generated by Isabelle.
+  Document preparation requires a \<^verbatim>\<open>document\<close> directory within the session
+  sources. This directory is supposed to contain all the files needed to
+  produce the final document --- apart from the actual theories which are
+  generated by Isabelle.
 
   \<^medskip>
-  For most practical purposes, @{tool document} is smart
-  enough to create any of the specified output formats, taking
-  \<^verbatim>\<open>root.tex\<close> supplied by the user as a starting point.  This
-  even includes multiple runs of {\LaTeX} to accommodate references
-  and bibliographies (the latter assumes \<^verbatim>\<open>root.bib\<close> within
-  the same directory).
+  For most practical purposes, @{tool document} is smart enough to create any
+  of the specified output formats, taking \<^verbatim>\<open>root.tex\<close> supplied by the user as
+  a starting point. This even includes multiple runs of {\LaTeX} to
+  accommodate references and bibliographies (the latter assumes \<^verbatim>\<open>root.bib\<close>
+  within the same directory).
 
-  In more complex situations, a separate \<^verbatim>\<open>build\<close> script for
-  the document sources may be given.  It is invoked with command-line
-  arguments for the document format and the document variant name.
-  The script needs to produce corresponding output files, e.g.\
-  \<^verbatim>\<open>root.pdf\<close> for target format \<^verbatim>\<open>pdf\<close> (and default
-  variants).  The main work can be again delegated to @{tool latex},
-  but it is also possible to harvest generated {\LaTeX} sources and
-  copy them elsewhere.
+  In more complex situations, a separate \<^verbatim>\<open>build\<close> script for the document
+  sources may be given. It is invoked with command-line arguments for the
+  document format and the document variant name. The script needs to produce
+  corresponding output files, e.g.\ \<^verbatim>\<open>root.pdf\<close> for target format \<^verbatim>\<open>pdf\<close> (and
+  default variants). The main work can again be delegated to @{tool latex},
+  but it is also possible to harvest generated {\LaTeX} sources and copy them
+  elsewhere.
 
   \<^medskip>
-  When running the session, Isabelle copies the content of
-  the original \<^verbatim>\<open>document\<close> directory into its proper place
-  within @{setting ISABELLE_BROWSER_INFO}, according to the session
-  path and document variant.  Then, for any processed theory \<open>A\<close>
-  some {\LaTeX} source is generated and put there as \<open>A\<close>\<^verbatim>\<open>.tex\<close>.
-  Furthermore, a list of all generated theory
-  files is put into \<^verbatim>\<open>session.tex\<close>.  Typically, the root
-  {\LaTeX} file provided by the user would include \<^verbatim>\<open>session.tex\<close>
-  to get a document containing all the theories.
+  When running the session, Isabelle copies the content of the original
+  \<^verbatim>\<open>document\<close> directory into its proper place within @{setting
+  ISABELLE_BROWSER_INFO}, according to the session path and document variant.
+  Then, for any processed theory \<open>A\<close> some {\LaTeX} source is generated and put
+  there as \<open>A\<close>\<^verbatim>\<open>.tex\<close>. Furthermore, a list of all generated theory files is
+  put into \<^verbatim>\<open>session.tex\<close>. Typically, the root {\LaTeX} file provided by the
+  user would include \<^verbatim>\<open>session.tex\<close> to get a document containing all the
+  theories.
 
-  The {\LaTeX} versions of the theories require some macros defined in
-  @{file "~~/lib/texinputs/isabelle.sty"}.  Doing \<^verbatim>\<open>\usepackage{isabelle}\<close>
-  in \<^verbatim>\<open>root.tex\<close> should be fine; the underlying @{tool latex} already
-  includes an appropriate path specification for {\TeX} inputs.
+  The {\LaTeX} versions of the theories require some macros defined in @{file
+  "~~/lib/texinputs/isabelle.sty"}. Doing \<^verbatim>\<open>\usepackage{isabelle}\<close> in
+  \<^verbatim>\<open>root.tex\<close> should be fine; the underlying @{tool latex} already includes an
+  appropriate path specification for {\TeX} inputs.
 
-  If the text contains any references to Isabelle symbols (such as
-  \<^verbatim>\<open>\<forall>\<close>) then \<^verbatim>\<open>isabellesym.sty\<close> should be included as well.
-  This package contains a standard set of {\LaTeX} macro definitions
-  \<^verbatim>\<open>\isasym\<close>\<open>foo\<close> corresponding to \<^verbatim>\<open>\\<close>\<^verbatim>\<open><\<close>\<open>foo\<close>\<^verbatim>\<open>>\<close>,
-  see @{cite "isabelle-implementation"} for a
-  complete list of predefined Isabelle symbols.  Users may invent
-  further symbols as well, just by providing {\LaTeX} macros in a
-  similar fashion as in @{file "~~/lib/texinputs/isabellesym.sty"} of
-  the Isabelle distribution.
+  If the text contains any references to Isabelle symbols (such as \<^verbatim>\<open>\<forall>\<close>) then
+  \<^verbatim>\<open>isabellesym.sty\<close> should be included as well. This package contains a
+  standard set of {\LaTeX} macro definitions \<^verbatim>\<open>\isasym\<close>\<open>foo\<close> corresponding to
+  \<^verbatim>\<open>\\<close>\<^verbatim>\<open><\<close>\<open>foo\<close>\<^verbatim>\<open>>\<close>, see @{cite "isabelle-implementation"} for a complete list
+  of predefined Isabelle symbols. Users may invent further symbols as well,
+  just by providing {\LaTeX} macros in a similar fashion as in @{file
+  "~~/lib/texinputs/isabellesym.sty"} of the Isabelle distribution.
 
-  For proper setup of DVI and PDF documents (with hyperlinks and
-  bookmarks), we recommend to include @{file
-  "~~/lib/texinputs/pdfsetup.sty"} as well.
+  For proper setup of DVI and PDF documents (with hyperlinks and bookmarks),
+  we recommend including @{file "~~/lib/texinputs/pdfsetup.sty"} as well.
 
   \<^medskip>
-  As a final step of Isabelle document preparation, @{tool
-  document}~\<^verbatim>\<open>-c\<close> is run on the resulting copy of the
-  \<^verbatim>\<open>document\<close> directory.  Thus the actual output document is
-  built and installed in its proper place.  The generated sources are
-  deleted after successful run of {\LaTeX} and friends.
+  As a final step of Isabelle document preparation, @{tool document}~\<^verbatim>\<open>-c\<close> is
+  run on the resulting copy of the \<^verbatim>\<open>document\<close> directory. Thus the actual
+  output document is built and installed in its proper place. The generated
+  sources are deleted after successful run of {\LaTeX} and friends.
 
   Some care is needed if the document output location is configured
   differently, say within a directory whose content is still required
@@ -262,8 +254,9 @@
 section \<open>Running {\LaTeX} within the Isabelle environment
   \label{sec:tool-latex}\<close>
 
-text \<open>The @{tool_def latex} tool provides the basic interface for
-  Isabelle document preparation.  Its usage is:
+text \<open>
+  The @{tool_def latex} tool provides the basic interface for Isabelle
+  document preparation. Its usage is:
   @{verbatim [display]
 \<open>Usage: isabelle latex [OPTIONS] [FILE]
 
@@ -274,32 +267,30 @@
   Run LaTeX (and related tools) on FILE (default root.tex),
   producing the specified output format.\<close>}
 
-  Appropriate {\LaTeX}-related programs are run on the input file,
-  according to the given output format: @{executable latex},
-  @{executable pdflatex}, @{executable dvips}, @{executable bibtex}
-  (for \<^verbatim>\<open>bbl\<close>), and @{executable makeindex} (for \<^verbatim>\<open>idx\<close>).
-  The actual commands are determined from the settings
-  environment (@{setting ISABELLE_PDFLATEX} etc.).
+  Appropriate {\LaTeX}-related programs are run on the input file, according
+  to the given output format: @{executable latex}, @{executable pdflatex},
+  @{executable dvips}, @{executable bibtex} (for \<^verbatim>\<open>bbl\<close>), and @{executable
+  makeindex} (for \<^verbatim>\<open>idx\<close>). The actual commands are determined from the
+  settings environment (@{setting ISABELLE_PDFLATEX} etc.).
 
-  The \<^verbatim>\<open>sty\<close> output format causes the Isabelle style files to
-  be updated from the distribution.  This is useful in special
-  situations where the document sources are to be processed another
-  time by separate tools.
+  The \<^verbatim>\<open>sty\<close> output format causes the Isabelle style files to be updated from
+  the distribution. This is useful in special situations where the document
+  sources are to be processed another time by separate tools.
 
-  The \<^verbatim>\<open>syms\<close> output is for internal use; it generates lists
-  of symbols that are available without loading additional {\LaTeX}
-  packages.
+  The \<^verbatim>\<open>syms\<close> output is for internal use; it generates lists of symbols that
+  are available without loading additional {\LaTeX} packages.
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
-text \<open>Invoking @{tool latex} by hand may be occasionally useful when
-  debugging failed attempts of the automatic document preparation
-  stage of batch-mode Isabelle.  The abortive process leaves the
-  sources at a certain place within @{setting ISABELLE_BROWSER_INFO},
-  see the runtime error message for details.  This enables users to
-  inspect {\LaTeX} runs in further detail, e.g.\ like this:
+text \<open>
+  Invoking @{tool latex} by hand may occasionally be useful when debugging
+  failed attempts of the automatic document preparation stage of batch-mode
+  Isabelle. The abortive process leaves the sources at a certain place within
+  @{setting ISABELLE_BROWSER_INFO}, see the runtime error message for details.
+  This enables users to inspect {\LaTeX} runs in further detail, e.g.\ like
+  this:
 
   @{verbatim [display]
 \<open>cd "$(isabelle getenv -b ISABELLE_BROWSER_INFO)/Unsorted/Test/document"
--- a/src/Doc/System/Scala.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/System/Scala.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,43 +1,47 @@
+(*:wrap=hard:maxLineLen=78:*)
+
 theory Scala
 imports Base
 begin
 
 chapter \<open>Isabelle/Scala development tools\<close>
 
-text \<open>Isabelle/ML and Isabelle/Scala are the two main language
-environments for Isabelle tool implementations.  There are some basic
-command-line tools to work with the underlying Java Virtual Machine,
-the Scala toplevel and compiler.  Note that Isabelle/jEdit
-@{cite "isabelle-jedit"} provides a Scala Console for interactive
-experimentation within the running application.\<close>
+text \<open>
+  Isabelle/ML and Isabelle/Scala are the two main language environments for
+  Isabelle tool implementations. There are some basic command-line tools to
+  work with the underlying Java Virtual Machine, the Scala toplevel and
+  compiler. Note that Isabelle/jEdit @{cite "isabelle-jedit"} provides a Scala
+  Console for interactive experimentation within the running application.
+\<close>
 
 
 section \<open>Java Runtime Environment within Isabelle \label{sec:tool-java}\<close>
 
-text \<open>The @{tool_def java} tool is a direct wrapper for the Java
-  Runtime Environment, within the regular Isabelle settings
-  environment (\secref{sec:settings}).  The command line arguments are
-  that of the underlying Java version.  It is run in \<^verbatim>\<open>-server\<close> mode
-  if possible, to improve performance (at the cost of extra startup time).
+text \<open>
+  The @{tool_def java} tool is a direct wrapper for the Java Runtime
+  Environment, within the regular Isabelle settings environment
+  (\secref{sec:settings}). The command line arguments are those of the
+  underlying Java version. It is run in \<^verbatim>\<open>-server\<close> mode if possible, to
+  improve performance (at the cost of extra startup time).
 
-  The \<^verbatim>\<open>java\<close> executable is the one within @{setting
-  ISABELLE_JDK_HOME}, according to the standard directory layout for
-  official JDK distributions.  The class loader is augmented such that
-  the name space of \<^verbatim>\<open>Isabelle/Pure.jar\<close> is available,
-  which is the main Isabelle/Scala module.
+  The \<^verbatim>\<open>java\<close> executable is the one within @{setting ISABELLE_JDK_HOME},
+  according to the standard directory layout for official JDK distributions.
+  The class loader is augmented such that the name space of
+  \<^verbatim>\<open>Isabelle/Pure.jar\<close> is available, which is the main Isabelle/Scala module.
 
-  For example, the following command-line invokes the main method of
-  class \<^verbatim>\<open>isabelle.GUI_Setup\<close>, which opens a windows with
-  some diagnostic information about the Isabelle environment:
+  For example, the following command-line invokes the main method of class
+  \<^verbatim>\<open>isabelle.GUI_Setup\<close>, which opens a window with some diagnostic
+  information about the Isabelle environment:
   @{verbatim [display] \<open>isabelle java isabelle.GUI_Setup\<close>}
 \<close>
 
 
 section \<open>Scala toplevel \label{sec:tool-scala}\<close>
 
-text \<open>The @{tool_def scala} tool is a direct wrapper for the Scala
-  toplevel; see also @{tool java} above.  The command line arguments
-  are that of the underlying Scala version.
+text \<open>
+  The @{tool_def scala} tool is a direct wrapper for the Scala toplevel; see
+  also @{tool java} above. The command line arguments are those of the
+  underlying Scala version.
 
   This allows to interact with Isabelle/Scala in TTY mode like this:
   @{verbatim [display]
@@ -51,32 +55,33 @@
 
 section \<open>Scala compiler \label{sec:tool-scalac}\<close>
 
-text \<open>The @{tool_def scalac} tool is a direct wrapper for the Scala
-  compiler; see also @{tool scala} above.  The command line arguments
-  are that of the underlying Scala version.
+text \<open>
+  The @{tool_def scalac} tool is a direct wrapper for the Scala compiler; see
+  also @{tool scala} above. The command line arguments are those of the
+  underlying Scala version.
 
   This allows to compile further Scala modules, depending on existing
-  Isabelle/Scala functionality.  The resulting class or jar files can
-  be added to the Java classpath using the \<^verbatim>\<open>classpath\<close> Bash
-  function that is provided by the Isabelle process environment.  Thus
-  add-on components can register themselves in a modular manner, see
-  also \secref{sec:components}.
+  Isabelle/Scala functionality. The resulting class or jar files can be added
+  to the Java classpath using the \<^verbatim>\<open>classpath\<close> Bash function that is provided
+  by the Isabelle process environment. Thus add-on components can register
+  themselves in a modular manner; see also \secref{sec:components}.
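+
+  For example, a hypothetical module \<^verbatim>\<open>Test.scala\<close> that refers to
+  Isabelle/Scala classes might be compiled like this:
+  @{verbatim [display] \<open>isabelle scalac Test.scala\<close>}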
 
-  Note that jEdit @{cite "isabelle-jedit"} has its own mechanisms for
-  adding plugin components, which needs special attention since
-  it overrides the standard Java class loader.\<close>
+  Note that jEdit @{cite "isabelle-jedit"} has its own mechanisms for adding
+  plugin components, which needs special attention since it overrides the
+  standard Java class loader.
+\<close>
 
 
 section \<open>Scala script wrapper\<close>
 
-text \<open>The executable @{executable
-  "$ISABELLE_HOME/bin/isabelle_scala_script"} allows to run
-  Isabelle/Scala source files stand-alone programs, by using a
+text \<open>
+  The executable @{executable "$ISABELLE_HOME/bin/isabelle_scala_script"}
+  allows to run Isabelle/Scala source files as stand-alone programs, using a
   suitable ``hash-bang'' line and executable file permissions.
 
-  The subsequent example assumes that the main Isabelle binaries have
-  been installed in some directory that is included in @{setting PATH}
-  (see also @{tool "install"}):
+  The subsequent example assumes that the main Isabelle binaries have been
+  installed in some directory that is included in @{setting PATH} (see also
+  @{tool "install"}):
   @{verbatim [display]
 \<open>#!/usr/bin/env isabelle_scala_script
 
@@ -84,8 +89,8 @@
 Console.println("browser_info = " + options.bool("browser_info"))
 Console.println("document = " + options.string("document"))\<close>}
 
-  Alternatively the full @{file
-  "$ISABELLE_HOME/bin/isabelle_scala_script"} may be specified in
-  expanded form.\<close>
+  Alternatively the full @{file "$ISABELLE_HOME/bin/isabelle_scala_script"}
+  may be specified in expanded form.
+\<close>
 
 end
--- a/src/Doc/System/Sessions.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/System/Sessions.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,52 +1,52 @@
+(*:wrap=hard:maxLineLen=78:*)
+
 theory Sessions
 imports Base
 begin
 
 chapter \<open>Isabelle sessions and build management \label{ch:session}\<close>
 
-text \<open>An Isabelle \<^emph>\<open>session\<close> consists of a collection of related
-  theories that may be associated with formal documents
-  (\chref{ch:present}).  There is also a notion of \<^emph>\<open>persistent
-  heap\<close> image to capture the state of a session, similar to
-  object-code in compiled programming languages.  Thus the concept of
-  session resembles that of a ``project'' in common IDE environments,
-  but the specific name emphasizes the connection to interactive
-  theorem proving: the session wraps-up the results of
-  user-interaction with the prover in a persistent form.
+text \<open>
+  An Isabelle \<^emph>\<open>session\<close> consists of a collection of related theories that may
+  be associated with formal documents (\chref{ch:present}). There is also a
+  notion of \<^emph>\<open>persistent heap\<close> image to capture the state of a session,
+  similar to object-code in compiled programming languages. Thus the concept
+  of session resembles that of a ``project'' in common IDE environments, but
+  the specific name emphasizes the connection to interactive theorem proving:
+  the session wraps up the results of user interaction with the prover in a
+  persistent form.
 
-  Application sessions are built on a given parent session, which may
-  be built recursively on other parents.  Following this path in the
-  hierarchy eventually leads to some major object-logic session like
-  \<open>HOL\<close>, which itself is based on \<open>Pure\<close> as the common
-  root of all sessions.
+  Application sessions are built on a given parent session, which may be built
+  recursively on other parents. Following this path in the hierarchy
+  eventually leads to some major object-logic session like \<open>HOL\<close>, which itself
+  is based on \<open>Pure\<close> as the common root of all sessions.
 
-  Processing sessions may take considerable time.  Isabelle build
-  management helps to organize this efficiently.  This includes
-  support for parallel build jobs, in addition to the multithreaded
-  theory and proof checking that is already provided by the prover
-  process itself.\<close>
+  Processing sessions may take considerable time. Isabelle build management
+  helps to organize this efficiently. This includes support for parallel build
+  jobs, in addition to the multithreaded theory and proof checking that is
+  already provided by the prover process itself.
+\<close>
 
 
 section \<open>Session ROOT specifications \label{sec:session-root}\<close>
 
-text \<open>Session specifications reside in files called \<^verbatim>\<open>ROOT\<close>
-  within certain directories, such as the home locations of registered
-  Isabelle components or additional project directories given by the
-  user.
+text \<open>
+  Session specifications reside in files called \<^verbatim>\<open>ROOT\<close> within certain
+  directories, such as the home locations of registered Isabelle components or
+  additional project directories given by the user.
 
-  The ROOT file format follows the lexical conventions of the
-  \<^emph>\<open>outer syntax\<close> of Isabelle/Isar, see also
-  @{cite "isabelle-isar-ref"}.  This defines common forms like
-  identifiers, names, quoted strings, verbatim text, nested comments
-  etc.  The grammar for @{syntax session_chapter} and @{syntax
-  session_entry} is given as syntax diagram below; each ROOT file may
-  contain multiple specifications like this.  Chapters help to
-  organize browser info (\secref{sec:info}), but have no formal
-  meaning.  The default chapter is ``\<open>Unsorted\<close>''.
+  The ROOT file format follows the lexical conventions of the \<^emph>\<open>outer syntax\<close>
+  of Isabelle/Isar, see also @{cite "isabelle-isar-ref"}. This defines common
+  forms like identifiers, names, quoted strings, verbatim text, nested
+  comments etc. The grammar for @{syntax session_chapter} and @{syntax
+  session_entry} is given as syntax diagram below; each ROOT file may contain
+  multiple specifications like this. Chapters help to organize browser info
+  (\secref{sec:info}), but have no formal meaning. The default chapter is
+  ``\<open>Unsorted\<close>''.
 
-  Isabelle/jEdit @{cite "isabelle-jedit"} includes a simple editing
-  mode \<^verbatim>\<open>isabelle-root\<close> for session ROOT files, which is
-  enabled by default for any file of that name.
+  Isabelle/jEdit @{cite "isabelle-jedit"} includes a simple editing mode
+  \<^verbatim>\<open>isabelle-root\<close> for session ROOT files, which is enabled by default for any
+  file of that name.
 
   @{rail \<open>
     @{syntax_def session_chapter}: @'chapter' @{syntax name}
@@ -77,151 +77,143 @@
     document_files: @'document_files' ('(' dir ')')? (@{syntax name}+)
   \<close>}
 
-  \<^descr> \isakeyword{session}~\<open>A = B + body\<close> defines a new
-  session \<open>A\<close> based on parent session \<open>B\<close>, with its
-  content given in \<open>body\<close> (theories and auxiliary source files).
-  Note that a parent (like \<open>HOL\<close>) is mandatory in practical
+  \<^descr> \isakeyword{session}~\<open>A = B + body\<close> defines a new session \<open>A\<close> based on
+  parent session \<open>B\<close>, with its content given in \<open>body\<close> (theories and auxiliary
+  source files). Note that a parent (like \<open>HOL\<close>) is mandatory in practical
   applications: only Isabelle/Pure can bootstrap itself from nothing.
 
-  All such session specifications together describe a hierarchy (tree)
-  of sessions, with globally unique names.  The new session name
-  \<open>A\<close> should be sufficiently long and descriptive to stand on
-  its own in a potentially large library.
+  All such session specifications together describe a hierarchy (tree) of
+  sessions, with globally unique names. The new session name \<open>A\<close> should be
+  sufficiently long and descriptive to stand on its own in a potentially large
+  library.
 
-  \<^descr> \isakeyword{session}~\<open>A (groups)\<close> indicates a
-  collection of groups where the new session is a member.  Group names
-  are uninterpreted and merely follow certain conventions.  For
-  example, the Isabelle distribution tags some important sessions by
-  the group name called ``\<open>main\<close>''.  Other projects may invent
-  their own conventions, but this requires some care to avoid clashes
+  \<^descr> \isakeyword{session}~\<open>A (groups)\<close> indicates a collection of groups where
+  the new session is a member. Group names are uninterpreted and merely follow
+  certain conventions. For example, the Isabelle distribution tags some
+  important sessions by the group name called ``\<open>main\<close>''. Other projects may
+  invent their own conventions, but this requires some care to avoid clashes
   within this unchecked name space.
 
-  \<^descr> \isakeyword{session}~\<open>A\<close>~\isakeyword{in}~\<open>dir\<close>
-  specifies an explicit directory for this session; by default this is
-  the current directory of the \<^verbatim>\<open>ROOT\<close> file.
+  \<^descr> \isakeyword{session}~\<open>A\<close>~\isakeyword{in}~\<open>dir\<close> specifies an explicit
+  directory for this session; by default this is the current directory of the
+  \<^verbatim>\<open>ROOT\<close> file.
 
-  All theories and auxiliary source files are located relatively to
-  the session directory.  The prover process is run within the same as
-  its current working directory.
-
-  \<^descr> \isakeyword{description}~\<open>text\<close> is a free-form
-  annotation for this session.
+  All theories and auxiliary source files are located relative to the session
+  directory. The prover process is run with the session directory as its
+  current working directory.
 
-  \<^descr> \isakeyword{options}~\<open>[x = a, y = b, z]\<close> defines
-  separate options (\secref{sec:system-options}) that are used when
-  processing this session, but \<^emph>\<open>without\<close> propagation to child
-  sessions.  Note that \<open>z\<close> abbreviates \<open>z = true\<close> for
-  Boolean options.
+  \<^descr> \isakeyword{description}~\<open>text\<close> is a free-form annotation for this
+  session.
 
-  \<^descr> \isakeyword{theories}~\<open>options names\<close> specifies a
-  block of theories that are processed within an environment that is
-  augmented by the given options, in addition to the global session
-  options given before.  Any number of blocks of \isakeyword{theories}
-  may be given.  Options are only active for each
+  \<^descr> \isakeyword{options}~\<open>[x = a, y = b, z]\<close> defines separate options
+  (\secref{sec:system-options}) that are used when processing this session,
+  but \<^emph>\<open>without\<close> propagation to child sessions. Note that \<open>z\<close> abbreviates \<open>z =
+  true\<close> for Boolean options.
+
+  \<^descr> \isakeyword{theories}~\<open>options names\<close> specifies a block of theories that
+  are processed within an environment that is augmented by the given options,
+  in addition to the global session options given before. Any number of blocks
+  of \isakeyword{theories} may be given. Options are only active for each
   \isakeyword{theories} block separately.
 
-  \<^descr> \isakeyword{files}~\<open>files\<close> lists additional source
-  files that are involved in the processing of this session.  This
-  should cover anything outside the formal content of the theory
-  sources.  In contrast, files that are loaded formally
-  within a theory, e.g.\ via @{command "ML_file"}, need not be
+  \<^descr> \isakeyword{files}~\<open>files\<close> lists additional source files that are involved
+  in the processing of this session. This should cover anything outside the
+  formal content of the theory sources. In contrast, files that are loaded
+  formally within a theory, e.g.\ via @{command "ML_file"}, need not be
   declared again.
 
-  \<^descr> \isakeyword{document_files}~\<open>(\<close>\isakeyword{in}~\<open>base_dir) files\<close> lists source files for document preparation,
-  typically \<^verbatim>\<open>.tex\<close> and \<^verbatim>\<open>.sty\<close> for {\LaTeX}.
-  Only these explicitly given files are copied from the base directory
-  to the document output directory, before formal document processing
-  is started (see also \secref{sec:tool-document}).  The local path
-  structure of the \<open>files\<close> is preserved, which allows to
-  reconstruct the original directory hierarchy of \<open>base_dir\<close>.
+  \<^descr> \isakeyword{document_files}~\<open>(\<close>\isakeyword{in}~\<open>base_dir) files\<close> lists
+  source files for document preparation, typically \<^verbatim>\<open>.tex\<close> and \<^verbatim>\<open>.sty\<close> for
+  {\LaTeX}. Only these explicitly given files are copied from the base
+  directory to the document output directory, before formal document
+  processing is started (see also \secref{sec:tool-document}). The local path
+  structure of the \<open>files\<close> is preserved, which makes it possible to
+  reconstruct the original directory hierarchy of \<open>base_dir\<close>.
 
   \<^descr> \isakeyword{document_files}~\<open>files\<close> abbreviates
-  \isakeyword{document_files}~\<open>(\<close>\isakeyword{in}~\<open>document) files\<close>, i.e.\ document sources are taken from the base
-  directory \<^verbatim>\<open>document\<close> within the session root directory.
+  \isakeyword{document_files}~\<open>(\<close>\isakeyword{in}~\<open>document) files\<close>, i.e.\
+  document sources are taken from the base directory \<^verbatim>\<open>document\<close> within the
+  session root directory.
 \<close>
 
 
 subsubsection \<open>Examples\<close>
 
-text \<open>See @{file "~~/src/HOL/ROOT"} for a diversity of practically
-  relevant situations, although it uses relatively complex
-  quasi-hierarchic naming conventions like \<open>HOL\<dash>SPARK\<close>,
-  \<open>HOL\<dash>SPARK\<dash>Examples\<close>.  An alternative is to use
-  unqualified names that are relatively long and descriptive, as in
-  the Archive of Formal Proofs (@{url "http://afp.sourceforge.net"}), for
-  example.\<close>
+text \<open>
+  See @{file "~~/src/HOL/ROOT"} for a diversity of practically relevant
+  situations, although it uses relatively complex quasi-hierarchic naming
+  conventions like \<^verbatim>\<open>HOL-SPARK\<close>, \<^verbatim>\<open>HOL-SPARK-Examples\<close>. An alternative is to
+  use unqualified names that are relatively long and descriptive, as in the
+  Archive of Formal Proofs (@{url "http://afp.sourceforge.net"}), for
+  example.
+\<close>
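+
+text \<open>
+  A minimal \<^verbatim>\<open>ROOT\<close> entry might look like this (session, theory, and file
+  names are purely illustrative):
+  @{verbatim [display]
+\<open>session My_Session = HOL +
+  options [document = pdf]
+  theories
+    My_Theory
+  document_files "root.tex"\<close>}
+\<close>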
 
 
 section \<open>System build options \label{sec:system-options}\<close>
 
-text \<open>See @{file "~~/etc/options"} for the main defaults provided by
-  the Isabelle distribution.  Isabelle/jEdit @{cite "isabelle-jedit"}
-  includes a simple editing mode \<^verbatim>\<open>isabelle-options\<close> for
-  this file-format.
+text \<open>
+  See @{file "~~/etc/options"} for the main defaults provided by the Isabelle
+  distribution. Isabelle/jEdit @{cite "isabelle-jedit"} includes a simple
+  editing mode \<^verbatim>\<open>isabelle-options\<close> for this file-format.
 
-  The following options are particularly relevant to build Isabelle
-  sessions, in particular with document preparation
-  (\chref{ch:present}).
+  The following options are particularly relevant for building Isabelle
+  sessions, notably with document preparation (\chref{ch:present}).
 
-  \<^item> @{system_option_def "browser_info"} controls output of HTML
-  browser info, see also \secref{sec:info}.
+    \<^item> @{system_option_def "browser_info"} controls output of HTML browser
+    info, see also \secref{sec:info}.
 
-  \<^item> @{system_option_def "document"} specifies the document output
-  format, see @{tool document} option \<^verbatim>\<open>-o\<close> in
-  \secref{sec:tool-document}.  In practice, the most relevant values
-  are \<^verbatim>\<open>document=false\<close> or \<^verbatim>\<open>document=pdf\<close>.
+    \<^item> @{system_option_def "document"} specifies the document output format,
+    see @{tool document} option \<^verbatim>\<open>-o\<close> in \secref{sec:tool-document}. In
+    practice, the most relevant values are \<^verbatim>\<open>document=false\<close> or
+    \<^verbatim>\<open>document=pdf\<close>.
 
-  \<^item> @{system_option_def "document_output"} specifies an
-  alternative directory for generated output of the document
-  preparation system; the default is within the @{setting
-  "ISABELLE_BROWSER_INFO"} hierarchy as explained in
-  \secref{sec:info}.  See also @{tool mkroot}, which generates a
-  default configuration with output readily available to the author of
-  the document.
+    \<^item> @{system_option_def "document_output"} specifies an alternative
+    directory for generated output of the document preparation system; the
+    default is within the @{setting "ISABELLE_BROWSER_INFO"} hierarchy as
+    explained in \secref{sec:info}. See also @{tool mkroot}, which generates a
+    default configuration with output readily available to the author of the
+    document.
 
-  \<^item> @{system_option_def "document_variants"} specifies document
-  variants as a colon-separated list of \<open>name=tags\<close> entries,
-  corresponding to @{tool document} options \<^verbatim>\<open>-n\<close> and
-  \<^verbatim>\<open>-t\<close>.
+    \<^item> @{system_option_def "document_variants"} specifies document variants as
+    a colon-separated list of \<open>name=tags\<close> entries, corresponding to @{tool
+    document} options \<^verbatim>\<open>-n\<close> and \<^verbatim>\<open>-t\<close>.
+
+    For example, \<^verbatim>\<open>document_variants=document:outline=/proof,/ML\<close> indicates
+    two documents: the one called \<^verbatim>\<open>document\<close> with default tags, and the other
+    called \<^verbatim>\<open>outline\<close> where proofs and ML sections are folded.
 
-  For example, \<^verbatim>\<open>document_variants=document:outline=/proof,/ML\<close> indicates
-  two documents: the one called \<^verbatim>\<open>document\<close> with default tags,
-  and the other called \<^verbatim>\<open>outline\<close> where proofs and ML
-  sections are folded.
+    Document variant names are just a matter of conventions. It is also
+    possible to use different document variant names (without tags) for
+    different document root entries, see also \secref{sec:tool-document}.
 
-  Document variant names are just a matter of conventions.  It is also
-  possible to use different document variant names (without tags) for
-  different document root entries, see also
-  \secref{sec:tool-document}.
+    \<^item> @{system_option_def "threads"} determines the number of worker threads
+    for parallel checking of theories and proofs. The default \<open>0\<close> means that a
+    sensible maximum value is determined by the underlying hardware. For
+    machines with many cores or with hyperthreading, this often requires
+    manual adjustment (on the command-line or within personal settings or
+    preferences, not within a session \<^verbatim>\<open>ROOT\<close>).
 
-  \<^item> @{system_option_def "threads"} determines the number of worker
-  threads for parallel checking of theories and proofs.  The default
-  \<open>0\<close> means that a sensible maximum value is determined by the
-  underlying hardware.  For machines with many cores or with
-  hyperthreading, this is often requires manual adjustment (on the
-  command-line or within personal settings or preferences, not within
-  a session \<^verbatim>\<open>ROOT\<close>).
+    \<^item> @{system_option_def "condition"} specifies a comma-separated list of
+    process environment variables (or Isabelle settings) that are required for
+    the subsequent theories to be processed. Conditions are considered
+    ``true'' if the corresponding environment value is defined and non-empty.
 
-  \<^item> @{system_option_def "condition"} specifies a comma-separated
-  list of process environment variables (or Isabelle settings) that
-  are required for the subsequent theories to be processed.
-  Conditions are considered ``true'' if the corresponding environment
-  value is defined and non-empty.
+    For example, \<^verbatim>\<open>condition=ISABELLE_FULL_TEST\<close> may be used to guard
+    extraordinary theories, which are meant to be enabled explicitly via some
+    shell prefix \<^verbatim>\<open>env ISABELLE_FULL_TEST=true\<close> before invoking @{tool build}.
 
-  For example, the \<^verbatim>\<open>condition=ISABELLE_FULL_TEST\<close> may be
-  used to guard extraordinary theories, which are meant to be enabled
-  explicitly via some shell prefix \<^verbatim>\<open>env ISABELLE_FULL_TEST=true\<close>
-  before invoking @{tool build}.
+    \<^item> @{system_option_def "timeout"} and @{system_option_def "timeout_scale"}
+    specify a real wall-clock timeout for the session as a whole: the two
+    values are multiplied and taken as the number of seconds. Typically,
+    @{system_option "timeout"} is given for individual sessions, and
+    @{system_option "timeout_scale"} as a global adjustment to overall hardware
+    performance.
 
-  \<^item> @{system_option_def "timeout"} specifies a real wall-clock
-  timeout (in seconds) for the session as a whole.  The timer is
-  controlled outside the ML process by the JVM that runs
-  Isabelle/Scala.  Thus it is relatively reliable in canceling
-  processes that get out of control, even if there is a deadlock
-  without CPU time usage.
+    The timer is controlled outside the ML process by the JVM that runs
+    Isabelle/Scala. Thus it is relatively reliable in canceling processes that
+    get out of control, even if there is a deadlock without CPU time usage.
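+
+    For example (numbers purely illustrative), a session declared with
+    \isakeyword{options}~\<open>[timeout = 300]\<close> that is built with
+    \<^verbatim>\<open>-o timeout_scale=2\<close> is effectively limited to 600 seconds of
+    wall-clock time.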
 
-
-  The @{tool_def options} tool prints Isabelle system options.  Its
+  The @{tool_def options} tool prints Isabelle system options. Its
   command-line usage is:
   @{verbatim [display]
 \<open>Usage: isabelle options [OPTIONS] [MORE_OPTIONS ...]
@@ -235,32 +227,29 @@
   Report Isabelle system options, augmented by MORE_OPTIONS given as
   arguments NAME=VAL or NAME.\<close>}
 
-  The command line arguments provide additional system options of the
-  form \<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<open>name\<close>
-  for Boolean options.
+  The command line arguments provide additional system options of the form
+  \<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<open>name\<close> for Boolean options.
+
+  Option \<^verbatim>\<open>-b\<close> augments the implicit environment of system options by the ones
+  of @{setting ISABELLE_BUILD_OPTIONS}, cf.\ \secref{sec:tool-build}.
 
-  Option \<^verbatim>\<open>-b\<close> augments the implicit environment of system
-  options by the ones of @{setting ISABELLE_BUILD_OPTIONS}, cf.\
-  \secref{sec:tool-build}.
+  Option \<^verbatim>\<open>-g\<close> prints the value of the given option. Option \<^verbatim>\<open>-l\<close> lists all
+  options with their declaration and current value.
 
-  Option \<^verbatim>\<open>-g\<close> prints the value of the given option.
-  Option \<^verbatim>\<open>-l\<close> lists all options with their declaration and
-  current value.
-
-  Option \<^verbatim>\<open>-x\<close> specifies a file to export the result in
-  YXML format, instead of printing it in human-readable form.
+  Option \<^verbatim>\<open>-x\<close> specifies a file to export the result in YXML format, instead
+  of printing it in human-readable form.
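+
+  For example, the following invocation (an illustrative sketch) prints the
+  value of the \<^verbatim>\<open>threads\<close> option, augmented by @{setting ISABELLE_BUILD_OPTIONS}:
+  @{verbatim [display] \<open>isabelle options -b -g threads\<close>}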
 \<close>
 
 
 section \<open>Invoking the build process \label{sec:tool-build}\<close>
 
-text \<open>The @{tool_def build} tool invokes the build process for
-  Isabelle sessions.  It manages dependencies between sessions,
-  related sources of theories and auxiliary files, and target heap
-  images.  Accordingly, it runs instances of the prover process with
-  optional document preparation.  Its command-line usage
-  is:\footnote{Isabelle/Scala provides the same functionality via
-  \<^verbatim>\<open>isabelle.Build.build\<close>.}
+text \<open>
+  The @{tool_def build} tool invokes the build process for Isabelle sessions.
+  It manages dependencies between sessions, related sources of theories and
+  auxiliary files, and target heap images. Accordingly, it runs instances of
+  the prover process with optional document preparation. Its command-line
+  usage is:\<^footnote>\<open>Isabelle/Scala provides the same functionality via
+  \<^verbatim>\<open>isabelle.Build.build\<close>.\<close>
   @{verbatim [display]
 \<open>Usage: isabelle build [OPTIONS] [SESSIONS ...]
 
@@ -291,98 +280,87 @@
   ML_OPTIONS="..."\<close>}
 
   \<^medskip>
-  Isabelle sessions are defined via session ROOT files as
-  described in (\secref{sec:session-root}).  The totality of sessions
-  is determined by collecting such specifications from all Isabelle
-  component directories (\secref{sec:components}), augmented by more
-  directories given via options \<^verbatim>\<open>-d\<close>~\<open>DIR\<close> on the
-  command line.  Each such directory may contain a session
+  Isabelle sessions are defined via session ROOT files as described in
+  \secref{sec:session-root}. The totality of sessions is determined by
+  collecting such specifications from all Isabelle component directories
+  (\secref{sec:components}), augmented by more directories given via options
+  \<^verbatim>\<open>-d\<close>~\<open>DIR\<close> on the command line. Each such directory may contain a session
   \<^verbatim>\<open>ROOT\<close> file with several session specifications.
 
-  Any session root directory may refer recursively to further
-  directories of the same kind, by listing them in a catalog file
-  \<^verbatim>\<open>ROOTS\<close> line-by-line.  This helps to organize large
-  collections of session specifications, or to make \<^verbatim>\<open>-d\<close>
-  command line options persistent (say within
+  Any session root directory may refer recursively to further directories of
+  the same kind, by listing them in a catalog file \<^verbatim>\<open>ROOTS\<close> line-by-line. This
+  helps to organize large collections of session specifications, or to make
+  \<^verbatim>\<open>-d\<close> command line options persistent (say within
   \<^verbatim>\<open>$ISABELLE_HOME_USER/ROOTS\<close>).
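+
+  A \<^verbatim>\<open>ROOTS\<close> catalog is plain text with one directory name per line, e.g.\
+  (directory names purely illustrative):
+  @{verbatim [display]
+\<open>Sub_Project_A
+Contrib/Sub_Project_B\<close>}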
 
   \<^medskip>
-  The subset of sessions to be managed is determined via
-  individual \<open>SESSIONS\<close> given as command-line arguments, or
-  session groups that are given via one or more options \<^verbatim>\<open>-g\<close>~\<open>NAME\<close>.
-  Option \<^verbatim>\<open>-a\<close> selects all sessions.
-  The build tool takes session dependencies into account: the set of
-  selected sessions is completed by including all ancestors.
+  The subset of sessions to be managed is determined via individual \<open>SESSIONS\<close>
+  given as command-line arguments, or session groups that are given via one or
+  more options \<^verbatim>\<open>-g\<close>~\<open>NAME\<close>. Option \<^verbatim>\<open>-a\<close> selects all sessions. The build tool
+  takes session dependencies into account: the set of selected sessions is
+  completed by including all ancestors.
 
   \<^medskip>
-  One or more options \<^verbatim>\<open>-x\<close>~\<open>NAME\<close> specify
-  sessions to be excluded. All descendents of excluded sessions are removed
-  from the selection as specified above. Option \<^verbatim>\<open>-X\<close> is
-  analogous to this, but excluded sessions are specified by session group
-  membership.
+  One or more options \<^verbatim>\<open>-x\<close>~\<open>NAME\<close> specify sessions to be excluded. All
+  descendants of excluded sessions are removed from the selection as specified
+  above. Option \<^verbatim>\<open>-X\<close> is analogous to this, but excluded sessions are
+  specified by session group membership.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-R\<close> reverses the selection in the sense
-  that it refers to its requirements: all ancestor sessions excluding
-  the original selection.  This allows to prepare the stage for some
-  build process with different options, before running the main build
-  itself (without option \<^verbatim>\<open>-R\<close>).
+  Option \<^verbatim>\<open>-R\<close> reverses the selection in the sense that it refers to its
+  requirements: all ancestor sessions excluding the original selection. This
+  makes it possible to prepare the stage for some build process with different
+  options, before running the main build itself (without option \<^verbatim>\<open>-R\<close>).
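+
+  For example (session name purely illustrative), the requirements of a
+  session can be built first with different options, before the main build:
+  @{verbatim [display]
+\<open>isabelle build -b -R -o document=false HOL-Algebra
+isabelle build -b HOL-Algebra\<close>}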
 
   \<^medskip>
-  Option \<^verbatim>\<open>-D\<close> is similar to \<^verbatim>\<open>-d\<close>, but
-  selects all sessions that are defined in the given directories.
+  Option \<^verbatim>\<open>-D\<close> is similar to \<^verbatim>\<open>-d\<close>, but selects all sessions that are defined
+  in the given directories.
 
   \<^medskip>
   The build process depends on additional options
-  (\secref{sec:system-options}) that are passed to the prover
-  eventually.  The settings variable @{setting_ref
-  ISABELLE_BUILD_OPTIONS} allows to provide additional defaults, e.g.\
-  \<^verbatim>\<open>ISABELLE_BUILD_OPTIONS="document=pdf threads=4"\<close>. Moreover,
-  the environment of system build options may be augmented on the
-  command line via \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<^verbatim>\<open>-o\<close>~\<open>name\<close>, which
-  abbreviates \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=true\<close> for
-  Boolean options.  Multiple occurrences of \<^verbatim>\<open>-o\<close> on the
-  command-line are applied in the given order.
+  (\secref{sec:system-options}) that are passed to the prover eventually. The
+  settings variable @{setting_ref ISABELLE_BUILD_OPTIONS} can provide
+  additional defaults, e.g.\ \<^verbatim>\<open>ISABELLE_BUILD_OPTIONS="document=pdf threads=4"\<close>.
+  Moreover, the environment of system build options may be augmented on the
+  command line via \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<^verbatim>\<open>-o\<close>~\<open>name\<close>, which abbreviates
+  \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=true\<close> for Boolean options. Multiple occurrences of \<^verbatim>\<open>-o\<close> on
+  the command-line are applied in the given order.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-b\<close> ensures that heap images are
-  produced for all selected sessions.  By default, images are only
-  saved for inner nodes of the hierarchy of sessions, as required for
-  other sessions to continue later on.
+  Option \<^verbatim>\<open>-b\<close> ensures that heap images are produced for all selected
+  sessions. By default, images are only saved for inner nodes of the hierarchy
+  of sessions, as required for other sessions to continue later on.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-c\<close> cleans all descendants of the
-  selected sessions before performing the specified build operation.
+  Option \<^verbatim>\<open>-c\<close> cleans all descendants of the selected sessions before
+  performing the specified build operation.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-n\<close> omits the actual build process
-  after the preparatory stage (including optional cleanup).  Note that
-  the return code always indicates the status of the set of selected
-  sessions.
+  Option \<^verbatim>\<open>-n\<close> omits the actual build process after the preparatory stage
+  (including optional cleanup). Note that the return code always indicates the
+  status of the set of selected sessions.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-j\<close> specifies the maximum number of
-  parallel build jobs (prover processes).  Each prover process is
-  subject to a separate limit of parallel worker threads, cf.\ system
-  option @{system_option_ref threads}.
+  Option \<^verbatim>\<open>-j\<close> specifies the maximum number of parallel build jobs (prover
+  processes). Each prover process is subject to a separate limit of parallel
+  worker threads, cf.\ system option @{system_option_ref threads}.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-s\<close> enables \<^emph>\<open>system mode\<close>, which
-  means that resulting heap images and log files are stored in
-  @{file_unchecked "$ISABELLE_HOME/heaps"} instead of the default location
-  @{setting ISABELLE_OUTPUT} (which is normally in @{setting
-  ISABELLE_HOME_USER}, i.e.\ the user's home directory).
+  Option \<^verbatim>\<open>-s\<close> enables \<^emph>\<open>system mode\<close>, which means that resulting heap images
+  and log files are stored in @{file_unchecked "$ISABELLE_HOME/heaps"} instead
+  of the default location @{setting ISABELLE_OUTPUT} (which is normally in
+  @{setting ISABELLE_HOME_USER}, i.e.\ the user's home directory).
 
   \<^medskip>
-  Option \<^verbatim>\<open>-v\<close> increases the general level of
-  verbosity.  Option \<^verbatim>\<open>-l\<close> lists the source files that
-  contribute to a session.
+  Option \<^verbatim>\<open>-v\<close> increases the general level of verbosity. Option \<^verbatim>\<open>-l\<close> lists
+  the source files that contribute to a session.
 
   \<^medskip>
-  Option \<^verbatim>\<open>-k\<close> specifies a newly proposed keyword for
-  outer syntax (multiple uses allowed). The theory sources are checked for
-  conflicts wrt.\ this hypothetical change of syntax, e.g.\ to reveal
-  occurrences of identifiers that need to be quoted.\<close>
+  Option \<^verbatim>\<open>-k\<close> specifies a newly proposed keyword for outer syntax (multiple
+  uses allowed). The theory sources are checked for conflicts wrt.\ this
+  hypothetical change of syntax, e.g.\ to reveal occurrences of identifiers
+  that need to be quoted.
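+
+  For example, a hypothetical check whether a proposed keyword \<^verbatim>\<open>rewrites\<close>
+  would clash with identifiers in any existing session, without building
+  anything:
+  @{verbatim [display] \<open>isabelle build -a -n -k rewrites\<close>}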
+\<close>
 
 
 subsubsection \<open>Examples\<close>
@@ -396,23 +374,22 @@
   @{verbatim [display] \<open>isabelle build -b -g main\<close>}
 
   \<^smallskip>
-  Provide a general overview of the status of all Isabelle
-  sessions, without building anything:
+  Provide a general overview of the status of all Isabelle sessions, without
+  building anything:
   @{verbatim [display] \<open>isabelle build -a -n -v\<close>}
 
   \<^smallskip>
-  Build all sessions with HTML browser info and PDF
-  document preparation:
+  Build all sessions with HTML browser info and PDF document preparation:
   @{verbatim [display] \<open>isabelle build -a -o browser_info -o document=pdf\<close>}
 
   \<^smallskip>
-  Build all sessions with a maximum of 8 parallel prover
-  processes and 4 worker threads each (on a machine with many cores):
+  Build all sessions with a maximum of 8 parallel prover processes and 4
+  worker threads each (on a machine with many cores):
   @{verbatim [display] \<open>isabelle build -a -j8 -o threads=4\<close>}
 
   \<^smallskip>
-  Build some session images with cleanup of their
-  descendants, while retaining their ancestry:
+  Build some session images with cleanup of their descendants, while retaining
+  their ancestry:
   @{verbatim [display] \<open>isabelle build -b -c HOL-Algebra HOL-Word\<close>}
 
   \<^smallskip>
@@ -420,14 +397,14 @@
   @{verbatim [display] \<open>isabelle build -a -n -c\<close>}
 
   \<^smallskip>
-  Build all sessions from some other directory hierarchy,
-  according to the settings variable \<^verbatim>\<open>AFP\<close> that happens to
-  be defined inside the Isabelle environment:
+  Build all sessions from some other directory hierarchy, according to the
+  settings variable \<^verbatim>\<open>AFP\<close> that happens to be defined inside the Isabelle
+  environment:
   @{verbatim [display] \<open>isabelle build -D '$AFP'\<close>}
 
   \<^smallskip>
-  Inform about the status of all sessions required for AFP,
-  without building anything yet:
+  Inform about the status of all sessions required for AFP, without building
+  anything yet:
   @{verbatim [display] \<open>isabelle build -D '$AFP' -R -v -n\<close>}
 \<close>
 
--- a/src/Doc/Tutorial/Documents/Documents.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Doc/Tutorial/Documents/Documents.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -112,7 +112,7 @@
   macros (see also \S\ref{sec:doc-prep-symbols}).  There are also a
   few predefined control symbols, such as \verb,\,\verb,<^sub>, and
   \verb,\,\verb,<^sup>, for sub- and superscript of the subsequent
-  printable symbol, respectively.  For example, \verb,A\<^sup>\<star>, is
+  printable symbol, respectively.  For example, \<^verbatim>\<open>A\<^sup>\<star>\<close> is
   output as @{text "A\<^sup>\<star>"}.
 
   A number of symbols are considered letters by the Isabelle lexer and
@@ -125,7 +125,7 @@
   in the trailing part of an identifier. This means that the input
 
   \medskip
-  {\small\noindent \verb,\,\verb,<forall>\,\verb,<alpha>\<^sub>1.,~\verb,\,\verb,<alpha>\<^sub>1 = \,\verb,<Pi>\<^sub>\<A>,}
+  {\small\noindent \<^verbatim>\<open>\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>\<close>}
 
   \medskip
   \noindent is recognized as the term @{term "\<forall>\<alpha>\<^sub>1. \<alpha>\<^sub>1 = \<Pi>\<^sub>\<A>"} 
--- a/src/FOL/ex/Locale_Test/Locale_Test1.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/FOL/ex/Locale_Test/Locale_Test1.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -298,7 +298,7 @@
 
 section \<open>Interpretation between locales: sublocales\<close>
 
-sublocale lgrp < right: rgrp
+sublocale lgrp < right?: rgrp
 print_facts
 proof unfold_locales
   {
@@ -504,7 +504,7 @@
 end
 
 interpretation x: logic_o "op \<and>" "Not"
-  where bool_logic_o: "x.lor_o(x, y) \<longleftrightarrow> x \<or> y"
+  rewrites bool_logic_o: "x.lor_o(x, y) \<longleftrightarrow> x \<or> y"
 proof -
   show bool_logic_o: "PROP logic_o(op \<and>, Not)" by unfold_locales fast+
   show "logic_o.lor_o(op \<and>, Not, x, y) \<longleftrightarrow> x \<or> y"
@@ -546,7 +546,7 @@
 lemmas less_thm = less_def
 end
 
-interpretation le: mixin gle where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+interpretation le: mixin gle rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin(gle)" by unfold_locales (rule grefl)
   note reflexive = this[unfolded mixin_def]
@@ -588,7 +588,7 @@
 locale mixin4_mixin = mixin4_base
 
 interpretation le: mixin4_mixin gle
-  where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+  rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin4_mixin(gle)" by unfold_locales (rule grefl)
   note reflexive = this[unfolded mixin4_mixin_def mixin4_base_def mixin_def]
@@ -601,7 +601,7 @@
 lemmas less_thm4 = less_def
 end
 
-locale mixin4_combined = le1: mixin4_mixin le' + le2: mixin4_copy le for le' le
+locale mixin4_combined = le1?: mixin4_mixin le' + le2?: mixin4_copy le for le' le
 begin
 lemmas less_thm4' = less_def
 end
@@ -620,7 +620,7 @@
 locale mixin5_inherited = mixin5_base
 
 interpretation le5: mixin5_base gle
-  where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+  rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin5_base(gle)" by unfold_locales
   note reflexive = this[unfolded mixin5_base_def mixin_def]
@@ -648,7 +648,7 @@
 interpretation le6: mixin6_inherited gle
   by unfold_locales
 interpretation le6: mixin6_base gle
-  where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+  rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin6_base(gle)" by unfold_locales
   note reflexive = this[unfolded mixin6_base_def mixin_def]
@@ -669,7 +669,7 @@
 locale mixin7_inherited = reflexive
 
 interpretation le7: mixin7_base gle
-  where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+  rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin7_base(gle)" by unfold_locales
   note reflexive = this[unfolded mixin7_base_def mixin_def]
@@ -726,8 +726,8 @@
 
 end
 
-sublocale lgrp < "def": dgrp
-  where one_equation: "dgrp.one(prod) = one" and inv_equation: "dgrp.inv(prod, x) = inv(x)"
+sublocale lgrp < "def"?: dgrp
+  rewrites one_equation: "dgrp.one(prod) = one" and inv_equation: "dgrp.inv(prod, x) = inv(x)"
 proof -
   show "dgrp(prod)" by unfold_locales
   from this interpret d: dgrp .
@@ -766,7 +766,7 @@
 
 locale roundup = fixes x assumes true: "x \<longleftrightarrow> True"
 
-sublocale roundup \<subseteq> sub: roundup x where "x \<longleftrightarrow> True \<and> True"
+sublocale roundup \<subseteq> sub: roundup x rewrites "x \<longleftrightarrow> True \<and> True"
   apply unfold_locales apply (simp add: true) done
 lemma (in roundup) "True \<and> True \<longleftrightarrow> True" by (rule sub.true)
 
@@ -775,7 +775,7 @@
 
 locale container
 begin
-interpretation "private"!: roundup True by unfold_locales rule
+interpretation "private": roundup True by unfold_locales rule
 lemmas true_copy = private.true
 end
 
@@ -816,7 +816,7 @@
       and pnotnot: "\<And>x. pnot(pnot(x)) \<longleftrightarrow> x"
       and por_def: "\<And>x y. por(x, y) \<longleftrightarrow> pnot(pand(pnot(x), pnot(y)))"
     interpret loc: logic_o pand pnot
-      where por_eq: "\<And>x y. logic_o.lor_o(pand, pnot, x, y) \<longleftrightarrow> por(x, y)"  (* FIXME *)
+      rewrites por_eq: "\<And>x y. logic_o.lor_o(pand, pnot, x, y) \<longleftrightarrow> por(x, y)"  (* FIXME *)
     proof -
       show logic_o: "PROP logic_o(pand, pnot)" using passoc pnotnot by unfold_locales
       fix x y
--- a/src/FOL/ex/Locale_Test/Locale_Test2.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/FOL/ex/Locale_Test/Locale_Test2.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -9,7 +9,7 @@
 begin
 
 interpretation le1: mixin_thy_merge gle gle'
-  where "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
+  rewrites "reflexive.less(gle, x, y) \<longleftrightarrow> gless(x, y)"
 proof -
   show "mixin_thy_merge(gle, gle')" by unfold_locales
   note reflexive = this[unfolded mixin_thy_merge_def, THEN conjunct1]
--- a/src/FOL/ex/Locale_Test/Locale_Test3.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/FOL/ex/Locale_Test/Locale_Test3.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -9,7 +9,7 @@
 begin
 
 interpretation le2: mixin_thy_merge gle gle'
-  where "reflexive.less(gle', x, y) \<longleftrightarrow> gless'(x, y)"
+  rewrites "reflexive.less(gle', x, y) \<longleftrightarrow> gless'(x, y)"
 proof -
   show "mixin_thy_merge(gle, gle')" by unfold_locales
   note reflexive = this[unfolded mixin_thy_merge_def, THEN conjunct2]
--- a/src/HOL/Algebra/AbelCoset.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/AbelCoset.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -51,7 +51,7 @@
     kernel \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>
       \<lparr>carrier = carrier H, mult = add H, one = zero H\<rparr> h"
 
-locale abelian_group_hom = G: abelian_group G + H: abelian_group H
+locale abelian_group_hom = G?: abelian_group G + H?: abelian_group H
     for G (structure) and H (structure) +
   fixes h
   assumes a_group_hom: "group_hom \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>
--- a/src/HOL/Algebra/Group.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/Group.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -613,7 +613,7 @@
 
 text\<open>Basis for homomorphism proofs: we assume two groups @{term G} and
   @{term H}, with a homomorphism @{term h} between them\<close>
-locale group_hom = G: group G + H: group H for G (structure) and H (structure) +
+locale group_hom = G?: group G + H?: group H for G (structure) and H (structure) +
   fixes h
   assumes homh: "h \<in> hom G H"
 
--- a/src/HOL/Algebra/IntRing.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/IntRing.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -54,7 +54,7 @@
   with as few assumptions as possible.\<close>
 
 interpretation int: monoid \<Z>
-  where "carrier \<Z> = UNIV"
+  rewrites "carrier \<Z> = UNIV"
     and "mult \<Z> x y = x * y"
     and "one \<Z> = 1"
     and "pow \<Z> x n = x^n"
@@ -73,7 +73,7 @@
 qed
 
 interpretation int: comm_monoid \<Z>
-  where "finprod \<Z> f A = setprod f A"
+  rewrites "finprod \<Z> f A = setprod f A"
 proof -
   -- "Specification"
   show "comm_monoid \<Z>" by standard auto
@@ -88,7 +88,7 @@
 qed
 
 interpretation int: abelian_monoid \<Z>
-  where int_carrier_eq: "carrier \<Z> = UNIV"
+  rewrites int_carrier_eq: "carrier \<Z> = UNIV"
     and int_zero_eq: "zero \<Z> = 0"
     and int_add_eq: "add \<Z> x y = x + y"
     and int_finsum_eq: "finsum \<Z> f A = setsum f A"
@@ -114,7 +114,7 @@
      Since the morphisms through which the abelian structures are interpreted are
      not the identity, the equations of these interpretations are not inherited. *)
   (* FIXME *)
-  where "carrier \<Z> = UNIV"
+  rewrites "carrier \<Z> = UNIV"
     and "zero \<Z> = 0"
     and "add \<Z> x y = x + y"
     and "finsum \<Z> f A = setsum f A"
@@ -147,7 +147,7 @@
 qed (simp add: int_carrier_eq int_zero_eq int_add_eq int_finsum_eq)+
 
 interpretation int: "domain" \<Z>
-  where "carrier \<Z> = UNIV"
+  rewrites "carrier \<Z> = UNIV"
     and "zero \<Z> = 0"
     and "add \<Z> x y = x + y"
     and "finsum \<Z> f A = setsum f A"
@@ -173,7 +173,7 @@
 
 interpretation int (* FIXME [unfolded UNIV] *) :
   partial_order "\<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr>"
-  where "carrier \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> = UNIV"
+  rewrites "carrier \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> = UNIV"
     and "le \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> x y = (x \<le> y)"
     and "lless \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> x y = (x < y)"
 proof -
@@ -189,7 +189,7 @@
 
 interpretation int (* FIXME [unfolded UNIV] *) :
   lattice "\<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr>"
-  where "join \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> x y = max x y"
+  rewrites "join \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> x y = max x y"
     and "meet \<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr> x y = min x y"
 proof -
   let ?Z = "\<lparr>carrier = UNIV::int set, eq = op =, le = op \<le>\<rparr>"
--- a/src/HOL/Algebra/Lattice.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/Lattice.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -925,7 +925,7 @@
 
 text \<open>Total orders are lattices.\<close>
 
-sublocale weak_total_order < weak: weak_lattice
+sublocale weak_total_order < weak?: weak_lattice
 proof
   fix x y
   assume L: "x \<in> carrier L"  "y \<in> carrier L"
@@ -1132,14 +1132,14 @@
   assumes sup_of_two_exists:
     "[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. least L s (Upper L {x, y})"
 
-sublocale upper_semilattice < weak: weak_upper_semilattice
+sublocale upper_semilattice < weak?: weak_upper_semilattice
   by standard (rule sup_of_two_exists)
 
 locale lower_semilattice = partial_order +
   assumes inf_of_two_exists:
     "[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. greatest L s (Lower L {x, y})"
 
-sublocale lower_semilattice < weak: weak_lower_semilattice
+sublocale lower_semilattice < weak?: weak_lower_semilattice
   by standard (rule inf_of_two_exists)
 
 locale lattice = upper_semilattice + lower_semilattice
@@ -1190,7 +1190,7 @@
 locale total_order = partial_order +
   assumes total_order_total: "[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
 
-sublocale total_order < weak: weak_total_order
+sublocale total_order < weak?: weak_total_order
   by standard (rule total_order_total)
 
 text \<open>Introduction rule: the usual definition of total order\<close>
@@ -1202,7 +1202,7 @@
 
 text \<open>Total orders are lattices.\<close>
 
-sublocale total_order < weak: lattice
+sublocale total_order < weak?: lattice
   by standard (auto intro: sup_of_two_exists inf_of_two_exists)
 
 
@@ -1214,7 +1214,7 @@
     and inf_exists:
     "[| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
 
-sublocale complete_lattice < weak: weak_complete_lattice
+sublocale complete_lattice < weak?: weak_complete_lattice
   by standard (auto intro: sup_exists inf_exists)
 
 text \<open>Introduction rule: the usual definition of complete lattice\<close>
--- a/src/HOL/Algebra/Module.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/Module.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -14,7 +14,7 @@
 record ('a, 'b) module = "'b ring" +
   smult :: "['a, 'b] => 'b" (infixl "\<odot>\<index>" 70)
 
-locale module = R: cring + M: abelian_group M for M (structure) +
+locale module = R?: cring + M?: abelian_group M for M (structure) +
   assumes smult_closed [simp, intro]:
       "[| a \<in> carrier R; x \<in> carrier M |] ==> a \<odot>\<^bsub>M\<^esub> x \<in> carrier M"
     and smult_l_distr:
--- a/src/HOL/Algebra/Ring.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/Ring.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -94,8 +94,8 @@
 text \<open>Transfer facts from multiplicative structures via interpretation.\<close>
 
 sublocale abelian_monoid <
-  add!: monoid "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
-  where "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
+  add: monoid "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
+  rewrites "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
     and "mult \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = add G"
     and "one \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = zero G"
   by (rule a_monoid) auto
@@ -112,8 +112,8 @@
 end
 
 sublocale abelian_monoid <
-  add!: comm_monoid "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
-  where "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
+  add: comm_monoid "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
+  rewrites "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
     and "mult \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = add G"
     and "one \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = zero G"
     and "finprod \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = finsum G"
@@ -168,8 +168,8 @@
 end
 
 sublocale abelian_group <
-  add!: group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
-  where "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
+  add: group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
+  rewrites "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
     and "mult \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = add G"
     and "one \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = zero G"
     and "m_inv \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = a_inv G"
@@ -196,8 +196,8 @@
 end
 
 sublocale abelian_group <
-  add!: comm_group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
-  where "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
+  add: comm_group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
+  rewrites "carrier \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = carrier G"
     and "mult \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = add G"
     and "one \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = zero G"
     and "m_inv \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr> = a_inv G"
@@ -648,7 +648,7 @@
   shows "h \<in> ring_hom R S ==> h \<one> = \<one>\<^bsub>S\<^esub>"
   by (simp add: ring_hom_def)
 
-locale ring_hom_cring = R: cring R + S: cring S
+locale ring_hom_cring = R?: cring R + S?: cring S
     for R (structure) and S (structure) +
   fixes h
   assumes homh [simp, intro]: "h \<in> ring_hom R S"
--- a/src/HOL/Algebra/RingHom.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/RingHom.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -9,7 +9,7 @@
 section \<open>Homomorphisms of Non-Commutative Rings\<close>
 
 text \<open>Lifting existing lemmas in a @{text ring_hom_ring} locale\<close>
-locale ring_hom_ring = R: ring R + S: ring S
+locale ring_hom_ring = R?: ring R + S?: ring S
     for R (structure) and S (structure) +
   fixes h
   assumes homh: "h \<in> ring_hom R S"
@@ -19,7 +19,7 @@
 sublocale ring_hom_cring \<subseteq> ring: ring_hom_ring
   by standard (rule homh)
 
-sublocale ring_hom_ring \<subseteq> abelian_group: abelian_group_hom R S
+sublocale ring_hom_ring \<subseteq> abelian_group?: abelian_group_hom R S
 apply (rule abelian_group_homI)
   apply (rule R.is_abelian_group)
  apply (rule S.is_abelian_group)
--- a/src/HOL/Algebra/UnivPoly.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Algebra/UnivPoly.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -174,14 +174,14 @@
   fixes R (structure) and P (structure)
   defines P_def: "P == UP R"
 
-locale UP_ring = UP + R: ring R
+locale UP_ring = UP + R?: ring R
 
-locale UP_cring = UP + R: cring R
+locale UP_cring = UP + R?: cring R
 
 sublocale UP_cring < UP_ring
   by intro_locales [1] (rule P_def)
 
-locale UP_domain = UP + R: "domain" R
+locale UP_domain = UP + R?: "domain" R
 
 sublocale UP_domain < UP_cring
   by intro_locales [1] (rule P_def)
@@ -457,8 +457,8 @@
 
 end
 
-sublocale UP_ring < P: ring P using UP_ring .
-sublocale UP_cring < P: cring P using UP_cring .
+sublocale UP_ring < P?: ring P using UP_ring .
+sublocale UP_cring < P?: cring P using UP_cring .
 
 
 subsection \<open>Polynomials Form an Algebra\<close>
@@ -1196,8 +1196,6 @@
 
 locale UP_pre_univ_prop = ring_hom_cring + UP_cring
 
-(* FIXME print_locale ring_hom_cring fails *)
-
 locale UP_univ_prop = UP_pre_univ_prop +
   fixes s and Eval
   assumes indet_img_carrier [simp, intro]: "s \<in> carrier S"
--- a/src/HOL/Binomial.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Binomial.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -3,6 +3,7 @@
     Copyright   : 1998  University of Cambridge
     Conversion to Isar and new proofs by Lawrence C Paulson, 2004
     The integer version of factorial and other additions by Jeremy Avigad.
+    Additional binomial identities by Chaitanya Mangla and Manuel Eberl
 *)
 
 section\<open>Factorial Function, Binomial Coefficients and Binomial Theorem\<close>
--- a/src/HOL/Cardinals/Ordinal_Arithmetic.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Cardinals/Ordinal_Arithmetic.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -498,8 +498,8 @@
   and     sWELL: "Well_order s"
 begin
 
-interpretation r!: wo_rel r by unfold_locales (rule rWELL)
-interpretation s!: wo_rel s by unfold_locales (rule sWELL)
+interpretation r: wo_rel r by unfold_locales (rule rWELL)
+interpretation s: wo_rel s by unfold_locales (rule sWELL)
 
 abbreviation "SUPP \<equiv> support r.zero (Field s)"
 abbreviation "FINFUNC \<equiv> FinFunc r s"
@@ -1134,8 +1134,8 @@
   moreover
   from *(2,4) have "compat ?L ?R ?f" unfolding compat_def osum_def map_prod_def by fastforce
   moreover
-  interpret t!: wo_rel t by unfold_locales (rule t)
-  interpret rt!: wo_rel ?R by unfold_locales (rule osum_Well_order[OF r t])
+  interpret t: wo_rel t by unfold_locales (rule t)
+  interpret rt: wo_rel ?R by unfold_locales (rule osum_Well_order[OF r t])
   from *(3) have "ofilter ?R (?f ` Field ?L)"
     unfolding t.ofilter_def rt.ofilter_def Field_osum image_Un image_image under_def
     by (auto simp: osum_def intro!: imageI) (auto simp: Field_def)
@@ -1200,8 +1200,8 @@
   from *(2,4) the_inv_into_f_f[OF *(1)] have "compat ?L ?R ?f" unfolding compat_def oprod_def
     by auto (metis well_order_on_domain t, metis well_order_on_domain s)
   moreover
-  interpret t!: wo_rel t by unfold_locales (rule t)
-  interpret rt!: wo_rel ?R by unfold_locales (rule oprod_Well_order[OF r t])
+  interpret t: wo_rel t by unfold_locales (rule t)
+  interpret rt: wo_rel ?R by unfold_locales (rule oprod_Well_order[OF r t])
   from *(3) have "ofilter ?R (?f ` Field ?L)"
     unfolding t.ofilter_def rt.ofilter_def Field_oprod under_def
     by (auto simp: oprod_def image_iff) (fast | metis r well_order_on_domain)+
@@ -1277,12 +1277,12 @@
   assumes "oone <o r" "s <o t"
   shows   "r ^o s <o r ^o t" (is "?L <o ?R")
 proof -
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rt!: wo_rel2 r t by unfold_locales (rule r, rule t)
-  interpret rexpt!: wo_rel "r ^o t" by unfold_locales (rule rt.oexp_Well_order)
-  interpret r!: wo_rel r by unfold_locales (rule r)
-  interpret s!: wo_rel s by unfold_locales (rule s)
-  interpret t!: wo_rel t by unfold_locales (rule t)
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rt: wo_rel2 r t by unfold_locales (rule r, rule t)
+  interpret rexpt: wo_rel "r ^o t" by unfold_locales (rule rt.oexp_Well_order)
+  interpret r: wo_rel r by unfold_locales (rule r)
+  interpret s: wo_rel s by unfold_locales (rule s)
+  interpret t: wo_rel t by unfold_locales (rule t)
   have "Field r \<noteq> {}" by (metis assms(1) internalize_ordLess not_psubset_empty)
   moreover
   { assume "Field r = {r.zero}"
@@ -1388,11 +1388,11 @@
   assumes "r \<le>o s"
   shows   "r ^o t \<le>o s ^o t"
 proof -
-  interpret rt!: wo_rel2 r t by unfold_locales (rule r, rule t)
-  interpret st!: wo_rel2 s t by unfold_locales (rule s, rule t)
-  interpret r!: wo_rel r by unfold_locales (rule r)
-  interpret s!: wo_rel s by unfold_locales (rule s)
-  interpret t!: wo_rel t by unfold_locales (rule t)
+  interpret rt: wo_rel2 r t by unfold_locales (rule r, rule t)
+  interpret st: wo_rel2 s t by unfold_locales (rule s, rule t)
+  interpret r: wo_rel r by unfold_locales (rule r)
+  interpret s: wo_rel s by unfold_locales (rule s)
+  interpret t: wo_rel t by unfold_locales (rule t)
   show ?thesis
   proof (cases "t = {}")
     case True thus ?thesis using r s unfolding ordLeq_def2 underS_def by auto
@@ -1453,9 +1453,9 @@
   assumes "oone <o r"
   shows   "s \<le>o r ^o s"
 proof -
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret r!: wo_rel r by unfold_locales (rule r)
-  interpret s!: wo_rel s by unfold_locales (rule s)
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret r: wo_rel r by unfold_locales (rule r)
+  interpret s: wo_rel s by unfold_locales (rule s)
   from assms well_order_on_domain[OF r] obtain x where
     x: "x \<in> Field r" "r.zero \<in> Field r" "x \<noteq> r.zero"
     unfolding ordLess_def oone_def embedS_def[abs_def] bij_betw_def embed_def under_def
@@ -1511,9 +1511,9 @@
     "case_sum f1 g1 \<in> FinFunc r (s +o t)" "case_sum f2 g2 \<in> FinFunc r (s +o t)"
   shows "wo_rel.max_fun_diff s f1 f2 = x" (is ?P) "g1 = g2" (is ?Q)
 proof -
-  interpret st!: wo_rel "s +o t" by unfold_locales (rule osum_Well_order[OF s t])
-  interpret s!: wo_rel s by unfold_locales (rule s)
-  interpret rst!: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
+  interpret st: wo_rel "s +o t" by unfold_locales (rule osum_Well_order[OF s t])
+  interpret s: wo_rel s by unfold_locales (rule s)
+  interpret rst: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
   from assms(1) have *: "st.isMaxim {a \<in> Field (s +o t). case_sum f1 g1 a \<noteq> case_sum f2 g2 a} (Inl x)"
     using rst.isMaxim_max_fun_diff[OF assms(2-4)] by simp
   hence "s.isMaxim {a \<in> Field s. f1 a \<noteq> f2 a} x"
@@ -1535,9 +1535,9 @@
     "case_sum f1 g1 \<in> FinFunc r (s +o t)" "case_sum f2 g2 \<in> FinFunc r (s +o t)"
   shows "wo_rel.max_fun_diff t g1 g2 = x" (is ?P) "g1 \<noteq> g2" (is ?Q)
 proof -
-  interpret st!: wo_rel "s +o t" by unfold_locales (rule osum_Well_order[OF s t])
-  interpret t!: wo_rel t by unfold_locales (rule t)
-  interpret rst!: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
+  interpret st: wo_rel "s +o t" by unfold_locales (rule osum_Well_order[OF s t])
+  interpret t: wo_rel t by unfold_locales (rule t)
+  interpret rst: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
   from assms(1) have *: "st.isMaxim {a \<in> Field (s +o t). case_sum f1 g1 a \<noteq> case_sum f2 g2 a} (Inr x)"
     using rst.isMaxim_max_fun_diff[OF assms(2-4)] by simp
   hence "t.isMaxim {a \<in> Field t. g1 a \<noteq> g2 a} x"
@@ -1548,9 +1548,9 @@
 
 lemma oexp_osum: "r ^o (s +o t) =o (r ^o s) *o (r ^o t)" (is "?R =o ?L")
 proof (rule ordIso_symmetric)
-  interpret rst!: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rt!: wo_rel2 r t by unfold_locales (rule r, rule t)
+  interpret rst: wo_rel2 r "s +o t" by unfold_locales (rule r, rule osum_Well_order[OF s t])
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rt: wo_rel2 r t by unfold_locales (rule r, rule t)
   let ?f = "\<lambda>(f, g). case_sum f g"
   have "bij_betw ?f (Field ?L) (Field ?R)"
   unfolding bij_betw_def rst.Field_oexp rs.Field_oexp rt.Field_oexp Field_oprod proof (intro conjI)
@@ -1581,8 +1581,8 @@
   assumes Field: "Field r \<noteq> {}"
   shows "rev_curr ` (FinFunc r (s *o t)) = FinFunc (r ^o s) t"
 proof safe
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rst!: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
   fix g assume g: "g \<in> FinFunc r (s *o t)"
   hence "finite (rst.SUPP (rev_curr g))" "\<forall>x \<in> Field t. finite (rs.SUPP (rev_curr g x))"
     unfolding FinFunc_def Field_oprod rs.Field_oexp Func_def fin_support_def support_def
@@ -1591,8 +1591,8 @@
     unfolding FinFunc_def Field_oprod rs.Field_oexp Func_def
     by (auto simp: rev_curr_def fin_support_def)
 next
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rst!: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
   fix fg assume *: "fg \<in> FinFunc (r ^o s) t"
   let ?g = "\<lambda>(a, b). if (a, b) \<in> Field (s *o t) then fg b a else undefined"
   show "fg \<in> rev_curr ` FinFunc r (s *o t)"
@@ -1631,12 +1631,12 @@
   shows "wo_rel.max_fun_diff (s *o t) f g =
     (wo_rel.max_fun_diff s (rev_curr f m) (rev_curr g m), m)"
 proof -
-  interpret st!: wo_rel "s *o t" by unfold_locales (rule oprod_Well_order[OF s t])
-  interpret s!: wo_rel s by unfold_locales (rule s)
-  interpret t!: wo_rel t by unfold_locales (rule t)
-  interpret r_st!: wo_rel2 r "s *o t" by unfold_locales (rule r, rule oprod_Well_order[OF s t])
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rst!: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
+  interpret st: wo_rel "s *o t" by unfold_locales (rule oprod_Well_order[OF s t])
+  interpret s: wo_rel s by unfold_locales (rule s)
+  interpret t: wo_rel t by unfold_locales (rule t)
+  interpret r_st: wo_rel2 r "s *o t" by unfold_locales (rule r, rule oprod_Well_order[OF s t])
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
   from fun_unequal_in_support[OF assms(2), of "Field (s *o t)" "Field r" "Field r"] assms(3,4)
     have diff1: "rev_curr f \<noteq> rev_curr g"
       "rev_curr f \<in> FinFunc (r ^o s) t" "rev_curr g \<in> FinFunc (r ^o s) t" using rev_curr_FinFunc[OF Field]
@@ -1668,8 +1668,8 @@
 lemma oexp_oexp: "(r ^o s) ^o t =o r ^o (s *o t)" (is "?R =o ?L")
 proof (cases "r = {}")
   case True
-  interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-  interpret rst!: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
+  interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+  interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
   show ?thesis
   proof (cases "s = {} \<or> t = {}")
     case True with `r = {}` show ?thesis
@@ -1687,9 +1687,9 @@
   hence Field: "Field r \<noteq> {}" by (metis Field_def Range_empty_iff Un_empty)
   show ?thesis
   proof (rule ordIso_symmetric)
-    interpret r_st!: wo_rel2 r "s *o t" by unfold_locales (rule r, rule oprod_Well_order[OF s t])
-    interpret rs!: wo_rel2 r s by unfold_locales (rule r, rule s)
-    interpret rst!: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
+    interpret r_st: wo_rel2 r "s *o t" by unfold_locales (rule r, rule oprod_Well_order[OF s t])
+    interpret rs: wo_rel2 r s by unfold_locales (rule r, rule s)
+    interpret rst: wo_rel2 "r ^o s" t by unfold_locales (rule oexp_Well_order[OF r s], rule t)
     have bij: "bij_betw rev_curr (Field ?L) (Field ?R)"
     unfolding bij_betw_def r_st.Field_oexp rst.Field_oexp Field_oprod proof (intro conjI)
       show "inj_on rev_curr (FinFunc r (s *o t))"
--- a/src/HOL/Cardinals/Wellorder_Constructions.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Cardinals/Wellorder_Constructions.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -932,8 +932,8 @@
 and f: "\<And> a. a \<in> Field r \<Longrightarrow> f a \<in> Field s \<and> f ` underS r a \<subseteq> underS s (f a)"
 shows "\<exists> g. embed r s g"
 proof-
-  interpret r!: wo_rel r by unfold_locales (rule r)
-  interpret s!: wo_rel s by unfold_locales (rule s)
+  interpret r: wo_rel r by unfold_locales (rule r)
+  interpret s: wo_rel s by unfold_locales (rule s)
   let ?G = "\<lambda> g a. suc s (g ` underS r a)"
   def g \<equiv> "worec r ?G"
   have adm: "adm_wo r ?G" unfolding r.adm_wo_def image_def by auto
--- a/src/HOL/Data_Structures/AVL_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/AVL_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -8,36 +8,34 @@
   Lookup2
 begin
 
-fun update :: "'a::order \<Rightarrow> 'b \<Rightarrow> ('a*'b) avl_tree \<Rightarrow> ('a*'b) avl_tree" where
+fun update :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) avl_tree \<Rightarrow> ('a*'b) avl_tree" where
 "update x y Leaf = Node 1 Leaf (x,y) Leaf" |
-"update x y (Node h l (a,b) r) = 
-   (if x = a then Node h l (x,y) r else
-    if x < a then node_bal_l (update x y l) (a,b) r
-    else node_bal_r l (a,b) (update x y r))"
+"update x y (Node h l (a,b) r) = (case cmp x a of
+   EQ \<Rightarrow> Node h l (x,y) r |
+   LT \<Rightarrow> balL (update x y l) (a,b) r |
+   GT \<Rightarrow> balR l (a,b) (update x y r))"
 
-fun delete :: "'a::order \<Rightarrow> ('a*'b) avl_tree \<Rightarrow> ('a*'b) avl_tree" where
+fun delete :: "'a::cmp \<Rightarrow> ('a*'b) avl_tree \<Rightarrow> ('a*'b) avl_tree" where
 "delete _ Leaf = Leaf" |
-"delete x (Node h l (a,b) r) = (
-   if x = a then delete_root (Node h l (a,b) r) else
-   if x < a then node_bal_r (delete x l) (a,b) r
-   else node_bal_l l (a,b) (delete x r))"
+"delete x (Node h l (a,b) r) = (case cmp x a of
+   EQ \<Rightarrow> delete_root (Node h l (a,b) r) |
+   LT \<Rightarrow> balR (delete x l) (a,b) r |
+   GT \<Rightarrow> balL l (a,b) (delete x r))"
 
 
 subsection {* Functional Correctness Proofs *}
 
 theorem inorder_update:
   "sorted1(inorder t) \<Longrightarrow> inorder(update x y t) = upd_list x y (inorder t)"
-by (induct t) 
-   (auto simp: upd_list_simps inorder_node_bal_l inorder_node_bal_r)
+by (induct t) (auto simp: upd_list_simps inorder_balL inorder_balR)
 
 
 theorem inorder_delete:
   "sorted1(inorder t) \<Longrightarrow> inorder (delete x t) = del_list x (inorder t)"
 by(induction t)
-  (auto simp: del_list_simps inorder_node_bal_l inorder_node_bal_r
+  (auto simp: del_list_simps inorder_balL inorder_balR
      inorder_delete_root inorder_delete_maxD split: prod.splits)
 
-
 interpretation Map_by_Ordered
 where empty = Leaf and lookup = lookup and update = update and delete = delete
 and inorder = inorder and wf = "\<lambda>_. True"
--- a/src/HOL/Data_Structures/AVL_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/AVL_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -6,7 +6,7 @@
 section "AVL Tree Implementation of Sets"
 
 theory AVL_Set
-imports Isin2
+imports Cmp Isin2
 begin
 
 type_synonym 'a avl_tree = "('a,nat) tree"
@@ -26,8 +26,8 @@
 definition node :: "'a avl_tree \<Rightarrow> 'a \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
 "node l a r = Node (max (ht l) (ht r) + 1) l a r"
 
-definition node_bal_l :: "'a avl_tree \<Rightarrow> 'a \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
-"node_bal_l l a r = (
+definition balL :: "'a avl_tree \<Rightarrow> 'a \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
+"balL l a r = (
   if ht l = ht r + 2 then (case l of 
     Node _ bl b br \<Rightarrow> (if ht bl < ht br
     then case br of
@@ -35,8 +35,8 @@
     else node bl b (node br a r)))
   else node l a r)"
 
-definition node_bal_r :: "'a avl_tree \<Rightarrow> 'a \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
-"node_bal_r l a r = (
+definition balR :: "'a avl_tree \<Rightarrow> 'a \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
+"balR l a r = (
   if ht r = ht l + 2 then (case r of
     Node _ bl b br \<Rightarrow> (if ht bl > ht br
     then case bl of
@@ -44,19 +44,17 @@
     else node (node l a bl) b br))
   else node l a r)"
 
-fun insert :: "'a::order \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
+fun insert :: "'a::cmp \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
 "insert x Leaf = Node 1 Leaf x Leaf" |
-"insert x (Node h l a r) = 
-   (if x=a then Node h l a r
-    else if x<a
-      then node_bal_l (insert x l) a r
-      else node_bal_r l a (insert x r))"
+"insert x (Node h l a r) = (case cmp x a of
+   EQ \<Rightarrow> Node h l a r |
+   LT \<Rightarrow> balL (insert x l) a r |
+   GT \<Rightarrow> balR l a (insert x r))"
 
 fun delete_max :: "'a avl_tree \<Rightarrow> 'a avl_tree * 'a" where
 "delete_max (Node _ l a Leaf) = (l,a)" |
-"delete_max (Node _ l a r) = (
-  let (r',a') = delete_max r in
-  (node_bal_l l a r', a'))"
+"delete_max (Node _ l a r) =
+  (let (r',a') = delete_max r in (balL l a r', a'))"
 
 lemmas delete_max_induct = delete_max.induct[case_names Leaf Node]
 
@@ -64,16 +62,16 @@
 "delete_root (Node h Leaf a r) = r" |
 "delete_root (Node h l a Leaf) = l" |
 "delete_root (Node h l a r) =
-  (let (l', a') = delete_max l in node_bal_r l' a' r)"
+  (let (l', a') = delete_max l in balR l' a' r)"
 
 lemmas delete_root_cases = delete_root.cases[case_names Leaf_t Node_Leaf Node_Node]
 
-fun delete :: "'a::order \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
+fun delete :: "'a::cmp \<Rightarrow> 'a avl_tree \<Rightarrow> 'a avl_tree" where
 "delete _ Leaf = Leaf" |
-"delete x (Node h l a r) = (
-   if x = a then delete_root (Node h l a r)
-   else if x < a then node_bal_r (delete x l) a r
-   else node_bal_l l a (delete x r))"
+"delete x (Node h l a r) = (case cmp x a of
+   EQ \<Rightarrow> delete_root (Node h l a r) |
+   LT \<Rightarrow> balR (delete x l) a r |
+   GT \<Rightarrow> balL l a (delete x r))"
 
 
 subsection {* Functional Correctness Proofs *}
@@ -83,18 +81,18 @@
 
 subsubsection "Proofs for insert"
 
-lemma inorder_node_bal_l:
-  "inorder (node_bal_l l a r) = inorder l @ a # inorder r"
-by (auto simp: node_def node_bal_l_def split:tree.splits)
+lemma inorder_balL:
+  "inorder (balL l a r) = inorder l @ a # inorder r"
+by (auto simp: node_def balL_def split:tree.splits)
 
-lemma inorder_node_bal_r:
-  "inorder (node_bal_r l a r) = inorder l @ a # inorder r"
-by (auto simp: node_def node_bal_r_def split:tree.splits)
+lemma inorder_balR:
+  "inorder (balR l a r) = inorder l @ a # inorder r"
+by (auto simp: node_def balR_def split:tree.splits)
 
 theorem inorder_insert:
   "sorted(inorder t) \<Longrightarrow> inorder(insert x t) = ins_list x (inorder t)"
 by (induct t) 
-   (auto simp: ins_list_simps inorder_node_bal_l inorder_node_bal_r)
+   (auto simp: ins_list_simps inorder_balL inorder_balR)
 
 
 subsubsection "Proofs for delete"
@@ -103,17 +101,17 @@
   "\<lbrakk> delete_max t = (t',a); t \<noteq> Leaf \<rbrakk> \<Longrightarrow>
    inorder t' @ [a] = inorder t"
 by(induction t arbitrary: t' rule: delete_max.induct)
-  (auto simp: inorder_node_bal_l split: prod.splits tree.split)
+  (auto simp: inorder_balL split: prod.splits tree.split)
 
 lemma inorder_delete_root:
   "inorder (delete_root (Node h l a r)) = inorder l @ inorder r"
 by(induction "Node h l a r" arbitrary: l a r h rule: delete_root.induct)
-  (auto simp: inorder_node_bal_r inorder_delete_maxD split: prod.splits)
+  (auto simp: inorder_balR inorder_delete_maxD split: prod.splits)
 
 theorem inorder_delete:
   "sorted(inorder t) \<Longrightarrow> inorder (delete x t) = del_list x (inorder t)"
 by(induction t)
-  (auto simp: del_list_simps inorder_node_bal_l inorder_node_bal_r
+  (auto simp: del_list_simps inorder_balL inorder_balR
     inorder_delete_root inorder_delete_maxD split: prod.splits)
 
 
@@ -121,7 +119,7 @@
 
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert and delete = delete
-and inorder = inorder and wf = "\<lambda>_. True"
+and inorder = inorder and inv = "\<lambda>_. True"
 proof (standard, goal_cases)
   case 1 show ?case by simp
 next
@@ -145,17 +143,17 @@
 lemma [simp]: "avl t \<Longrightarrow> ht t = height t"
 by (induct t) simp_all
 
-lemma height_node_bal_l:
+lemma height_balL:
   "\<lbrakk> height l = height r + 2; avl l; avl r \<rbrakk> \<Longrightarrow>
-   height (node_bal_l l a r) = height r + 2 \<or>
-   height (node_bal_l l a r) = height r + 3"
-by (cases l) (auto simp:node_def node_bal_l_def split:tree.split)
+   height (balL l a r) = height r + 2 \<or>
+   height (balL l a r) = height r + 3"
+by (cases l) (auto simp:node_def balL_def split:tree.split)
        
-lemma height_node_bal_r:
+lemma height_balR:
   "\<lbrakk> height r = height l + 2; avl l; avl r \<rbrakk> \<Longrightarrow>
-   height (node_bal_r l a r) = height l + 2 \<or>
-   height (node_bal_r l a r) = height l + 3"
-by (cases r) (auto simp add:node_def node_bal_r_def split:tree.split)
+   height (balR l a r) = height l + 2 \<or>
+   height (balR l a r) = height l + 3"
+by (cases r) (auto simp add:node_def balR_def split:tree.split)
 
 lemma [simp]: "height(node l a r) = max (height l) (height r) + 1"
 by (simp add: node_def)
@@ -166,53 +164,53 @@
    \<rbrakk> \<Longrightarrow> avl(node l a r)"
 by (auto simp add:max_def node_def)
 
-lemma height_node_bal_l2:
+lemma height_balL2:
   "\<lbrakk> avl l; avl r; height l \<noteq> height r + 2 \<rbrakk> \<Longrightarrow>
-   height (node_bal_l l a r) = (1 + max (height l) (height r))"
-by (cases l, cases r) (simp_all add: node_bal_l_def)
+   height (balL l a r) = (1 + max (height l) (height r))"
+by (cases l, cases r) (simp_all add: balL_def)
 
-lemma height_node_bal_r2:
+lemma height_balR2:
   "\<lbrakk> avl l;  avl r;  height r \<noteq> height l + 2 \<rbrakk> \<Longrightarrow>
-   height (node_bal_r l a r) = (1 + max (height l) (height r))"
-by (cases l, cases r) (simp_all add: node_bal_r_def)
+   height (balR l a r) = (1 + max (height l) (height r))"
+by (cases l, cases r) (simp_all add: balR_def)
 
-lemma avl_node_bal_l: 
+lemma avl_balL: 
   assumes "avl l" "avl r" and "height l = height r \<or> height l = height r + 1
     \<or> height r = height l + 1 \<or> height l = height r + 2" 
-  shows "avl(node_bal_l l a r)"
+  shows "avl(balL l a r)"
 proof(cases l)
   case Leaf
-  with assms show ?thesis by (simp add: node_def node_bal_l_def)
+  with assms show ?thesis by (simp add: node_def balL_def)
 next
   case (Node ln ll lr lh)
   with assms show ?thesis
   proof(cases "height l = height r + 2")
     case True
     from True Node assms show ?thesis
-      by (auto simp: node_bal_l_def intro!: avl_node split: tree.split) arith+
+      by (auto simp: balL_def intro!: avl_node split: tree.split) arith+
   next
     case False
-    with assms show ?thesis by (simp add: avl_node node_bal_l_def)
+    with assms show ?thesis by (simp add: avl_node balL_def)
   qed
 qed
 
-lemma avl_node_bal_r: 
+lemma avl_balR: 
   assumes "avl l" and "avl r" and "height l = height r \<or> height l = height r + 1
     \<or> height r = height l + 1 \<or> height r = height l + 2" 
-  shows "avl(node_bal_r l a r)"
+  shows "avl(balR l a r)"
 proof(cases r)
   case Leaf
-  with assms show ?thesis by (simp add: node_def node_bal_r_def)
+  with assms show ?thesis by (simp add: node_def balR_def)
 next
   case (Node rn rl rr rh)
   with assms show ?thesis
   proof(cases "height r = height l + 2")
     case True
       from True Node assms show ?thesis
-        by (auto simp: node_bal_r_def intro!: avl_node split: tree.split) arith+
+        by (auto simp: balR_def intro!: avl_node split: tree.split) arith+
   next
     case False
-    with assms show ?thesis by (simp add: node_bal_r_def avl_node)
+    with assms show ?thesis by (simp add: balR_def avl_node)
   qed
 qed
 
@@ -237,10 +235,10 @@
     with Node 1 show ?thesis 
     proof(cases "x<a")
       case True
-      with Node 1 show ?thesis by (auto simp add:avl_node_bal_l)
+      with Node 1 show ?thesis by (auto simp add:avl_balL)
     next
       case False
-      with Node 1 `x\<noteq>a` show ?thesis by (auto simp add:avl_node_bal_r)
+      with Node 1 `x\<noteq>a` show ?thesis by (auto simp add:avl_balR)
     qed
   qed
   case 2
@@ -255,12 +253,12 @@
       case True
       with Node 2 show ?thesis
       proof(cases "height (insert x l) = height r + 2")
-        case False with Node 2 `x < a` show ?thesis by (auto simp: height_node_bal_l2)
+        case False with Node 2 `x < a` show ?thesis by (auto simp: height_balL2)
       next
         case True 
-        hence "(height (node_bal_l (insert x l) a r) = height r + 2) \<or>
-          (height (node_bal_l (insert x l) a r) = height r + 3)" (is "?A \<or> ?B")
-          using Node 2 by (intro height_node_bal_l) simp_all
+        hence "(height (balL (insert x l) a r) = height r + 2) \<or>
+          (height (balL (insert x l) a r) = height r + 3)" (is "?A \<or> ?B")
+          using Node 2 by (intro height_balL) simp_all
         thus ?thesis
         proof
           assume ?A
@@ -275,12 +273,12 @@
       with Node 2 show ?thesis 
       proof(cases "height (insert x r) = height l + 2")
         case False
-        with Node 2 `\<not>x < a` show ?thesis by (auto simp: height_node_bal_r2)
+        with Node 2 `\<not>x < a` show ?thesis by (auto simp: height_balR2)
       next
         case True 
-        hence "(height (node_bal_r l a (insert x r)) = height l + 2) \<or>
-          (height (node_bal_r l a (insert x r)) = height l + 3)"  (is "?A \<or> ?B")
-          using Node 2 by (intro height_node_bal_r) simp_all
+        hence "(height (balR l a (insert x r)) = height l + 2) \<or>
+          (height (balR l a (insert x r)) = height l + 3)"  (is "?A \<or> ?B")
+          using Node 2 by (intro height_balR) simp_all
         thus ?thesis 
         proof
           assume ?A
@@ -306,10 +304,10 @@
   case (Node h l a rh rl b rr)
   case 1
   with Node have "avl l" "avl (fst (delete_max (Node rh rl b rr)))" by auto
-  with 1 Node have "avl (node_bal_l l a (fst (delete_max (Node rh rl b rr))))"
-    by (intro avl_node_bal_l) fastforce+
+  with 1 Node have "avl (balL l a (fst (delete_max (Node rh rl b rr))))"
+    by (intro avl_balL) fastforce+
   thus ?case 
-    by (auto simp: height_node_bal_l height_node_bal_l2
+    by (auto simp: height_balL height_balL2
       linorder_class.max.absorb1 linorder_class.max.absorb2
       split:prod.split)
 next
@@ -318,7 +316,7 @@
   let ?r = "Node rh rl b rr"
   let ?r' = "fst (delete_max ?r)"
   from `avl x` Node 2 have "avl l" and "avl ?r" by simp_all
-  thus ?case using Node 2 height_node_bal_l[of l ?r' a] height_node_bal_l2[of l ?r' a]
+  thus ?case using Node 2 height_balL[of l ?r' a] height_balL2[of l ?r' a]
     apply (auto split:prod.splits simp del:avl.simps) by arith+
 qed auto
 
@@ -337,8 +335,8 @@
          height ?l = height(?l') + 1" by (rule avl_delete_max,simp)+
   with `avl t` Node_Node have "height ?l' = height ?r \<or> height ?l' = height ?r + 1
             \<or> height ?r = height ?l' + 1 \<or> height ?r = height ?l' + 2" by fastforce
-  with `avl ?l'` `avl ?r` have "avl(node_bal_r ?l' (snd(delete_max ?l)) ?r)"
-    by (rule avl_node_bal_r)
+  with `avl ?l'` `avl ?r` have "avl(balR ?l' (snd(delete_max ?l)) ?r)"
+    by (rule avl_balR)
   with Node_Node show ?thesis by (auto split:prod.splits)
 qed simp_all
 
@@ -351,7 +349,7 @@
   let ?l = "Node lh ll ln lr"
   let ?r = "Node rh rl rn rr"
   let ?l' = "fst (delete_max ?l)"
-  let ?t' = "node_bal_r ?l' (snd(delete_max ?l)) ?r"
+  let ?t' = "balR ?l' (snd(delete_max ?l)) ?r"
   from `avl t` and Node_Node have "avl ?r" by simp
   from `avl t` and Node_Node have "avl ?l" by simp
   hence "avl(?l')"  by (rule avl_delete_max,simp)
@@ -360,11 +358,11 @@
   have "height t = height ?t' \<or> height t = height ?t' + 1" using  `avl t` Node_Node
   proof(cases "height ?r = height ?l' + 2")
     case False
-    show ?thesis using l'_height t_height False by (subst  height_node_bal_r2[OF `avl ?l'` `avl ?r` False])+ arith
+    show ?thesis using l'_height t_height False by (subst  height_balR2[OF `avl ?l'` `avl ?r` False])+ arith
   next
     case True
     show ?thesis
-    proof(cases rule: disjE[OF height_node_bal_r[OF True `avl ?l'` `avl ?r`, of "snd (delete_max ?l)"]])
+    proof(cases rule: disjE[OF height_balR[OF True `avl ?l'` `avl ?r`, of "snd (delete_max ?l)"]])
       case 1
       thus ?thesis using l'_height t_height True by arith
     next
@@ -393,10 +391,10 @@
     with Node 1 show ?thesis 
     proof(cases "x<n")
       case True
-      with Node 1 show ?thesis by (auto simp add:avl_node_bal_r)
+      with Node 1 show ?thesis by (auto simp add:avl_balR)
     next
       case False
-      with Node 1 `x\<noteq>n` show ?thesis by (auto simp add:avl_node_bal_l)
+      with Node 1 `x\<noteq>n` show ?thesis by (auto simp add:avl_balL)
     qed
   qed
   case 2
@@ -414,38 +412,38 @@
       case True
       show ?thesis
       proof(cases "height r = height (delete x l) + 2")
-        case False with Node 1 `x < n` show ?thesis by(auto simp: node_bal_r_def)
+        case False with Node 1 `x < n` show ?thesis by(auto simp: balR_def)
       next
         case True 
-        hence "(height (node_bal_r (delete x l) n r) = height (delete x l) + 2) \<or>
-          height (node_bal_r (delete x l) n r) = height (delete x l) + 3" (is "?A \<or> ?B")
-          using Node 2 by (intro height_node_bal_r) auto
+        hence "(height (balR (delete x l) n r) = height (delete x l) + 2) \<or>
+          height (balR (delete x l) n r) = height (delete x l) + 3" (is "?A \<or> ?B")
+          using Node 2 by (intro height_balR) auto
         thus ?thesis 
         proof
           assume ?A
-          with `x < n` Node 2 show ?thesis by(auto simp: node_bal_r_def)
+          with `x < n` Node 2 show ?thesis by(auto simp: balR_def)
         next
           assume ?B
-          with `x < n` Node 2 show ?thesis by(auto simp: node_bal_r_def)
+          with `x < n` Node 2 show ?thesis by(auto simp: balR_def)
         qed
       qed
     next
       case False
       show ?thesis
       proof(cases "height l = height (delete x r) + 2")
-        case False with Node 1 `\<not>x < n` `x \<noteq> n` show ?thesis by(auto simp: node_bal_l_def)
+        case False with Node 1 `\<not>x < n` `x \<noteq> n` show ?thesis by(auto simp: balL_def)
       next
         case True 
-        hence "(height (node_bal_l l n (delete x r)) = height (delete x r) + 2) \<or>
-          height (node_bal_l l n (delete x r)) = height (delete x r) + 3" (is "?A \<or> ?B")
-          using Node 2 by (intro height_node_bal_l) auto
+        hence "(height (balL l n (delete x r)) = height (delete x r) + 2) \<or>
+          height (balL l n (delete x r)) = height (delete x r) + 3" (is "?A \<or> ?B")
+          using Node 2 by (intro height_balL) auto
         thus ?thesis 
         proof
           assume ?A
-          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: node_bal_l_def)
+          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: balL_def)
         next
           assume ?B
-          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: node_bal_l_def)
+          with `\<not>x < n` `x \<noteq> n` Node 2 show ?thesis by(auto simp: balL_def)
         qed
       qed
     qed
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Data_Structures/Cmp.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -0,0 +1,21 @@
+(* Author: Tobias Nipkow *)
+
+section {* Three-Way Comparison *}
+
+theory Cmp
+imports Main
+begin
+
+datatype cmp = LT | EQ | GT
+
+class cmp = linorder +
+fixes cmp :: "'a \<Rightarrow> 'a \<Rightarrow> cmp"
+assumes LT[simp]: "cmp x y = LT \<longleftrightarrow> x < y"
+assumes EQ[simp]: "cmp x y = EQ \<longleftrightarrow> x = y"
+assumes GT[simp]: "cmp x y = GT \<longleftrightarrow> x > y"
+
+lemma case_cmp_if[simp]: "(case c of EQ \<Rightarrow> e | LT \<Rightarrow> l | GT \<Rightarrow> g) =
+  (if c = LT then l else if c = GT then g else e)"
+by(simp split: cmp.split)
+
+end
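
The new Cmp theory above only declares the three-way comparison class; no instance ships with this changeset, and the tree theories below merely constrain their element types to sort cmp. As a hedged sketch (not part of this commit; the name cmp_int and the closing proof method are assumptions and may need adjustment), an instantiation for a concrete linorder such as int could look like this:

    (* hypothetical instance of the new cmp class for int; not in this changeset *)
    instantiation int :: cmp
    begin

    definition cmp_int :: "int \<Rightarrow> int \<Rightarrow> cmp" where
    "cmp_int x y = (if x < y then LT else if x = y then EQ else GT)"

    instance
      by intro_classes (auto simp: cmp_int_def)

    end

With an instance like this in scope, the case-based definitions introduced below (insert, delete, lookup on AVL, red-black, splay and 2-3-4 trees) can be used on int keys.
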
--- a/src/HOL/Data_Structures/List_Ins_Del.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/List_Ins_Del.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -55,7 +55,7 @@
 "ins_list x (a#xs) =
   (if x < a then x#a#xs else if x=a then a#xs else a # ins_list x xs)"
 
-lemma set_ins_list[simp]: "elems (ins_list x xs) = insert x (elems xs)"
+lemma set_ins_list: "elems (ins_list x xs) = insert x (elems xs)"
 by(induction xs) auto
 
 lemma distinct_if_sorted: "sorted xs \<Longrightarrow> distinct xs"
@@ -86,7 +86,7 @@
 lemma del_list_idem: "x \<notin> elems xs \<Longrightarrow> del_list x xs = xs"
 by (induct xs) simp_all
 
-lemma elems_del_list_eq [simp]:
+lemma elems_del_list_eq:
   "distinct xs \<Longrightarrow> elems (del_list x xs) = elems xs - {x}"
 apply(induct xs)
  apply simp
--- a/src/HOL/Data_Structures/RBT_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/RBT_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -8,25 +8,26 @@
   Lookup2
 begin
 
-fun update :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) rbt \<Rightarrow> ('a*'b) rbt" where
+fun update :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) rbt \<Rightarrow> ('a*'b) rbt" where
 "update x y Leaf = R Leaf (x,y) Leaf" |
-"update x y (B l (a,b) r) =
-  (if x < a then bal (update x y l) (a,b) r else
-   if x > a then bal l (a,b) (update x y r)
-   else B l (x,y) r)" |
-"update x y (R l (a,b) r) =
-  (if x < a then R (update x y l) (a,b) r else
-   if x > a then R l (a,b) (update x y r)
-   else R l (x,y) r)"
+"update x y (B l (a,b) r) = (case cmp x a of
+  LT \<Rightarrow> bal (update x y l) (a,b) r |
+  GT \<Rightarrow> bal l (a,b) (update x y r) |
+  EQ \<Rightarrow> B l (x,y) r)" |
+"update x y (R l (a,b) r) = (case cmp x a of
+  LT \<Rightarrow> R (update x y l) (a,b) r |
+  GT \<Rightarrow> R l (a,b) (update x y r) |
+  EQ \<Rightarrow> R l (x,y) r)"
 
-fun delete :: "'a::linorder \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
-and deleteL :: "'a::linorder \<Rightarrow> ('a*'b)rbt \<Rightarrow> 'a*'b \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
-and deleteR :: "'a::linorder \<Rightarrow> ('a*'b)rbt \<Rightarrow> 'a*'b \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
+fun delete :: "'a::cmp \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
+and deleteL :: "'a::cmp \<Rightarrow> ('a*'b)rbt \<Rightarrow> 'a*'b \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
+and deleteR :: "'a::cmp \<Rightarrow> ('a*'b)rbt \<Rightarrow> 'a*'b \<Rightarrow> ('a*'b)rbt \<Rightarrow> ('a*'b)rbt"
 where
 "delete x Leaf = Leaf" |
-"delete x (Node c t1 (a,b) t2) = 
-  (if x < a then deleteL x t1 (a,b) t2 else
-   if x > a then deleteR x t1 (a,b) t2 else combine t1 t2)" |
+"delete x (Node c t1 (a,b) t2) = (case cmp x a of
+  LT \<Rightarrow> deleteL x t1 (a,b) t2 |
+  GT \<Rightarrow> deleteR x t1 (a,b) t2 |
+  EQ \<Rightarrow> combine t1 t2)" |
 "deleteL x (B t1 a t2) b t3 = balL (delete x (B t1 a t2)) b t3" |
 "deleteL x t1 a t2 = R (delete x t1) a t2" |
 "deleteR x t1 a (B t2 b t3) = balR t1 a (delete x (B t2 b t3))" | 
@@ -50,7 +51,6 @@
 by(induction x t1 and x t1 a t2 and x t1 a t2 rule: delete_deleteL_deleteR.induct)
   (auto simp: del_list_simps inorder_combine inorder_balL inorder_balR)
 
-
 interpretation Map_by_Ordered
 where empty = Leaf and lookup = lookup and update = update and delete = delete
 and inorder = inorder and wf = "\<lambda>_. True"
--- a/src/HOL/Data_Structures/RBT_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/RBT_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,26 +5,30 @@
 theory RBT_Set
 imports
   RBT
+  Cmp
   Isin2
 begin
 
-fun insert :: "'a::linorder \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt" where
+fun insert :: "'a::cmp \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt" where
 "insert x Leaf = R Leaf x Leaf" |
-"insert x (B l a r) =
-  (if x < a then bal (insert x l) a r else
-   if x > a then bal l a (insert x r) else B l a r)" |
-"insert x (R l a r) =
-  (if x < a then R (insert x l) a r
-   else if x > a then R l a (insert x r) else R l a r)"
+"insert x (B l a r) = (case cmp x a of
+  LT \<Rightarrow> bal (insert x l) a r |
+  GT \<Rightarrow> bal l a (insert x r) |
+  EQ \<Rightarrow> B l a r)" |
+"insert x (R l a r) = (case cmp x a of
+  LT \<Rightarrow> R (insert x l) a r |
+  GT \<Rightarrow> R l a (insert x r) |
+  EQ \<Rightarrow> R l a r)"
 
-fun delete :: "'a::linorder \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
-and deleteL :: "'a::linorder \<Rightarrow> 'a rbt \<Rightarrow> 'a \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
-and deleteR :: "'a::linorder \<Rightarrow> 'a rbt \<Rightarrow> 'a \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
+fun delete :: "'a::cmp \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
+and deleteL :: "'a::cmp \<Rightarrow> 'a rbt \<Rightarrow> 'a \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
+and deleteR :: "'a::cmp \<Rightarrow> 'a rbt \<Rightarrow> 'a \<Rightarrow> 'a rbt \<Rightarrow> 'a rbt"
 where
 "delete x Leaf = Leaf" |
-"delete x (Node _ l a r) = 
-  (if x < a then deleteL x l a r 
-   else if x > a then deleteR x l a r else combine l r)" |
+"delete x (Node _ l a r) = (case cmp x a of
+  LT \<Rightarrow> deleteL x l a r |
+  GT \<Rightarrow> deleteR x l a r |
+  EQ \<Rightarrow> combine l r)" |
 "deleteL x (B t1 a t2) b t3 = balL (delete x (B t1 a t2)) b t3" |
 "deleteL x l a r = R (delete x l) a r" |
 "deleteR x t1 a (B t2 b t3) = balR t1 a (delete x (B t2 b t3))" | 
@@ -66,9 +70,10 @@
 by(induction x t and x l a r and x l a r rule: delete_deleteL_deleteR.induct)
   (auto simp: del_list_simps inorder_combine inorder_balL inorder_balR)
 
+
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert and delete = delete
-and inorder = inorder and wf = "\<lambda>_. True"
+and inorder = inorder and inv = "\<lambda>_. True"
 proof (standard, goal_cases)
   case 1 show ?case by simp
 next
--- a/src/HOL/Data_Structures/Set_by_Ordered.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Set_by_Ordered.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -27,36 +27,36 @@
 fixes delete :: "'a \<Rightarrow> 't \<Rightarrow> 't"
 fixes isin :: "'t \<Rightarrow> 'a \<Rightarrow> bool"
 fixes inorder :: "'t \<Rightarrow> 'a list"
-fixes wf :: "'t \<Rightarrow> bool"
+fixes inv :: "'t \<Rightarrow> bool"
 assumes empty: "inorder empty = []"
-assumes isin: "wf t \<and> sorted(inorder t) \<Longrightarrow>
+assumes isin: "inv t \<and> sorted(inorder t) \<Longrightarrow>
   isin t x = (x \<in> elems (inorder t))"
-assumes insert: "wf t \<and> sorted(inorder t) \<Longrightarrow>
+assumes insert: "inv t \<and> sorted(inorder t) \<Longrightarrow>
   inorder(insert x t) = ins_list x (inorder t)"
-assumes delete: "wf t \<and> sorted(inorder t) \<Longrightarrow>
+assumes delete: "inv t \<and> sorted(inorder t) \<Longrightarrow>
   inorder(delete x t) = del_list x (inorder t)"
-assumes wf_empty:  "wf empty"
-assumes wf_insert: "wf t \<and> sorted(inorder t) \<Longrightarrow> wf(insert x t)"
-assumes wf_delete: "wf t \<and> sorted(inorder t) \<Longrightarrow> wf(delete x t)"
+assumes inv_empty:  "inv empty"
+assumes inv_insert: "inv t \<and> sorted(inorder t) \<Longrightarrow> inv(insert x t)"
+assumes inv_delete: "inv t \<and> sorted(inorder t) \<Longrightarrow> inv(delete x t)"
 begin
 
 sublocale Set
-  empty insert delete isin "elems o inorder" "\<lambda>t. wf t \<and> sorted(inorder t)"
+  empty insert delete isin "elems o inorder" "\<lambda>t. inv t \<and> sorted(inorder t)"
 proof(standard, goal_cases)
   case 1 show ?case by (auto simp: empty)
 next
   case 2 thus ?case by(simp add: isin)
 next
-  case 3 thus ?case by(simp add: insert)
+  case 3 thus ?case by(simp add: insert set_ins_list)
 next
-  case (4 s x) show ?case
-    using delete[OF 4, of x] 4 by (auto simp: distinct_if_sorted)
+  case (4 s x) thus ?case
+    using delete[OF 4, of x] by (auto simp: distinct_if_sorted elems_del_list_eq)
 next
-  case 5 thus ?case by(simp add: empty wf_empty)
+  case 5 thus ?case by(simp add: empty inv_empty)
 next
-  case 6 thus ?case by(simp add: insert wf_insert sorted_ins_list)
+  case 6 thus ?case by(simp add: insert inv_insert sorted_ins_list)
 next
-  case 7 thus ?case by (auto simp: delete wf_delete sorted_del_list)
+  case 7 thus ?case by (auto simp: delete inv_delete sorted_del_list)
 qed
 
 end
--- a/src/HOL/Data_Structures/Splay_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Splay_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -42,35 +42,30 @@
 termination splay
 by lexicographic_order
 
-lemma splay_code: "splay x t = (case t of Leaf \<Rightarrow> Leaf |
-  Node al a ar \<Rightarrow>
-  (if x = fst a then t else
-   if x < fst a then
-     case al of
-       Leaf \<Rightarrow> t |
-       Node bl b br \<Rightarrow>
-         (if x = fst b then Node bl b (Node br a ar) else
-          if x < fst b then
-            if bl = Leaf then Node bl b (Node br a ar)
-            else case splay x bl of
-                   Node bll y blr \<Rightarrow> Node bll y (Node blr b (Node br a ar))
-          else
-          if br = Leaf then Node bl b (Node br a ar)
-          else case splay x br of
-                 Node brl y brr \<Rightarrow> Node (Node bl b brl) y (Node brr a ar))
-   else
-   case ar of
-     Leaf \<Rightarrow> t |
-     Node bl b br \<Rightarrow>
-       (if x = fst b then Node (Node al a bl) b br else
-        if x < fst b then
-          if bl = Leaf then Node (Node al a bl) b br
-          else case splay x bl of
-                 Node bll y blr \<Rightarrow> Node (Node al a bll) y (Node blr b br)
-        else if br=Leaf then Node (Node al a bl) b br
-             else case splay x br of
-                    Node bll y blr \<Rightarrow> Node (Node (Node al a bl) b bll) y blr)))"
-by(auto split: tree.split)
+lemma splay_code: "splay (x::_::cmp) t = (case t of Leaf \<Rightarrow> Leaf |
+  Node al a ar \<Rightarrow> (case cmp x (fst a) of
+    EQ \<Rightarrow> t |
+    LT \<Rightarrow> (case al of
+      Leaf \<Rightarrow> t |
+      Node bl b br \<Rightarrow> (case cmp x (fst b) of
+        EQ \<Rightarrow> Node bl b (Node br a ar) |
+        LT \<Rightarrow> if bl = Leaf then Node bl b (Node br a ar)
+              else case splay x bl of
+                Node bll y blr \<Rightarrow> Node bll y (Node blr b (Node br a ar)) |
+        GT \<Rightarrow> if br = Leaf then Node bl b (Node br a ar)
+              else case splay x br of
+                Node brl y brr \<Rightarrow> Node (Node bl b brl) y (Node brr a ar))) |
+    GT \<Rightarrow> (case ar of
+      Leaf \<Rightarrow> t |
+      Node bl b br \<Rightarrow> (case cmp x (fst b) of
+        EQ \<Rightarrow> Node (Node al a bl) b br |
+        LT \<Rightarrow> if bl = Leaf then Node (Node al a bl) b br
+              else case splay x bl of
+                Node bll y blr \<Rightarrow> Node (Node al a bll) y (Node blr b br) |
+        GT \<Rightarrow> if br=Leaf then Node (Node al a bl) b br
+              else case splay x br of
+                Node bll y blr \<Rightarrow> Node (Node (Node al a bl) b bll) y blr))))"
+by(auto cong: case_tree_cong split: tree.split)
 
 definition lookup :: "('a*'b)tree \<Rightarrow> 'a::linorder \<Rightarrow> 'b option" where "lookup t x =
   (case splay x t of Leaf \<Rightarrow> None | Node _ (a,b) _ \<Rightarrow> if x=a then Some b else None)"
--- a/src/HOL/Data_Structures/Splay_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Splay_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,6 +1,6 @@
 (*
 Author: Tobias Nipkow
-Function defs follows AFP entry Splay_Tree, proofs are new.
+Function defs follow AFP entry Splay_Tree, proofs are new.
 *)
 
 section "Splay Tree Implementation of Sets"
@@ -9,6 +9,7 @@
 imports
   "~~/src/HOL/Library/Tree"
   Set_by_Ordered
+  Cmp
 begin
 
 function splay :: "'a::linorder \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
@@ -45,35 +46,35 @@
 termination splay
 by lexicographic_order
 
-lemma splay_code: "splay x t = (case t of Leaf \<Rightarrow> Leaf |
-  Node al a ar \<Rightarrow>
-  (if x=a then t else
-   if x < a then
-     case al of
-       Leaf \<Rightarrow> t |
-       Node bl b br \<Rightarrow>
-         (if x=b then Node bl b (Node br a ar) else
-          if x < b then
-            if bl = Leaf then Node bl b (Node br a ar)
-            else case splay x bl of
-                   Node bll y blr \<Rightarrow> Node bll y (Node blr b (Node br a ar))
-          else
-          if br = Leaf then Node bl b (Node br a ar)
-          else case splay x br of
-                 Node brl y brr \<Rightarrow> Node (Node bl b brl) y (Node brr a ar))
-   else
-   case ar of
-     Leaf \<Rightarrow> t |
-     Node bl b br \<Rightarrow>
-       (if x=b then Node (Node al a bl) b br else
-        if x < b then
-          if bl = Leaf then Node (Node al a bl) b br
-          else case splay x bl of
-                 Node bll y blr \<Rightarrow> Node (Node al a bll) y (Node blr b br)
-        else if br=Leaf then Node (Node al a bl) b br
-             else case splay x br of
-                    Node bll y blr \<Rightarrow> Node (Node (Node al a bl) b bll) y blr)))"
-by(auto split: tree.split)
+(* no idea why this speeds things up below *)
+lemma case_tree_cong:
+  "\<lbrakk> x = x'; y = y'; z = z' \<rbrakk> \<Longrightarrow> case_tree x y z = case_tree x' y' z'"
+by auto
+
+lemma splay_code: "splay (x::_::cmp) t = (case t of Leaf \<Rightarrow> Leaf |
+  Node al a ar \<Rightarrow> (case cmp x a of
+    EQ \<Rightarrow> t |
+    LT \<Rightarrow> (case al of
+      Leaf \<Rightarrow> t |
+      Node bl b br \<Rightarrow> (case cmp x b of
+        EQ \<Rightarrow> Node bl b (Node br a ar) |
+        LT \<Rightarrow> if bl = Leaf then Node bl b (Node br a ar)
+              else case splay x bl of
+                Node bll y blr \<Rightarrow> Node bll y (Node blr b (Node br a ar)) |
+        GT \<Rightarrow> if br = Leaf then Node bl b (Node br a ar)
+              else case splay x br of
+                Node brl y brr \<Rightarrow> Node (Node bl b brl) y (Node brr a ar))) |
+    GT \<Rightarrow> (case ar of
+      Leaf \<Rightarrow> t |
+      Node bl b br \<Rightarrow> (case cmp x b of
+        EQ \<Rightarrow> Node (Node al a bl) b br |
+        LT \<Rightarrow> if bl = Leaf then Node (Node al a bl) b br
+              else case splay x bl of
+                Node bll y blr \<Rightarrow> Node (Node al a bll) y (Node blr b br) |
+        GT \<Rightarrow> if br=Leaf then Node (Node al a bl) b br
+              else case splay x br of
+                Node bll y blr \<Rightarrow> Node (Node (Node al a bl) b bll) y blr))))"
+by(auto cong: case_tree_cong split: tree.split)
 
 definition is_root :: "'a \<Rightarrow> 'a tree \<Rightarrow> bool" where
 "is_root a t = (case t of Leaf \<Rightarrow> False | Node _ x _ \<Rightarrow> x = a)"
@@ -197,7 +198,7 @@
 
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert
-and delete = delete and inorder = inorder and wf = "\<lambda>_. True"
+and delete = delete and inorder = inorder and inv = "\<lambda>_. True"
 proof (standard, goal_cases)
   case 2 thus ?case by(simp add: isin_set)
 next
--- a/src/HOL/Data_Structures/Tree234_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree234_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -10,118 +10,105 @@
 
 subsection \<open>Map operations on 2-3-4 trees\<close>
 
-fun lookup :: "('a::linorder * 'b) tree234 \<Rightarrow> 'a \<Rightarrow> 'b option" where
+fun lookup :: "('a::cmp * 'b) tree234 \<Rightarrow> 'a \<Rightarrow> 'b option" where
 "lookup Leaf x = None" |
-"lookup (Node2 l (a,b) r) x =
-  (if x < a then lookup l x else
-  if a < x then lookup r x else Some b)" |
-"lookup (Node3 l (a1,b1) m (a2,b2) r) x =
-  (if x < a1 then lookup l x else
-   if x = a1 then Some b1 else
-   if x < a2 then lookup m x else
-   if x = a2 then Some b2
-   else lookup r x)" |
-"lookup (Node4 l (a1,b1) m (a2,b2) n (a3,b3) r) x =
-  (if x < a2 then
-     if x = a1 then Some b1 else
-     if x < a1 then lookup l x else lookup m x
-   else
-     if x = a2 then Some b2 else
-     if x = a3 then Some b3 else
-     if x < a3 then lookup n x
-     else lookup r x)"
+"lookup (Node2 l (a,b) r) x = (case cmp x a of
+  LT \<Rightarrow> lookup l x |
+  GT \<Rightarrow> lookup r x |
+  EQ \<Rightarrow> Some b)" |
+"lookup (Node3 l (a1,b1) m (a2,b2) r) x = (case cmp x a1 of
+  LT \<Rightarrow> lookup l x |
+  EQ \<Rightarrow> Some b1 |
+  GT \<Rightarrow> (case cmp x a2 of
+          LT \<Rightarrow> lookup m x |
+          EQ \<Rightarrow> Some b2 |
+          GT \<Rightarrow> lookup r x))" |
+"lookup (Node4 t1 (a1,b1) t2 (a2,b2) t3 (a3,b3) t4) x = (case cmp x a2 of
+  LT \<Rightarrow> (case cmp x a1 of
+           LT \<Rightarrow> lookup t1 x | EQ \<Rightarrow> Some b1 | GT \<Rightarrow> lookup t2 x) |
+  EQ \<Rightarrow> Some b2 |
+  GT \<Rightarrow> (case cmp x a3 of
+           LT \<Rightarrow> lookup t3 x | EQ \<Rightarrow> Some b3 | GT \<Rightarrow> lookup t4 x))"
 
-fun upd :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) up\<^sub>i" where
+fun upd :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) up\<^sub>i" where
 "upd x y Leaf = Up\<^sub>i Leaf (x,y) Leaf" |
-"upd x y (Node2 l ab r) =
-   (if x < fst ab then
-        (case upd x y l of
+"upd x y (Node2 l ab r) = (case cmp x (fst ab) of
+   LT \<Rightarrow> (case upd x y l of
            T\<^sub>i l' => T\<^sub>i (Node2 l' ab r)
-         | Up\<^sub>i l1 q l2 => T\<^sub>i (Node3 l1 q l2 ab r))
-    else if x = fst ab then T\<^sub>i (Node2 l (x,y) r)
-    else
-        (case upd x y r of
+         | Up\<^sub>i l1 ab' l2 => T\<^sub>i (Node3 l1 ab' l2 ab r)) |
+   EQ \<Rightarrow> T\<^sub>i (Node2 l (x,y) r) |
+   GT \<Rightarrow> (case upd x y r of
            T\<^sub>i r' => T\<^sub>i (Node2 l ab r')
-         | Up\<^sub>i r1 q r2 => T\<^sub>i (Node3 l ab r1 q r2)))" |
-"upd x y (Node3 l ab1 m ab2 r) =
-   (if x < fst ab1 then
-        (case upd x y l of
+         | Up\<^sub>i r1 ab' r2 => T\<^sub>i (Node3 l ab r1 ab' r2)))" |
+"upd x y (Node3 l ab1 m ab2 r) = (case cmp x (fst ab1) of
+   LT \<Rightarrow> (case upd x y l of
            T\<^sub>i l' => T\<^sub>i (Node3 l' ab1 m ab2 r)
-         | Up\<^sub>i l1 q l2 => Up\<^sub>i (Node2 l1 q l2) ab1 (Node2 m ab2 r))
-    else if x = fst ab1 then T\<^sub>i (Node3 l (x,y) m ab2 r)
-    else if x < fst ab2 then
-             (case upd x y m of
-                T\<^sub>i m' => T\<^sub>i (Node3 l ab1 m' ab2 r)
-              | Up\<^sub>i m1 q m2 => Up\<^sub>i (Node2 l ab1 m1) q (Node2 m2 ab2 r))
-         else if x = fst ab2 then T\<^sub>i (Node3 l ab1 m (x,y) r)
-         else
-             (case upd x y r of
-                T\<^sub>i r' => T\<^sub>i (Node3 l ab1 m ab2 r')
-              | Up\<^sub>i r1 q r2 => Up\<^sub>i (Node2 l ab1 m) ab2 (Node2 r1 q r2)))" |
-"upd x y (Node4 l ab1 m ab2 n ab3 r) =
-   (if x < fst ab2 then
-      if x < fst ab1 then
-        (case upd x y l of
-           T\<^sub>i l' => T\<^sub>i (Node4 l' ab1 m ab2 n ab3 r)
-         | Up\<^sub>i l1 q l2 => Up\<^sub>i (Node2 l1 q l2) ab1 (Node3 m ab2 n ab3 r))
-      else
-      if x = fst ab1 then T\<^sub>i (Node4 l (x,y) m ab2 n ab3 r)
-      else
-        (case upd x y m of
-           T\<^sub>i m' => T\<^sub>i (Node4 l ab1 m' ab2 n ab3 r)
-         | Up\<^sub>i m1 q m2 => Up\<^sub>i (Node2 l ab1 m1) q (Node3 m2 ab2 n ab3 r))
-    else
-    if x = fst ab2 then T\<^sub>i (Node4 l ab1 m (x,y) n ab3 r) else
-    if x < fst ab3 then
-      (case upd x y n of
-         T\<^sub>i n' => T\<^sub>i (Node4 l ab1 m ab2 n' ab3 r)
-       | Up\<^sub>i n1 q n2 => Up\<^sub>i (Node2 l ab1 m) ab2(*q*) (Node3 n1 q n2 ab3 r))
-    else
-    if x = fst ab3 then T\<^sub>i (Node4 l ab1 m ab2 n (x,y) r)
-    else
-      (case upd x y r of
-         T\<^sub>i r' => T\<^sub>i (Node4 l ab1 m ab2 n ab3 r')
-       | Up\<^sub>i r1 q r2 => Up\<^sub>i (Node2 l ab1 m) ab2 (Node3 n ab3 r1 q r2)))"
+         | Up\<^sub>i l1 ab' l2 => Up\<^sub>i (Node2 l1 ab' l2) ab1 (Node2 m ab2 r)) |
+   EQ \<Rightarrow> T\<^sub>i (Node3 l (x,y) m ab2 r) |
+   GT \<Rightarrow> (case cmp x (fst ab2) of
+           LT \<Rightarrow> (case upd x y m of
+                   T\<^sub>i m' => T\<^sub>i (Node3 l ab1 m' ab2 r)
+                 | Up\<^sub>i m1 ab' m2 => Up\<^sub>i (Node2 l ab1 m1) ab' (Node2 m2 ab2 r)) |
+           EQ \<Rightarrow> T\<^sub>i (Node3 l ab1 m (x,y) r) |
+           GT \<Rightarrow> (case upd x y r of
+                   T\<^sub>i r' => T\<^sub>i (Node3 l ab1 m ab2 r')
+                 | Up\<^sub>i r1 ab' r2 => Up\<^sub>i (Node2 l ab1 m) ab2 (Node2 r1 ab' r2))))" |
+"upd x y (Node4 t1 ab1 t2 ab2 t3 ab3 t4) = (case cmp x (fst ab2) of
+   LT \<Rightarrow> (case cmp x (fst ab1) of
+            LT \<Rightarrow> (case upd x y t1 of
+                     T\<^sub>i t1' => T\<^sub>i (Node4 t1' ab1 t2 ab2 t3 ab3 t4)
+                  | Up\<^sub>i t11 q t12 => Up\<^sub>i (Node2 t11 q t12) ab1 (Node3 t2 ab2 t3 ab3 t4)) |
+            EQ \<Rightarrow> T\<^sub>i (Node4 t1 (x,y) t2 ab2 t3 ab3 t4) |
+            GT \<Rightarrow> (case upd x y t2 of
+                    T\<^sub>i t2' => T\<^sub>i (Node4 t1 ab1 t2' ab2 t3 ab3 t4)
+                  | Up\<^sub>i t21 q t22 => Up\<^sub>i (Node2 t1 ab1 t21) q (Node3 t22 ab2 t3 ab3 t4))) |
+   EQ \<Rightarrow> T\<^sub>i (Node4 t1 ab1 t2 (x,y) t3 ab3 t4) |
+   GT \<Rightarrow> (case cmp x (fst ab3) of
+            LT \<Rightarrow> (case upd x y t3 of
+                    T\<^sub>i t3' \<Rightarrow> T\<^sub>i (Node4 t1 ab1 t2 ab2 t3' ab3 t4)
+                  | Up\<^sub>i t31 q t32 => Up\<^sub>i (Node2 t1 ab1 t2) ab2(*q*) (Node3 t31 q t32 ab3 t4)) |
+            EQ \<Rightarrow> T\<^sub>i (Node4 t1 ab1 t2 ab2 t3 (x,y) t4) |
+            GT \<Rightarrow> (case upd x y t4 of
+                    T\<^sub>i t4' => T\<^sub>i (Node4 t1 ab1 t2 ab2 t3 ab3 t4')
+                  | Up\<^sub>i t41 q t42 => Up\<^sub>i (Node2 t1 ab1 t2) ab2 (Node3 t3 ab3 t41 q t42))))"
+
+definition update :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) tree234" where
+"update x y t = tree\<^sub>i(upd x y t)"
 
-definition update :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) tree234" where
-"update a b t = tree\<^sub>i(upd a b t)"
-
-fun del :: "'a::linorder \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) up\<^sub>d"
-where
-"del k Leaf = T\<^sub>d Leaf" |
-"del k (Node2 Leaf p Leaf) = (if k=fst p then Up\<^sub>d Leaf else T\<^sub>d(Node2 Leaf p Leaf))" |
-"del k (Node3 Leaf p Leaf q Leaf) =
-  T\<^sub>d(if k=fst p then Node2 Leaf q Leaf else
-     if k=fst q then Node2 Leaf p Leaf
-     else Node3 Leaf p Leaf q Leaf)" |
-"del k (Node4 Leaf ab1 Leaf ab2 Leaf ab3 Leaf) =
-  T\<^sub>d(if k=fst ab1 then Node3 Leaf ab2 Leaf ab3 Leaf else
-     if k=fst ab2 then Node3 Leaf ab1 Leaf ab3 Leaf else
-     if k=fst ab3 then Node3 Leaf ab1 Leaf ab2 Leaf
+fun del :: "'a::cmp \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) up\<^sub>d" where
+"del x Leaf = T\<^sub>d Leaf" |
+"del x (Node2 Leaf ab1 Leaf) = (if x=fst ab1 then Up\<^sub>d Leaf else T\<^sub>d(Node2 Leaf ab1 Leaf))" |
+"del x (Node3 Leaf ab1 Leaf ab2 Leaf) = T\<^sub>d(if x=fst ab1 then Node2 Leaf ab2 Leaf
+  else if x=fst ab2 then Node2 Leaf ab1 Leaf else Node3 Leaf ab1 Leaf ab2 Leaf)" |
+"del x (Node4 Leaf ab1 Leaf ab2 Leaf ab3 Leaf) =
+  T\<^sub>d(if x = fst ab1 then Node3 Leaf ab2 Leaf ab3 Leaf else
+     if x = fst ab2 then Node3 Leaf ab1 Leaf ab3 Leaf else
+     if x = fst ab3 then Node3 Leaf ab1 Leaf ab2 Leaf
      else Node4 Leaf ab1 Leaf ab2 Leaf ab3 Leaf)" |
-"del k (Node2 l a r) =
-  (if k<fst a then node21 (del k l) a r else
-   if k > fst a then node22 l a (del k r)
-   else let (a',t) = del_min r in node22 l a' t)" |
-"del k (Node3 l a m b r) =
-  (if k<fst a then node31 (del k l) a m b r else
-   if k = fst a then let (a',m') = del_min m in node32 l a' m' b r else
-   if k < fst b then node32 l a (del k m) b r else
-   if k = fst b then let (b',r') = del_min r in node33 l a m b' r'
-   else node33 l a m b (del k r))" |
-"del x (Node4 l ab1 m ab2 n ab3 r) =
-  (if x < fst ab2 then
-     if x < fst ab1 then node41 (del x l) ab1 m ab2 n ab3 r else
-     if x = fst ab1 then let (ab',m') = del_min m in node42 l ab' m' ab2 n ab3 r
-     else node42 l ab1 (del x m) ab2 n ab3 r
-   else
-     if x = fst ab2 then let (ab',n') = del_min n in node43 l ab1 m ab' n' ab3 r else
-     if x < fst ab3 then node43 l ab1 m ab2 (del x n) ab3 r else
-     if x = fst ab3 then let (ab',r') = del_min r in node44 l ab1 m ab2 n ab' r'
-     else node44 l ab1 m ab2 n ab3 (del x r))"
+"del x (Node2 l ab1 r) = (case cmp x (fst ab1) of
+  LT \<Rightarrow> node21 (del x l) ab1 r |
+  GT \<Rightarrow> node22 l ab1 (del x r) |
+  EQ \<Rightarrow> let (ab1',t) = del_min r in node22 l ab1' t)" |
+"del x (Node3 l ab1 m ab2 r) = (case cmp x (fst ab1) of
+  LT \<Rightarrow> node31 (del x l) ab1 m ab2 r |
+  EQ \<Rightarrow> let (ab1',m') = del_min m in node32 l ab1' m' ab2 r |
+  GT \<Rightarrow> (case cmp x (fst ab2) of
+           LT \<Rightarrow> node32 l ab1 (del x m) ab2 r |
+           EQ \<Rightarrow> let (ab2',r') = del_min r in node33 l ab1 m ab2' r' |
+           GT \<Rightarrow> node33 l ab1 m ab2 (del x r)))" |
+"del x (Node4 t1 ab1 t2 ab2 t3 ab3 t4) = (case cmp x (fst ab2) of
+  LT \<Rightarrow> (case cmp x (fst ab1) of
+           LT \<Rightarrow> node41 (del x t1) ab1 t2 ab2 t3 ab3 t4 |
+           EQ \<Rightarrow> let (ab',t2') = del_min t2 in node42 t1 ab' t2' ab2 t3 ab3 t4 |
+           GT \<Rightarrow> node42 t1 ab1 (del x t2) ab2 t3 ab3 t4) |
+  EQ \<Rightarrow> let (ab',t3') = del_min t3 in node43 t1 ab1 t2 ab' t3' ab3 t4 |
+  GT \<Rightarrow> (case cmp x (fst ab3) of
+          LT \<Rightarrow> node43 t1 ab1 t2 ab2 (del x t3) ab3 t4 |
+          EQ \<Rightarrow> let (ab',t4') = del_min t4 in node44 t1 ab1 t2 ab2 t3 ab' t4' |
+          GT \<Rightarrow> node44 t1 ab1 t2 ab2 t3 ab3 (del x t4)))"
 
-definition delete :: "'a::linorder \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) tree234" where
-"delete k t = tree\<^sub>d(del k t)"
+definition delete :: "'a::cmp \<Rightarrow> ('a*'b) tree234 \<Rightarrow> ('a*'b) tree234" where
+"delete x t = tree\<^sub>d(del x t)"
 
 
 subsection "Functional correctness"
@@ -144,7 +131,7 @@
   inorder(tree\<^sub>d (del x t)) = del_list x (inorder t)"
 by(induction t rule: del.induct)
   ((auto simp: del_list_simps inorder_nodes del_minD split: prod.splits)[1])+
-(* 290 secs (2015) *)
+(* 200 secs (2015) *)
 
 lemma inorder_delete: "\<lbrakk> bal t ; sorted1(inorder t) \<rbrakk> \<Longrightarrow>
   inorder(delete x t) = del_list x (inorder t)"
@@ -154,7 +141,7 @@
 subsection \<open>Balancedness\<close>
 
 lemma bal_upd: "bal t \<Longrightarrow> bal (tree\<^sub>i(upd x y t)) \<and> height(upd x y t) = height t"
-by (induct t) (auto, auto split: up\<^sub>i.split) (* 33 secs (2015) *)
+by (induct t) (auto, auto split: up\<^sub>i.split) (* 20 secs (2015) *)
 
 lemma bal_update: "bal t \<Longrightarrow> bal (update x y t)"
 by (simp add: update_def bal_upd)
@@ -163,11 +150,12 @@
 lemma height_del: "bal t \<Longrightarrow> height(del x t) = height t"
 by(induction x t rule: del.induct)
   (auto simp add: heights height_del_min split: prod.split)
+(* 20 secs (2015) *)
 
 lemma bal_tree\<^sub>d_del: "bal t \<Longrightarrow> bal(tree\<^sub>d(del x t))"
 by(induction x t rule: del.induct)
   (auto simp: bals bal_del_min height_del height_del_min split: prod.split)
-(* 110 secs (2015) *)
+(* 100 secs (2015) *)
 
 corollary bal_delete: "bal t \<Longrightarrow> bal(delete x t)"
 by(simp add: delete_def bal_tree\<^sub>d_del)
--- a/src/HOL/Data_Structures/Tree234_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree234_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,19 +5,29 @@
 theory Tree234_Set
 imports
   Tree234
+  Cmp
   "../Data_Structures/Set_by_Ordered"
 begin
 
 subsection \<open>Set operations on 2-3-4 trees\<close>
 
-fun isin :: "'a::linorder tree234 \<Rightarrow> 'a \<Rightarrow> bool" where
+fun isin :: "'a::cmp tree234 \<Rightarrow> 'a \<Rightarrow> bool" where
 "isin Leaf x = False" |
-"isin (Node2 l a r) x = (x < a \<and> isin l x \<or> x=a \<or> isin r x)" |
+"isin (Node2 l a r) x =
+  (case cmp x a of LT \<Rightarrow> isin l x | EQ \<Rightarrow> True | GT \<Rightarrow> isin r x)" |
 "isin (Node3 l a m b r) x =
-  (x < a \<and> isin l x \<or> x = a \<or> x < b \<and> isin m x \<or> x = b \<or> isin r x)" |
-"isin (Node4 l a m b n c r) x =
-  (x < b \<and> (x < a \<and> isin l x \<or> x = a \<or> isin m x) \<or> x = b \<or>
-   x > b \<and> (x < c \<and> isin n x \<or> x=c \<or> isin r x))"
+  (case cmp x a of LT \<Rightarrow> isin l x | EQ \<Rightarrow> True | GT \<Rightarrow> (case cmp x b of
+   LT \<Rightarrow> isin m x | EQ \<Rightarrow> True | GT \<Rightarrow> isin r x))" |
+"isin (Node4 t1 a t2 b t3 c t4) x = (case cmp x b of
+  LT \<Rightarrow> (case cmp x a of
+          LT \<Rightarrow> isin t1 x |
+          EQ \<Rightarrow> True |
+          GT \<Rightarrow> isin t2 x) |
+  EQ \<Rightarrow> True |
+  GT \<Rightarrow> (case cmp x c of
+          LT \<Rightarrow> isin t3 x |
+          EQ \<Rightarrow> True |
+          GT \<Rightarrow> isin t4 x))"
 
 datatype 'a up\<^sub>i = T\<^sub>i "'a tree234" | Up\<^sub>i "'a tree234" 'a "'a tree234"
 
@@ -25,33 +35,31 @@
 "tree\<^sub>i (T\<^sub>i t) = t" |
 "tree\<^sub>i (Up\<^sub>i l p r) = Node2 l p r"
 
-fun ins :: "'a::linorder \<Rightarrow> 'a tree234 \<Rightarrow> 'a up\<^sub>i" where
-"ins a Leaf = Up\<^sub>i Leaf a Leaf" |
-"ins a (Node2 l x r) =
-   (if a < x then
-        (case ins a l of
-           T\<^sub>i l' => T\<^sub>i (Node2 l' x r)
-         | Up\<^sub>i l1 q l2 => T\<^sub>i (Node3 l1 q l2 x r))
-    else if a=x then T\<^sub>i (Node2 l x r)
-    else
-        (case ins a r of
-           T\<^sub>i r' => T\<^sub>i (Node2 l x r')
-         | Up\<^sub>i r1 q r2 => T\<^sub>i (Node3 l x r1 q r2)))" |
-"ins a (Node3 l x1 m x2 r) =
-   (if a < x1 then
-        (case ins a l of
-           T\<^sub>i l' => T\<^sub>i (Node3 l' x1 m x2 r)
-         | Up\<^sub>i l1 q l2 => T\<^sub>i (Node4 l1 q l2 x1 m x2 r))
-    else if a=x1 then T\<^sub>i (Node3 l x1 m x2 r)
-    else if a < x2 then
-             (case ins a m of
-                T\<^sub>i m' => T\<^sub>i (Node3 l x1 m' x2 r)
-              | Up\<^sub>i m1 q m2 => T\<^sub>i (Node4 l x1 m1 q m2 x2 r))
-         else if a=x2 then T\<^sub>i (Node3 l x1 m x2 r)
-         else
-             (case ins a r of
-                T\<^sub>i r' => T\<^sub>i (Node3 l x1 m x2 r')
-              | Up\<^sub>i r1 q r2 => T\<^sub>i (Node4 l x1 m x2 r1 q r2)))" |
+fun ins :: "'a::cmp \<Rightarrow> 'a tree234 \<Rightarrow> 'a up\<^sub>i" where
+"ins x Leaf = Up\<^sub>i Leaf x Leaf" |
+"ins x (Node2 l a r) =
+   (case cmp x a of
+      LT \<Rightarrow> (case ins x l of
+              T\<^sub>i l' => T\<^sub>i (Node2 l' a r)
+            | Up\<^sub>i l1 b l2 => T\<^sub>i (Node3 l1 b l2 a r)) |
+      EQ \<Rightarrow> T\<^sub>i (Node2 l x r) |
+      GT \<Rightarrow> (case ins x r of
+              T\<^sub>i r' => T\<^sub>i (Node2 l a r')
+            | Up\<^sub>i r1 b r2 => T\<^sub>i (Node3 l a r1 b r2)))" |
+"ins x (Node3 l a m b r) =
+   (case cmp x a of
+      LT \<Rightarrow> (case ins x l of
+              T\<^sub>i l' => T\<^sub>i (Node3 l' a m b r)
+            | Up\<^sub>i l1 c l2 => Up\<^sub>i (Node2 l1 c l2) a (Node2 m b r)) |
+      EQ \<Rightarrow> T\<^sub>i (Node3 l a m b r) |
+      GT \<Rightarrow> (case cmp x b of
+               GT \<Rightarrow> (case ins x r of
+                       T\<^sub>i r' => T\<^sub>i (Node3 l a m b r')
+                     | Up\<^sub>i r1 c r2 => Up\<^sub>i (Node2 l a m) b (Node2 r1 c r2)) |
+               EQ \<Rightarrow> T\<^sub>i (Node3 l a m b r) |
+               LT \<Rightarrow> (case ins x m of
+                       T\<^sub>i m' => T\<^sub>i (Node3 l a m' b r)
+                     | Up\<^sub>i m1 c m2 => Up\<^sub>i (Node2 l a m1) c (Node2 m2 b r))))" |
 "ins a (Node4 l x1 m x2 n x3 r) =
    (if a < x2 then
       if a < x1 then
@@ -75,8 +83,8 @@
 
 hide_const insert
 
-definition insert :: "'a::linorder \<Rightarrow> 'a tree234 \<Rightarrow> 'a tree234" where
-"insert a t = tree\<^sub>i(ins a t)"
+definition insert :: "'a::cmp \<Rightarrow> 'a tree234 \<Rightarrow> 'a tree234" where
+"insert x t = tree\<^sub>i(ins x t)"
 
 datatype 'a up\<^sub>d = T\<^sub>d "'a tree234" | Up\<^sub>d "'a tree234"
 
@@ -146,7 +154,7 @@
 "del_min (Node3 l a m b r) = (let (x,l') = del_min l in (x, node31 l' a m b r))" |
 "del_min (Node4 l a m b n c r) = (let (x,l') = del_min l in (x, node41 l' a m b n c r))"
 
-fun del :: "'a::linorder \<Rightarrow> 'a tree234 \<Rightarrow> 'a up\<^sub>d" where
+fun del :: "'a::cmp \<Rightarrow> 'a tree234 \<Rightarrow> 'a up\<^sub>d" where
 "del k Leaf = T\<^sub>d Leaf" |
 "del k (Node2 Leaf p Leaf) = (if k=p then Up\<^sub>d Leaf else T\<^sub>d(Node2 Leaf p Leaf))" |
 "del k (Node3 Leaf p Leaf q Leaf) = T\<^sub>d(if k=p then Node2 Leaf q Leaf
@@ -156,36 +164,38 @@
      if k=b then Node3 Leaf a Leaf c Leaf else
      if k=c then Node3 Leaf a Leaf b Leaf
      else Node4 Leaf a Leaf b Leaf c Leaf)" |
-"del k (Node2 l a r) = (if k<a then node21 (del k l) a r else
-  if k > a then node22 l a (del k r) else
-  let (a',t) = del_min r in node22 l a' t)" |
-"del k (Node3 l a m b r) = (if k<a then node31 (del k l) a m b r else
-  if k = a then let (a',m') = del_min m in node32 l a' m' b r else
-  if k < b then node32 l a (del k m) b r else
-  if k = b then let (b',r') = del_min r in node33 l a m b' r'
-  else node33 l a m b (del k r))" |
-"del k (Node4 l a m b n c r) =
-  (if k < b then
-     if k < a then node41 (del k l) a m b n c r else
-     if k = a then let (a',m') = del_min m in node42 l a' m' b n c r
-     else node42 l a (del k m) b n c r
-   else
-     if k = b then let (b',n') = del_min n in node43 l a m b' n' c r else
-     if k < c then node43 l a m b (del k n) c r else
-     if k = c then let (c',r') = del_min r in node44 l a m b n c' r'
-     else node44 l a m b n c (del k r))"
+"del k (Node2 l a r) = (case cmp k a of
+  LT \<Rightarrow> node21 (del k l) a r |
+  GT \<Rightarrow> node22 l a (del k r) |
+  EQ \<Rightarrow> let (a',t) = del_min r in node22 l a' t)" |
+"del k (Node3 l a m b r) = (case cmp k a of
+  LT \<Rightarrow> node31 (del k l) a m b r |
+  EQ \<Rightarrow> let (a',m') = del_min m in node32 l a' m' b r |
+  GT \<Rightarrow> (case cmp k b of
+           LT \<Rightarrow> node32 l a (del k m) b r |
+           EQ \<Rightarrow> let (b',r') = del_min r in node33 l a m b' r' |
+           GT \<Rightarrow> node33 l a m b (del k r)))" |
+"del k (Node4 l a m b n c r) = (case cmp k b of
+  LT \<Rightarrow> (case cmp k a of
+          LT \<Rightarrow> node41 (del k l) a m b n c r |
+          EQ \<Rightarrow> let (a',m') = del_min m in node42 l a' m' b n c r |
+          GT \<Rightarrow> node42 l a (del k m) b n c r) |
+  EQ \<Rightarrow> let (b',n') = del_min n in node43 l a m b' n' c r |
+  GT \<Rightarrow> (case cmp k c of
+           LT \<Rightarrow> node43 l a m b (del k n) c r |
+           EQ \<Rightarrow> let (c',r') = del_min r in node44 l a m b n c' r' |
+           GT \<Rightarrow> node44 l a m b n c (del k r)))"
 
-definition delete :: "'a::linorder \<Rightarrow> 'a tree234 \<Rightarrow> 'a tree234" where
-"delete k t = tree\<^sub>d(del k t)"
+definition delete :: "'a::cmp \<Rightarrow> 'a tree234 \<Rightarrow> 'a tree234" where
+"delete x t = tree\<^sub>d(del x t)"
 
 
 subsection "Functional correctness"
 
-
 subsubsection \<open>Functional correctness of isin:\<close>
 
 lemma "sorted(inorder t) \<Longrightarrow> isin t x = (x \<in> elems (inorder t))"
-by (induction t) (auto simp: elems_simps1)
+by (induction t) (auto simp: elems_simps1 ball_Un)
 
 lemma isin_set: "sorted(inorder t) \<Longrightarrow> isin t x = (x \<in> elems (inorder t))"
 by (induction t) (auto simp: elems_simps2)
@@ -252,12 +262,9 @@
 
 lemma inorder_del: "\<lbrakk> bal t ; sorted(inorder t) \<rbrakk> \<Longrightarrow>
   inorder(tree\<^sub>d (del x t)) = del_list x (inorder t)"
-apply(induction t rule: del.induct)
-apply(simp_all add: del_list_simps inorder_nodes)
-apply(auto simp: del_list_simps;
-      auto simp: inorder_nodes del_list_simps del_minD split: prod.splits)+
-(* takes 285 s (2015); the last line alone would do it but takes hours *)
-done
+by(induction t rule: del.induct)
+  (auto simp: inorder_nodes del_list_simps del_minD split: prod.splits)
+  (* 150 secs (2015) *)
 
 lemma inorder_delete: "\<lbrakk> bal t ; sorted(inorder t) \<rbrakk> \<Longrightarrow>
   inorder(delete x t) = del_list x (inorder t)"
@@ -282,7 +289,7 @@
 end
 
 lemma bal_ins: "bal t \<Longrightarrow> bal (tree\<^sub>i(ins a t)) \<and> height(ins a t) = height t"
-by (induct t) (auto, auto split: up\<^sub>i.split) (* 29 secs (2015) *)
+by (induct t) (auto, auto split: up\<^sub>i.split) (* 20 secs (2015) *)
 
 
 text{* Now an alternative proof (by Brian Huffman) that runs faster because
@@ -344,9 +351,7 @@
 "full\<^sub>i n (Up\<^sub>i l p r) \<longleftrightarrow> full n l \<and> full n r"
 
 lemma full\<^sub>i_ins: "full n t \<Longrightarrow> full\<^sub>i n (ins a t)"
-apply (induct rule: full.induct)
-apply (auto, auto split: up\<^sub>i.split)
-done
+by (induct rule: full.induct) (auto, auto split: up\<^sub>i.split)
 
 text {* The @{const insert} operation preserves balance. *}
 
@@ -482,18 +487,17 @@
 
 lemma bal_tree\<^sub>d_del: "bal t \<Longrightarrow> bal(tree\<^sub>d(del x t))"
 by(induction x t rule: del.induct)
-  ((auto simp: bals bal_del_min height_del height_del_min split: prod.split)[1])+
-(* 64 secs (2015) *)
+  (auto simp: bals bal_del_min height_del height_del_min split: prod.split)
+(* 60 secs (2015) *)
 
 corollary bal_delete: "bal t \<Longrightarrow> bal(delete x t)"
 by(simp add: delete_def bal_tree\<^sub>d_del)
 
-
 subsection \<open>Overall Correctness\<close>
 
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert and delete = delete
-and inorder = inorder and wf = bal
+and inorder = inorder and inv = bal
 proof (standard, goal_cases)
   case 2 thus ?case by(simp add: isin_set)
 next
--- a/src/HOL/Data_Structures/Tree23_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree23_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -8,65 +8,65 @@
   Map_by_Ordered
 begin
 
-fun lookup :: "('a::linorder * 'b) tree23 \<Rightarrow> 'a \<Rightarrow> 'b option" where
+fun lookup :: "('a::cmp * 'b) tree23 \<Rightarrow> 'a \<Rightarrow> 'b option" where
 "lookup Leaf x = None" |
-"lookup (Node2 l (a,b) r) x =
-  (if x < a then lookup l x else
-  if a < x then lookup r x else Some b)" |
-"lookup (Node3 l (a1,b1) m (a2,b2) r) x =
-  (if x < a1 then lookup l x else
-   if x = a1 then Some b1 else
-   if x < a2 then lookup m x else
-   if x = a2 then Some b2
-   else lookup r x)"
+"lookup (Node2 l (a,b) r) x = (case cmp x a of
+  LT \<Rightarrow> lookup l x |
+  GT \<Rightarrow> lookup r x |
+  EQ \<Rightarrow> Some b)" |
+"lookup (Node3 l (a1,b1) m (a2,b2) r) x = (case cmp x a1 of
+  LT \<Rightarrow> lookup l x |
+  EQ \<Rightarrow> Some b1 |
+  GT \<Rightarrow> (case cmp x a2 of
+          LT \<Rightarrow> lookup m x |
+          EQ \<Rightarrow> Some b2 |
+          GT \<Rightarrow> lookup r x))"
 
-fun upd :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) up\<^sub>i" where
+fun upd :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) up\<^sub>i" where
 "upd x y Leaf = Up\<^sub>i Leaf (x,y) Leaf" |
-"upd x y (Node2 l ab r) =
-   (if x < fst ab then
-        (case upd x y l of
+"upd x y (Node2 l ab r) = (case cmp x (fst ab) of
+   LT \<Rightarrow> (case upd x y l of
            T\<^sub>i l' => T\<^sub>i (Node2 l' ab r)
-         | Up\<^sub>i l1 ab' l2 => T\<^sub>i (Node3 l1 ab' l2 ab r))
-    else if x = fst ab then T\<^sub>i (Node2 l (x,y) r)
-    else
-        (case upd x y r of
+         | Up\<^sub>i l1 ab' l2 => T\<^sub>i (Node3 l1 ab' l2 ab r)) |
+   EQ \<Rightarrow> T\<^sub>i (Node2 l (x,y) r) |
+   GT \<Rightarrow> (case upd x y r of
            T\<^sub>i r' => T\<^sub>i (Node2 l ab r')
          | Up\<^sub>i r1 ab' r2 => T\<^sub>i (Node3 l ab r1 ab' r2)))" |
-"upd x y (Node3 l ab1 m ab2 r) =
-   (if x < fst ab1 then
-        (case upd x y l of
+"upd x y (Node3 l ab1 m ab2 r) = (case cmp x (fst ab1) of
+   LT \<Rightarrow> (case upd x y l of
            T\<^sub>i l' => T\<^sub>i (Node3 l' ab1 m ab2 r)
-         | Up\<^sub>i l1 ab' l2 => Up\<^sub>i (Node2 l1 ab' l2) ab1 (Node2 m ab2 r))
-    else if x = fst ab1 then T\<^sub>i (Node3 l (x,y) m ab2 r)
-    else if x < fst ab2 then
-             (case upd x y m of
-                T\<^sub>i m' => T\<^sub>i (Node3 l ab1 m' ab2 r)
-              | Up\<^sub>i m1 ab' m2 => Up\<^sub>i (Node2 l ab1 m1) ab' (Node2 m2 ab2 r))
-         else if x = fst ab2 then T\<^sub>i (Node3 l ab1 m (x,y) r)
-         else
-             (case upd x y r of
-                T\<^sub>i r' => T\<^sub>i (Node3 l ab1 m ab2 r')
-              | Up\<^sub>i r1 ab' r2 => Up\<^sub>i (Node2 l ab1 m) ab2 (Node2 r1 ab' r2)))"
+         | Up\<^sub>i l1 ab' l2 => Up\<^sub>i (Node2 l1 ab' l2) ab1 (Node2 m ab2 r)) |
+   EQ \<Rightarrow> T\<^sub>i (Node3 l (x,y) m ab2 r) |
+   GT \<Rightarrow> (case cmp x (fst ab2) of
+           LT \<Rightarrow> (case upd x y m of
+                   T\<^sub>i m' => T\<^sub>i (Node3 l ab1 m' ab2 r)
+                 | Up\<^sub>i m1 ab' m2 => Up\<^sub>i (Node2 l ab1 m1) ab' (Node2 m2 ab2 r)) |
+           EQ \<Rightarrow> T\<^sub>i (Node3 l ab1 m (x,y) r) |
+           GT \<Rightarrow> (case upd x y r of
+                   T\<^sub>i r' => T\<^sub>i (Node3 l ab1 m ab2 r')
+                 | Up\<^sub>i r1 ab' r2 => Up\<^sub>i (Node2 l ab1 m) ab2 (Node2 r1 ab' r2))))"
 
-definition update :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) tree23" where
+definition update :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) tree23" where
 "update a b t = tree\<^sub>i(upd a b t)"
 
-fun del :: "'a::linorder \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) up\<^sub>d"
-where
+fun del :: "'a::cmp \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) up\<^sub>d" where
 "del x Leaf = T\<^sub>d Leaf" |
 "del x (Node2 Leaf ab1 Leaf) = (if x=fst ab1 then Up\<^sub>d Leaf else T\<^sub>d(Node2 Leaf ab1 Leaf))" |
 "del x (Node3 Leaf ab1 Leaf ab2 Leaf) = T\<^sub>d(if x=fst ab1 then Node2 Leaf ab2 Leaf
   else if x=fst ab2 then Node2 Leaf ab1 Leaf else Node3 Leaf ab1 Leaf ab2 Leaf)" |
-"del x (Node2 l ab1 r) = (if x<fst ab1 then node21 (del x l) ab1 r else
-  if x > fst ab1 then node22 l ab1 (del x r) else
-  let (ab1',t) = del_min r in node22 l ab1' t)" |
-"del x (Node3 l ab1 m ab2 r) = (if x<fst ab1 then node31 (del x l) ab1 m ab2 r else
-  if x = fst ab1 then let (ab1',m') = del_min m in node32 l ab1' m' ab2 r else
-  if x < fst ab2 then node32 l ab1 (del x m) ab2 r else
-  if x = fst ab2 then let (ab2',r') = del_min r in node33 l ab1 m ab2' r'
-  else node33 l ab1 m ab2 (del x r))"
+"del x (Node2 l ab1 r) = (case cmp x (fst ab1) of
+  LT \<Rightarrow> node21 (del x l) ab1 r |
+  GT \<Rightarrow> node22 l ab1 (del x r) |
+  EQ \<Rightarrow> let (ab1',t) = del_min r in node22 l ab1' t)" |
+"del x (Node3 l ab1 m ab2 r) = (case cmp x (fst ab1) of
+  LT \<Rightarrow> node31 (del x l) ab1 m ab2 r |
+  EQ \<Rightarrow> let (ab1',m') = del_min m in node32 l ab1' m' ab2 r |
+  GT \<Rightarrow> (case cmp x (fst ab2) of
+           LT \<Rightarrow> node32 l ab1 (del x m) ab2 r |
+           EQ \<Rightarrow> let (ab2',r') = del_min r in node33 l ab1 m ab2' r' |
+           GT \<Rightarrow> node33 l ab1 m ab2 (del x r)))"
 
-definition delete :: "'a::linorder \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) tree23" where
+definition delete :: "'a::cmp \<Rightarrow> ('a*'b) tree23 \<Rightarrow> ('a*'b) tree23" where
 "delete x t = tree\<^sub>d(del x t)"
 
 
@@ -98,7 +98,7 @@
 subsection \<open>Balancedness\<close>
 
 lemma bal_upd: "bal t \<Longrightarrow> bal (tree\<^sub>i(upd a b t)) \<and> height(upd a b t) = height t"
-by (induct t) (auto split: up\<^sub>i.split)(* 30 secs in 2015 *)
+by (induct t) (auto split: up\<^sub>i.split)(* 16 secs in 2015 *)
 
 corollary bal_update: "bal t \<Longrightarrow> bal (update a b t)"
 by (simp add: update_def bal_upd)
--- a/src/HOL/Data_Structures/Tree23_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree23_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,14 +5,17 @@
 theory Tree23_Set
 imports
   Tree23
+  Cmp
   Set_by_Ordered
 begin
 
-fun isin :: "'a::linorder tree23 \<Rightarrow> 'a \<Rightarrow> bool" where
+fun isin :: "'a::cmp tree23 \<Rightarrow> 'a \<Rightarrow> bool" where
 "isin Leaf x = False" |
-"isin (Node2 l a r) x = (x < a \<and> isin l x \<or> x=a \<or> isin r x)" |
+"isin (Node2 l a r) x =
+  (case cmp x a of LT \<Rightarrow> isin l x | EQ \<Rightarrow> True | GT \<Rightarrow> isin r x)" |
 "isin (Node3 l a m b r) x =
-  (x < a \<and> isin l x \<or> x > b \<and> isin r x \<or> x = a \<or> x = b \<or> isin m x)"
+  (case cmp x a of LT \<Rightarrow> isin l x | EQ \<Rightarrow> True | GT \<Rightarrow> (case cmp x b of
+   LT \<Rightarrow> isin m x | EQ \<Rightarrow> True | GT \<Rightarrow> isin r x))"
 
 datatype 'a up\<^sub>i = T\<^sub>i "'a tree23" | Up\<^sub>i "'a tree23" 'a "'a tree23"
 
@@ -20,38 +23,35 @@
 "tree\<^sub>i (T\<^sub>i t) = t" |
 "tree\<^sub>i (Up\<^sub>i l p r) = Node2 l p r"
 
-fun ins :: "'a::linorder \<Rightarrow> 'a tree23 \<Rightarrow> 'a up\<^sub>i" where
+fun ins :: "'a::cmp \<Rightarrow> 'a tree23 \<Rightarrow> 'a up\<^sub>i" where
 "ins x Leaf = Up\<^sub>i Leaf x Leaf" |
 "ins x (Node2 l a r) =
-   (if x < a then
-      case ins x l of
-         T\<^sub>i l' => T\<^sub>i (Node2 l' a r)
-       | Up\<^sub>i l1 b l2 => T\<^sub>i (Node3 l1 b l2 a r)
-    else if x=a then T\<^sub>i (Node2 l x r)
-    else
-      case ins x r of
-        T\<^sub>i r' => T\<^sub>i (Node2 l a r')
-      | Up\<^sub>i r1 b r2 => T\<^sub>i (Node3 l a r1 b r2))" |
+   (case cmp x a of
+      LT \<Rightarrow> (case ins x l of
+              T\<^sub>i l' => T\<^sub>i (Node2 l' a r)
+            | Up\<^sub>i l1 b l2 => T\<^sub>i (Node3 l1 b l2 a r)) |
+      EQ \<Rightarrow> T\<^sub>i (Node2 l x r) |
+      GT \<Rightarrow> (case ins x r of
+              T\<^sub>i r' => T\<^sub>i (Node2 l a r')
+            | Up\<^sub>i r1 b r2 => T\<^sub>i (Node3 l a r1 b r2)))" |
 "ins x (Node3 l a m b r) =
-   (if x < a then
-      case ins x l of
-        T\<^sub>i l' => T\<^sub>i (Node3 l' a m b r)
-      | Up\<^sub>i l1 c l2 => Up\<^sub>i (Node2 l1 c l2) a (Node2 m b r)
-    else
-    if x > b then
-      case ins x r of
-        T\<^sub>i r' => T\<^sub>i (Node3 l a m b r')
-      | Up\<^sub>i r1 c r2 => Up\<^sub>i (Node2 l a m) b (Node2 r1 c r2)
-    else
-    if x=a \<or> x = b then T\<^sub>i (Node3 l a m b r)
-    else
-      case ins x m of
-        T\<^sub>i m' => T\<^sub>i (Node3 l a m' b r)
-      | Up\<^sub>i m1 c m2 => Up\<^sub>i (Node2 l a m1) c (Node2 m2 b r))"
+   (case cmp x a of
+      LT \<Rightarrow> (case ins x l of
+              T\<^sub>i l' => T\<^sub>i (Node3 l' a m b r)
+            | Up\<^sub>i l1 c l2 => Up\<^sub>i (Node2 l1 c l2) a (Node2 m b r)) |
+      EQ \<Rightarrow> T\<^sub>i (Node3 l a m b r) |
+      GT \<Rightarrow> (case cmp x b of
+               GT \<Rightarrow> (case ins x r of
+                       T\<^sub>i r' => T\<^sub>i (Node3 l a m b r')
+                     | Up\<^sub>i r1 c r2 => Up\<^sub>i (Node2 l a m) b (Node2 r1 c r2)) |
+               EQ \<Rightarrow> T\<^sub>i (Node3 l a m b r) |
+               LT \<Rightarrow> (case ins x m of
+                       T\<^sub>i m' => T\<^sub>i (Node3 l a m' b r)
+                     | Up\<^sub>i m1 c m2 => Up\<^sub>i (Node2 l a m1) c (Node2 m2 b r))))"
 
 hide_const insert
 
-definition insert :: "'a::linorder \<Rightarrow> 'a tree23 \<Rightarrow> 'a tree23" where
+definition insert :: "'a::cmp \<Rightarrow> 'a tree23 \<Rightarrow> 'a tree23" where
 "insert x t = tree\<^sub>i(ins x t)"
 
 datatype 'a up\<^sub>d = T\<^sub>d "'a tree23" | Up\<^sub>d "'a tree23"
@@ -93,32 +93,34 @@
 "del_min (Node2 l a r) = (let (x,l') = del_min l in (x, node21 l' a r))" |
 "del_min (Node3 l a m b r) = (let (x,l') = del_min l in (x, node31 l' a m b r))"
 
-fun del :: "'a::linorder \<Rightarrow> 'a tree23 \<Rightarrow> 'a up\<^sub>d"
+fun del :: "'a::cmp \<Rightarrow> 'a tree23 \<Rightarrow> 'a up\<^sub>d"
 where
 "del x Leaf = T\<^sub>d Leaf" |
 "del x (Node2 Leaf a Leaf) = (if x = a then Up\<^sub>d Leaf else T\<^sub>d(Node2 Leaf a Leaf))" |
 "del x (Node3 Leaf a Leaf b Leaf) = T\<^sub>d(if x = a then Node2 Leaf b Leaf
   else if x = b then Node2 Leaf a Leaf else Node3 Leaf a Leaf b Leaf)" |
-"del x (Node2 l a r) = (if x<a then node21 (del x l) a r else
-  if x > a then node22 l a (del x r) else
-  let (a',t) = del_min r in node22 l a' t)" |
-"del x (Node3 l a m b r) = (if x<a then node31 (del x l) a m b r else
-  if x = a then let (a',m') = del_min m in node32 l a' m' b r else
-  if x < b then node32 l a (del x m) b r else
-  if x = b then let (b',r') = del_min r in node33 l a m b' r'
-  else node33 l a m b (del x r))"
+"del x (Node2 l a r) = (case cmp x a of
+  LT \<Rightarrow> node21 (del x l) a r |
+  GT \<Rightarrow> node22 l a (del x r) |
+  EQ \<Rightarrow> let (a',t) = del_min r in node22 l a' t)" |
+"del x (Node3 l a m b r) = (case cmp x a of
+  LT \<Rightarrow> node31 (del x l) a m b r |
+  EQ \<Rightarrow> let (a',m') = del_min m in node32 l a' m' b r |
+  GT \<Rightarrow> (case cmp x b of
+          LT \<Rightarrow> node32 l a (del x m) b r |
+          EQ \<Rightarrow> let (b',r') = del_min r in node33 l a m b' r' |
+          GT \<Rightarrow> node33 l a m b (del x r)))"
 
-definition delete :: "'a::linorder \<Rightarrow> 'a tree23 \<Rightarrow> 'a tree23" where
+definition delete :: "'a::cmp \<Rightarrow> 'a tree23 \<Rightarrow> 'a tree23" where
 "delete x t = tree\<^sub>d(del x t)"
 
 
 subsection "Functional Correctness"
 
-
 subsubsection "Proofs for isin"
 
 lemma "sorted(inorder t) \<Longrightarrow> isin t x = (x \<in> elems (inorder t))"
-by (induction t) (auto simp: elems_simps1)
+by (induction t) (auto simp: elems_simps1 ball_Un)
 
 lemma isin_set: "sorted(inorder t) \<Longrightarrow> isin t x = (x \<in> elems (inorder t))"
 by (induction t) (auto simp: elems_simps2)
@@ -128,7 +130,7 @@
 
 lemma inorder_ins:
   "sorted(inorder t) \<Longrightarrow> inorder(tree\<^sub>i(ins x t)) = ins_list x (inorder t)"
-by(induction t) (auto simp: ins_list_simps split: up\<^sub>i.splits) (* 38 secs in 2015 *)
+by(induction t) (auto simp: ins_list_simps split: up\<^sub>i.splits)
 
 lemma inorder_insert:
   "sorted(inorder t) \<Longrightarrow> inorder(insert a t) = ins_list a (inorder t)"
@@ -195,7 +197,7 @@
 end
 
 lemma bal_ins: "bal t \<Longrightarrow> bal (tree\<^sub>i(ins a t)) \<and> height(ins a t) = height t"
-by (induct t) (auto split: up\<^sub>i.split) (* 87 secs in 2015 *)
+by (induct t) (auto split: up\<^sub>i.split) (* 15 secs in 2015 *)
 
 text{* Now an alternative proof (by Brian Huffman) that runs faster because
 two properties (balance and height) are combined in one predicate. *}
@@ -354,7 +356,7 @@
 
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert and delete = delete
-and inorder = inorder and wf = bal
+and inorder = inorder and inv = bal
 proof (standard, goal_cases)
   case 2 thus ?case by(simp add: isin_set)
 next
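The Tree23_Set.thy hunks above replace cascades of "<" and "=" tests by a single three-way comparison per key, using the new cmp function from the Cmp theory. As an illustrative sketch only, not part of this changeset, the same pattern can be written in plain Haskell, where the Prelude's compare and its Ordering type (LT/EQ/GT) stand in for cmp; the names Tree23, Node2, Node3 and isin below merely mirror the Isabelle definitions and are not taken from any library.

-- Illustrative sketch (not part of the changeset): three-way comparison
-- for 2-3 tree membership, mirroring the cmp-based isin above.
data Tree23 a = Leaf
              | Node2 (Tree23 a) a (Tree23 a)
              | Node3 (Tree23 a) a (Tree23 a) a (Tree23 a)

-- One 'compare' per key replaces the old if/else cascade of '<' and '='.
isin :: Ord a => Tree23 a -> a -> Bool
isin Leaf _ = False
isin (Node2 l a r) x = case compare x a of
  LT -> isin l x
  EQ -> True
  GT -> isin r x
isin (Node3 l a m b r) x = case compare x a of
  LT -> isin l x
  EQ -> True
  GT -> case compare x b of
          LT -> isin m x
          EQ -> True
          GT -> isin r x

Each Node3 step performs at most two comparisons, following the same nested case structure as the new definitions; the sketch is in Haskell rather than Isabelle only so it can be compiled and run directly.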
--- a/src/HOL/Data_Structures/Tree_Map.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree_Map.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -8,23 +8,24 @@
   Map_by_Ordered
 begin
 
-fun lookup :: "('a::linorder*'b) tree \<Rightarrow> 'a \<Rightarrow> 'b option" where
+fun lookup :: "('a::cmp*'b) tree \<Rightarrow> 'a \<Rightarrow> 'b option" where
 "lookup Leaf x = None" |
-"lookup (Node l (a,b) r) x = (if x < a then lookup l x else
-  if x > a then lookup r x else Some b)"
+"lookup (Node l (a,b) r) x =
+  (case cmp x a of LT \<Rightarrow> lookup l x | GT \<Rightarrow> lookup r x | EQ \<Rightarrow> Some b)"
 
-fun update :: "'a::linorder \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
+fun update :: "'a::cmp \<Rightarrow> 'b \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
 "update x y Leaf = Node Leaf (x,y) Leaf" |
-"update x y (Node l (a,b) r) =
-   (if x < a then Node (update x y l) (a,b) r
-    else if x = a then Node l (x,y) r
-    else Node l (a,b) (update x y r))"
+"update x y (Node l (a,b) r) = (case cmp x a of
+   LT \<Rightarrow> Node (update x y l) (a,b) r |
+   EQ \<Rightarrow> Node l (x,y) r |
+   GT \<Rightarrow> Node l (a,b) (update x y r))"
 
-fun delete :: "'a::linorder \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
+fun delete :: "'a::cmp \<Rightarrow> ('a*'b) tree \<Rightarrow> ('a*'b) tree" where
 "delete x Leaf = Leaf" |
-"delete x (Node l (a,b) r) = (if x < a then Node (delete x l) (a,b) r else
-  if x > a then Node l (a,b) (delete x r) else
-  if r = Leaf then l else let (ab',r') = del_min r in Node l ab' r')"
+"delete x (Node l (a,b) r) = (case cmp x a of
+  LT \<Rightarrow> Node (delete x l) (a,b) r |
+  GT \<Rightarrow> Node l (a,b) (delete x r) |
+  EQ \<Rightarrow> if r = Leaf then l else let (ab',r') = del_min r in Node l ab' r')"
 
 
 subsection "Functional Correctness Proofs"
@@ -49,7 +50,6 @@
   "sorted1(inorder t) \<Longrightarrow> inorder(delete x t) = del_list x (inorder t)"
 by(induction t) (auto simp: del_list_simps del_minD split: prod.splits)
 
-
 interpretation Map_by_Ordered
 where empty = Leaf and lookup = lookup and update = update and delete = delete
 and inorder = inorder and wf = "\<lambda>_. True"
--- a/src/HOL/Data_Structures/Tree_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Data_Structures/Tree_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,31 +5,34 @@
 theory Tree_Set
 imports
   "~~/src/HOL/Library/Tree"
+  Cmp
   Set_by_Ordered
 begin
 
-fun isin :: "'a::linorder tree \<Rightarrow> 'a \<Rightarrow> bool" where
+fun isin :: "'a::cmp tree \<Rightarrow> 'a \<Rightarrow> bool" where
 "isin Leaf x = False" |
-"isin (Node l a r) x = (x < a \<and> isin l x \<or> x=a \<or> isin r x)"
+"isin (Node l a r) x =
+  (case cmp x a of LT \<Rightarrow> isin l x | EQ \<Rightarrow> True | GT \<Rightarrow> isin r x)"
 
 hide_const (open) insert
 
-fun insert :: "'a::linorder \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
+fun insert :: "'a::cmp \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
 "insert x Leaf = Node Leaf x Leaf" |
-"insert x (Node l a r) =
-   (if x < a then Node (insert x l) a r else
-    if x = a then Node l a r
-    else Node l a (insert x r))"
+"insert x (Node l a r) = (case cmp x a of
+      LT \<Rightarrow> Node (insert x l) a r |
+      EQ \<Rightarrow> Node l a r |
+      GT \<Rightarrow> Node l a (insert x r))"
 
 fun del_min :: "'a tree \<Rightarrow> 'a * 'a tree" where
 "del_min (Node Leaf a r) = (a, r)" |
 "del_min (Node l a r) = (let (x,l') = del_min l in (x, Node l' a r))"
 
-fun delete :: "'a::linorder \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
+fun delete :: "'a::cmp \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
 "delete x Leaf = Leaf" |
-"delete x (Node l a r) = (if x < a then Node (delete x l) a r else
-  if x > a then Node l a (delete x r) else
-  if r = Leaf then l else let (a',r') = del_min r in Node l a' r')"
+"delete x (Node l a r) = (case cmp x a of
+  LT \<Rightarrow>  Node (delete x l) a r |
+  GT \<Rightarrow>  Node l a (delete x r) |
+  EQ \<Rightarrow> if r = Leaf then l else let (a',r') = del_min r in Node l a' r')"
 
 
 subsection "Functional Correctness Proofs"
@@ -56,10 +59,9 @@
   "sorted(inorder t) \<Longrightarrow> inorder(delete x t) = del_list x (inorder t)"
 by(induction t) (auto simp: del_list_simps del_minD split: prod.splits)
 
-
 interpretation Set_by_Ordered
 where empty = Leaf and isin = isin and insert = insert and delete = delete
-and inorder = inorder and wf = "\<lambda>_. True"
+and inorder = inorder and inv = "\<lambda>_. True"
 proof (standard, goal_cases)
   case 1 show ?case by simp
 next
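The Tree_Map.thy and Tree_Set.thy hunks above keep the same deletion scheme while switching to cmp: on an EQ comparison the deleted root key is replaced by the minimum of the right subtree, obtained with del_min. As an illustrative sketch only, not part of this changeset, the scheme for a plain unbalanced binary search tree looks as follows in Haskell; compare and Ordering are from the Prelude, and the names Tree, Node, delMin and delete are chosen here to mirror the Isabelle definitions.

-- Illustrative sketch (not part of the changeset): BST deletion via the
-- minimum of the right subtree, mirroring the del_min-based definitions.
data Tree a = Leaf | Node (Tree a) a (Tree a)

-- Remove and return the least element; intended for non-empty trees,
-- matching the partial del_min above.
delMin :: Tree a -> (a, Tree a)
delMin (Node Leaf a r) = (a, r)
delMin (Node l a r)    = let (x, l') = delMin l in (x, Node l' a r)
delMin Leaf            = error "delMin: empty tree"

-- On EQ, the root is replaced by the minimum of the right subtree,
-- or by the left subtree when the right subtree is empty.
delete :: Ord a => a -> Tree a -> Tree a
delete _ Leaf = Leaf
delete x (Node l a r) = case compare x a of
  LT -> Node (delete x l) a r
  GT -> Node l a (delete x r)
  EQ -> case r of
          Leaf -> l
          _    -> let (a', r') = delMin r in Node l a' r'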
--- a/src/HOL/Decision_Procs/Approximation.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Approximation.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -18,7 +18,7 @@
 
 section "Horner Scheme"
 
-subsection \<open>Define auxiliary helper @{text horner} function\<close>
+subsection \<open>Define auxiliary helper \<open>horner\<close> function\<close>
 
 primrec horner :: "(nat \<Rightarrow> nat) \<Rightarrow> (nat \<Rightarrow> nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> real \<Rightarrow> real" where
 "horner F G 0 i k x       = 0" |
--- a/src/HOL/Decision_Procs/Commutative_Ring_Complete.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Commutative_Ring_Complete.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -101,7 +101,7 @@
     by (cases x) auto
 qed
 
-text \<open>mkPX conserves normalizedness (@{text "_cn"})\<close>
+text \<open>mkPX conserves normalizedness (\<open>_cn\<close>)\<close>
 lemma mkPX_cn:
   assumes "x \<noteq> 0"
     and "isnorm P"
--- a/src/HOL/Decision_Procs/Cooper.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Cooper.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -18,7 +18,7 @@
 datatype num = C int | Bound nat | CN nat int num | Neg num | Add num num| Sub num num
   | Mul int num
 
-primrec num_size :: "num \<Rightarrow> nat" -- \<open>A size for num to make inductive proofs simpler\<close>
+primrec num_size :: "num \<Rightarrow> nat" \<comment> \<open>A size for num to make inductive proofs simpler\<close>
 where
   "num_size (C c) = 1"
 | "num_size (Bound n) = 1"
@@ -44,7 +44,7 @@
   | Closed nat | NClosed nat
 
 
-fun fmsize :: "fm \<Rightarrow> nat"  -- \<open>A size for fm\<close>
+fun fmsize :: "fm \<Rightarrow> nat"  \<comment> \<open>A size for fm\<close>
 where
   "fmsize (NOT p) = 1 + fmsize p"
 | "fmsize (And p q) = 1 + fmsize p + fmsize q"
@@ -60,7 +60,7 @@
 lemma fmsize_pos: "fmsize p > 0"
   by (induct p rule: fmsize.induct) simp_all
 
-primrec Ifm :: "bool list \<Rightarrow> int list \<Rightarrow> fm \<Rightarrow> bool"  -- \<open>Semantics of formulae (fm)\<close>
+primrec Ifm :: "bool list \<Rightarrow> int list \<Rightarrow> fm \<Rightarrow> bool"  \<comment> \<open>Semantics of formulae (fm)\<close>
 where
   "Ifm bbs bs T \<longleftrightarrow> True"
 | "Ifm bbs bs F \<longleftrightarrow> False"
@@ -113,7 +113,7 @@
   by (induct p arbitrary: bs rule: prep.induct) auto
 
 
-fun qfree :: "fm \<Rightarrow> bool"  -- \<open>Quantifier freeness\<close>
+fun qfree :: "fm \<Rightarrow> bool"  \<comment> \<open>Quantifier freeness\<close>
 where
   "qfree (E p) \<longleftrightarrow> False"
 | "qfree (A p) \<longleftrightarrow> False"
@@ -127,7 +127,7 @@
 
 text \<open>Boundedness and substitution\<close>
 
-primrec numbound0 :: "num \<Rightarrow> bool"  -- \<open>a num is INDEPENDENT of Bound 0\<close>
+primrec numbound0 :: "num \<Rightarrow> bool"  \<comment> \<open>a num is INDEPENDENT of Bound 0\<close>
 where
   "numbound0 (C c) \<longleftrightarrow> True"
 | "numbound0 (Bound n) \<longleftrightarrow> n > 0"
@@ -142,7 +142,7 @@
   shows "Inum (b # bs) a = Inum (b' # bs) a"
   using nb by (induct a rule: num.induct) (auto simp add: gr0_conv_Suc)
 
-primrec bound0 :: "fm \<Rightarrow> bool" -- \<open>A Formula is independent of Bound 0\<close>
+primrec bound0 :: "fm \<Rightarrow> bool" \<comment> \<open>A Formula is independent of Bound 0\<close>
 where
   "bound0 T \<longleftrightarrow> True"
 | "bound0 F \<longleftrightarrow> True"
@@ -188,7 +188,7 @@
   "numbound0 a \<Longrightarrow> Inum (b#bs) (numsubst0 a t) = Inum ((Inum (b'#bs) a)#bs) t"
   by (induct t rule: numsubst0.induct) (auto simp: nth_Cons' numbound0_I[where b="b" and b'="b'"])
 
-primrec subst0:: "num \<Rightarrow> fm \<Rightarrow> fm"  -- \<open>substitue a num into a formula for Bound 0\<close>
+primrec subst0:: "num \<Rightarrow> fm \<Rightarrow> fm"  \<comment> \<open>substitute a num into a formula for Bound 0\<close>
 where
   "subst0 t T = T"
 | "subst0 t F = F"
@@ -254,7 +254,7 @@
 lemma decr_qf: "bound0 p \<Longrightarrow> qfree (decr p)"
   by (induct p) simp_all
 
-fun isatom :: "fm \<Rightarrow> bool"  -- \<open>test for atomicity\<close>
+fun isatom :: "fm \<Rightarrow> bool"  \<comment> \<open>test for atomicity\<close>
 where
   "isatom T \<longleftrightarrow> True"
 | "isatom F \<longleftrightarrow> True"
@@ -856,9 +856,9 @@
     (auto simp add: not disj conj iff imp not_qf disj_qf conj_qf imp_qf iff_qf
       simpfm simpfm_qf simp del: simpfm.simps)
 
-text \<open>Linearity for fm where Bound 0 ranges over @{text "\<int>"}\<close>
+text \<open>Linearity for fm where Bound 0 ranges over \<open>\<int>\<close>\<close>
 
-fun zsplit0 :: "num \<Rightarrow> int \<times> num"  -- \<open>splits the bounded from the unbounded part\<close>
+fun zsplit0 :: "num \<Rightarrow> int \<times> num"  \<comment> \<open>splits the bounded from the unbounded part\<close>
 where
   "zsplit0 (C c) = (0, C c)"
 | "zsplit0 (Bound n) = (if n = 0 then (1, C 0) else (0, Bound n))"
@@ -989,7 +989,7 @@
     by simp
 qed
 
-consts iszlfm :: "fm \<Rightarrow> bool"  -- \<open>Linearity test for fm\<close>
+consts iszlfm :: "fm \<Rightarrow> bool"  \<comment> \<open>Linearity test for fm\<close>
 recdef iszlfm "measure size"
   "iszlfm (And p q) \<longleftrightarrow> iszlfm p \<and> iszlfm q"
   "iszlfm (Or p q) \<longleftrightarrow> iszlfm p \<and> iszlfm q"
@@ -1006,7 +1006,7 @@
 lemma zlin_qfree: "iszlfm p \<Longrightarrow> qfree p"
   by (induct p rule: iszlfm.induct) auto
 
-consts zlfm :: "fm \<Rightarrow> fm"  -- \<open>Linearity transformation for fm\<close>
+consts zlfm :: "fm \<Rightarrow> fm"  \<comment> \<open>Linearity transformation for fm\<close>
 recdef zlfm "measure fmsize"
   "zlfm (And p q) = And (zlfm p) (zlfm q)"
   "zlfm (Or p q) = Or (zlfm p) (zlfm q)"
@@ -1258,7 +1258,7 @@
   qed
 qed auto
 
-consts minusinf :: "fm \<Rightarrow> fm" -- \<open>Virtual substitution of @{text "-\<infinity>"}\<close>
+consts minusinf :: "fm \<Rightarrow> fm" \<comment> \<open>Virtual substitution of \<open>-\<infinity>\<close>\<close>
 recdef minusinf "measure size"
   "minusinf (And p q) = And (minusinf p) (minusinf q)"
   "minusinf (Or p q) = Or (minusinf p) (minusinf q)"
@@ -1273,7 +1273,7 @@
 lemma minusinf_qfree: "qfree p \<Longrightarrow> qfree (minusinf p)"
   by (induct p rule: minusinf.induct) auto
 
-consts plusinf :: "fm \<Rightarrow> fm"  -- \<open>Virtual substitution of @{text "+\<infinity>"}\<close>
+consts plusinf :: "fm \<Rightarrow> fm"  \<comment> \<open>Virtual substitution of \<open>+\<infinity>\<close>\<close>
 recdef plusinf "measure size"
   "plusinf (And p q) = And (plusinf p) (plusinf q)"
   "plusinf (Or p q) = Or (plusinf p) (plusinf q)"
@@ -1285,7 +1285,7 @@
   "plusinf (Ge  (CN 0 c e)) = T"
   "plusinf p = p"
 
-consts \<delta> :: "fm \<Rightarrow> int"  -- \<open>Compute @{text "lcm {d| N\<^sup>? Dvd c*x+t \<in> p}"}\<close>
+consts \<delta> :: "fm \<Rightarrow> int"  \<comment> \<open>Compute \<open>lcm {d| N\<^sup>? Dvd c*x+t \<in> p}\<close>\<close>
 recdef \<delta> "measure size"
   "\<delta> (And p q) = lcm (\<delta> p) (\<delta> q)"
   "\<delta> (Or p q) = lcm (\<delta> p) (\<delta> q)"
@@ -1293,7 +1293,7 @@
   "\<delta> (NDvd i (CN 0 c e)) = i"
   "\<delta> p = 1"
 
-consts d_\<delta> :: "fm \<Rightarrow> int \<Rightarrow> bool"  -- \<open>check if a given l divides all the ds above\<close>
+consts d_\<delta> :: "fm \<Rightarrow> int \<Rightarrow> bool"  \<comment> \<open>check if a given l divides all the ds above\<close>
 recdef d_\<delta> "measure size"
   "d_\<delta> (And p q) = (\<lambda>d. d_\<delta> p d \<and> d_\<delta> q d)"
   "d_\<delta> (Or p q) = (\<lambda>d. d_\<delta> p d \<and> d_\<delta> q d)"
@@ -1354,7 +1354,7 @@
 qed simp_all
 
 
-consts a_\<beta> :: "fm \<Rightarrow> int \<Rightarrow> fm"  -- \<open>adjust the coefficients of a formula\<close>
+consts a_\<beta> :: "fm \<Rightarrow> int \<Rightarrow> fm"  \<comment> \<open>adjust the coefficients of a formula\<close>
 recdef a_\<beta> "measure size"
   "a_\<beta> (And p q) = (\<lambda>k. And (a_\<beta> p k) (a_\<beta> q k))"
   "a_\<beta> (Or p q) = (\<lambda>k. Or (a_\<beta> p k) (a_\<beta> q k))"
@@ -1368,7 +1368,7 @@
   "a_\<beta> (NDvd i (CN 0 c e))=(\<lambda>k. NDvd ((k div c)*i) (CN 0 1 (Mul (k div c) e)))"
   "a_\<beta> p = (\<lambda>k. p)"
 
-consts d_\<beta> :: "fm \<Rightarrow> int \<Rightarrow> bool"  -- \<open>test if all coeffs c of c divide a given l\<close>
+consts d_\<beta> :: "fm \<Rightarrow> int \<Rightarrow> bool"  \<comment> \<open>test if all coeffs c divide a given l\<close>
 recdef d_\<beta> "measure size"
   "d_\<beta> (And p q) = (\<lambda>k. (d_\<beta> p k) \<and> (d_\<beta> q k))"
   "d_\<beta> (Or p q) = (\<lambda>k. (d_\<beta> p k) \<and> (d_\<beta> q k))"
@@ -1382,7 +1382,7 @@
   "d_\<beta> (NDvd i (CN 0 c e))=(\<lambda>k. c dvd k)"
   "d_\<beta> p = (\<lambda>k. True)"
 
-consts \<zeta> :: "fm \<Rightarrow> int"  -- \<open>computes the lcm of all coefficients of x\<close>
+consts \<zeta> :: "fm \<Rightarrow> int"  \<comment> \<open>computes the lcm of all coefficients of x\<close>
 recdef \<zeta> "measure size"
   "\<zeta> (And p q) = lcm (\<zeta> p) (\<zeta> q)"
   "\<zeta> (Or p q) = lcm (\<zeta> p) (\<zeta> q)"
@@ -1434,7 +1434,7 @@
   "mirror (NDvd i (CN 0 c e)) = NDvd i (CN 0 c (Neg e))"
   "mirror p = p"
 
-text \<open>Lemmas for the correctness of @{text "\<sigma>_\<rho>"}\<close>
+text \<open>Lemmas for the correctness of \<open>\<sigma>_\<rho>\<close>\<close>
 
 lemma dvd1_eq1:
   fixes x :: int
--- a/src/HOL/Decision_Procs/Dense_Linear_Order.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Dense_Linear_Order.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -32,7 +32,7 @@
 lemma gather_start [no_atp]: "(\<exists>x. P x) \<equiv> (\<exists>x. (\<forall>y \<in> {}. y < x) \<and> (\<forall>y\<in> {}. x < y) \<and> P x)"
   by simp
 
-text\<open>Theorems for @{text "\<exists>z. \<forall>x. x < z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>-\<^sub>\<infinity>)"}\<close>
+text\<open>Theorems for \<open>\<exists>z. \<forall>x. x < z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>-\<^sub>\<infinity>)\<close>\<close>
 lemma minf_lt[no_atp]:  "\<exists>z . \<forall>x. x < z \<longrightarrow> (x < t \<longleftrightarrow> True)" by auto
 lemma minf_gt[no_atp]: "\<exists>z . \<forall>x. x < z \<longrightarrow>  (t < x \<longleftrightarrow>  False)"
   by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le)
@@ -44,7 +44,7 @@
 lemma minf_neq[no_atp]: "\<exists>z. \<forall>x. x < z \<longrightarrow> (x \<noteq> t \<longleftrightarrow> True)" by auto
 lemma minf_P[no_atp]: "\<exists>z. \<forall>x. x < z \<longrightarrow> (P \<longleftrightarrow> P)" by blast
 
-text\<open>Theorems for @{text "\<exists>z. \<forall>x. x < z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>+\<^sub>\<infinity>)"}\<close>
+text\<open>Theorems for \<open>\<exists>z. \<forall>x. x < z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>+\<^sub>\<infinity>)\<close>\<close>
 lemma pinf_gt[no_atp]:  "\<exists>z. \<forall>x. z < x \<longrightarrow> (t < x \<longleftrightarrow> True)" by auto
 lemma pinf_lt[no_atp]: "\<exists>z. \<forall>x. z < x \<longrightarrow>  (x < t \<longleftrightarrow>  False)"
   by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le)
@@ -452,7 +452,7 @@
 lemma ge_ex[no_atp]: "\<exists>y. x \<sqsubseteq> y"
   using gt_ex by auto
 
-text \<open>Theorems for @{text "\<exists>z. \<forall>x. z \<sqsubset> x \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>+\<^sub>\<infinity>)"}\<close>
+text \<open>Theorems for \<open>\<exists>z. \<forall>x. z \<sqsubset> x \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>+\<^sub>\<infinity>)\<close>\<close>
 lemma pinf_conj[no_atp]:
   assumes ex1: "\<exists>z1. \<forall>x. z1 \<sqsubset> x \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
     and ex2: "\<exists>z2. \<forall>x. z2 \<sqsubset> x \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
@@ -516,7 +516,7 @@
   using lt_ex by auto
 
 
-text \<open>Theorems for @{text "\<exists>z. \<forall>x. x \<sqsubset> z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>-\<^sub>\<infinity>)"}\<close>
+text \<open>Theorems for \<open>\<exists>z. \<forall>x. x \<sqsubset> z \<longrightarrow> (P x \<longleftrightarrow> P\<^sub>-\<^sub>\<infinity>)\<close>\<close>
 lemma minf_conj[no_atp]:
   assumes ex1: "\<exists>z1. \<forall>x. x \<sqsubset> z1 \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
     and ex2: "\<exists>z2. \<forall>x. x \<sqsubset> z2 \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
--- a/src/HOL/Decision_Procs/Ferrack.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Ferrack.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -7,7 +7,7 @@
   "~~/src/HOL/Library/Code_Target_Numeral" "~~/src/HOL/Library/Old_Recdef"
 begin
 
-section \<open>Quantifier elimination for @{text "\<real> (0, 1, +, <)"}\<close>
+section \<open>Quantifier elimination for \<open>\<real> (0, 1, +, <)\<close>\<close>
 
   (*********************************************************************************)
   (****                            SHADOW SYNTAX AND SEMANTICS                  ****)
--- a/src/HOL/Decision_Procs/MIR.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/MIR.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -7,7 +7,7 @@
   "~~/src/HOL/Library/Code_Target_Numeral" "~~/src/HOL/Library/Old_Recdef"
 begin
 
-section \<open>Quantifier elimination for @{text "\<real> (0, 1, +, floor, <)"}\<close>
+section \<open>Quantifier elimination for \<open>\<real> (0, 1, +, floor, <)\<close>\<close>
 
 declare of_int_floor_cancel [simp del]
 
@@ -1428,8 +1428,8 @@
   by (induct p rule: qelim.induct) (auto simp del: simpfm.simps)
 
 
-text \<open>The @{text "\<int>"} Part\<close>
-text\<open>Linearity for fm where Bound 0 ranges over @{text "\<int>"}\<close>
+text \<open>The \<open>\<int>\<close> Part\<close>
+text\<open>Linearity for fm where Bound 0 ranges over \<open>\<int>\<close>\<close>
 
 function zsplit0 :: "num \<Rightarrow> int \<times> num" (* splits the bounded from the unbounded part*) where
   "zsplit0 (C c) = (0,C c)"
@@ -1933,10 +1933,10 @@
   ultimately show ?case by blast
 qed auto
 
-text\<open>plusinf : Virtual substitution of @{text "+\<infinity>"}
-       minusinf: Virtual substitution of @{text "-\<infinity>"}
-       @{text "\<delta>"} Compute lcm @{text "d| Dvd d  c*x+t \<in> p"}
-       @{text "d_\<delta>"} checks if a given l divides all the ds above\<close>
+text\<open>plusinf : Virtual substitution of \<open>+\<infinity>\<close>
+       minusinf: Virtual substitution of \<open>-\<infinity>\<close>
+       \<open>\<delta>\<close> Compute lcm \<open>d| Dvd d  c*x+t \<in> p\<close>
+       \<open>d_\<delta>\<close> checks if a given l divides all the ds above\<close>
 
 fun minusinf:: "fm \<Rightarrow> fm" where
   "minusinf (And p q) = conj (minusinf p) (minusinf q)" 
@@ -3270,9 +3270,9 @@
   using lp
   by (induct p rule: mirror.induct) (simp_all add: split_def image_Un)
   
-text \<open>The @{text "\<real>"} part\<close>
-
-text\<open>Linearity for fm where Bound 0 ranges over @{text "\<real>"}\<close>
+text \<open>The \<open>\<real>\<close> part\<close>
+
+text\<open>Linearity for fm where Bound 0 ranges over \<open>\<real>\<close>\<close>
 consts
   isrlfm :: "fm \<Rightarrow> bool"   (* Linearity test for fm *)
 recdef isrlfm "measure size"
--- a/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -60,7 +60,7 @@
 | "tmboundslt n (Sub a b) = (tmboundslt n a \<and> tmboundslt n b)"
 | "tmboundslt n (Mul i a) = tmboundslt n a"
 
-primrec tmbound0 :: "tm \<Rightarrow> bool"  -- \<open>a tm is INDEPENDENT of Bound 0\<close>
+primrec tmbound0 :: "tm \<Rightarrow> bool"  \<comment> \<open>a tm is INDEPENDENT of Bound 0\<close>
 where
   "tmbound0 (CP c) = True"
 | "tmbound0 (Bound n) = (n>0)"
@@ -76,7 +76,7 @@
   using nb
   by (induct a rule: tm.induct) auto
 
-primrec tmbound :: "nat \<Rightarrow> tm \<Rightarrow> bool"  -- \<open>a tm is INDEPENDENT of Bound n\<close>
+primrec tmbound :: "nat \<Rightarrow> tm \<Rightarrow> bool"  \<comment> \<open>a tm is INDEPENDENT of Bound n\<close>
 where
   "tmbound n (CP c) = True"
 | "tmbound n (Bound m) = (n \<noteq> m)"
@@ -626,7 +626,7 @@
 | "boundslt n (E p) = boundslt (Suc n) p"
 | "boundslt n (A p) = boundslt (Suc n) p"
 
-fun bound0:: "fm \<Rightarrow> bool"  -- \<open>a Formula is independent of Bound 0\<close>
+fun bound0:: "fm \<Rightarrow> bool"  \<comment> \<open>a Formula is independent of Bound 0\<close>
 where
   "bound0 T = True"
 | "bound0 F = True"
@@ -647,7 +647,7 @@
   using bp tmbound0_I[where b="b" and bs="bs" and b'="b'"]
   by (induct p rule: bound0.induct) auto
 
-primrec bound:: "nat \<Rightarrow> fm \<Rightarrow> bool"  -- \<open>a Formula is independent of Bound n\<close>
+primrec bound:: "nat \<Rightarrow> fm \<Rightarrow> bool"  \<comment> \<open>a Formula is independent of Bound n\<close>
 where
   "bound m T = True"
 | "bound m F = True"
@@ -897,7 +897,7 @@
 lemma decr0_qf: "bound0 p \<Longrightarrow> qfree (decr0 p)"
   by (induct p) simp_all
 
-fun isatom :: "fm \<Rightarrow> bool"  -- \<open>test for atomicity\<close>
+fun isatom :: "fm \<Rightarrow> bool"  \<comment> \<open>test for atomicity\<close>
 where
   "isatom T = True"
 | "isatom F = True"
@@ -1643,7 +1643,7 @@
 
 subsection \<open>Core Procedure\<close>
 
-fun minusinf:: "fm \<Rightarrow> fm"  -- \<open>Virtual substitution of -\<infinity>\<close>
+fun minusinf:: "fm \<Rightarrow> fm"  \<comment> \<open>Virtual substitution of -\<infinity>\<close>
 where
   "minusinf (And p q) = conj (minusinf p) (minusinf q)"
 | "minusinf (Or p q) = disj (minusinf p) (minusinf q)"
@@ -1653,7 +1653,7 @@
 | "minusinf (Le  (CNP 0 c e)) = disj (conj (eq (CP c)) (le e)) (lt (CP (~\<^sub>p c)))"
 | "minusinf p = p"
 
-fun plusinf:: "fm \<Rightarrow> fm"  -- \<open>Virtual substitution of +\<infinity>\<close>
+fun plusinf:: "fm \<Rightarrow> fm"  \<comment> \<open>Virtual substitution of +\<infinity>\<close>
 where
   "plusinf (And p q) = conj (plusinf p) (plusinf q)"
 | "plusinf (Or p q) = disj (plusinf p) (plusinf q)"
--- a/src/HOL/Decision_Procs/Polynomial_List.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Polynomial_List.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -81,15 +81,15 @@
   obtains q where "poly p2 = poly (p1 *** q)"
   using assms by (auto simp add: divides_def)
 
--- \<open>order of a polynomial\<close>
+\<comment> \<open>order of a polynomial\<close>
 definition (in ring_1) order :: "'a \<Rightarrow> 'a list \<Rightarrow> nat"
   where "order a p = (SOME n. ([-a, 1] %^ n) divides p \<and> \<not> (([-a, 1] %^ (Suc n)) divides p))"
 
--- \<open>degree of a polynomial\<close>
+\<comment> \<open>degree of a polynomial\<close>
 definition (in semiring_0) degree :: "'a list \<Rightarrow> nat"
   where "degree p = length (pnormalize p) - 1"
 
--- \<open>squarefree polynomials --- NB with respect to real roots only\<close>
+\<comment> \<open>squarefree polynomials --- NB with respect to real roots only\<close>
 definition (in ring_1) rsquarefree :: "'a list \<Rightarrow> bool"
   where "rsquarefree p \<longleftrightarrow> poly p \<noteq> poly [] \<and> (\<forall>a. order a p = 0 \<or> order a p = 1)"
 
--- a/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -30,7 +30,7 @@
 | "polysize (Pw p n) = 1 + polysize p"
 | "polysize (CN c n p) = 4 + polysize c + polysize p"
 
-primrec polybound0:: "poly \<Rightarrow> bool" -- \<open>a poly is INDEPENDENT of Bound 0\<close>
+primrec polybound0:: "poly \<Rightarrow> bool" \<comment> \<open>a poly is INDEPENDENT of Bound 0\<close>
 where
   "polybound0 (C c) \<longleftrightarrow> True"
 | "polybound0 (Bound n) \<longleftrightarrow> n > 0"
@@ -41,7 +41,7 @@
 | "polybound0 (Pw p n) \<longleftrightarrow> polybound0 p"
 | "polybound0 (CN c n p) \<longleftrightarrow> n \<noteq> 0 \<and> polybound0 c \<and> polybound0 p"
 
-primrec polysubst0:: "poly \<Rightarrow> poly \<Rightarrow> poly" -- \<open>substitute a poly into a poly for Bound 0\<close>
+primrec polysubst0:: "poly \<Rightarrow> poly \<Rightarrow> poly" \<comment> \<open>substitute a poly into a poly for Bound 0\<close>
 where
   "polysubst0 t (C c) = C c"
 | "polysubst0 t (Bound n) = (if n = 0 then t else Bound n)"
--- a/src/HOL/Decision_Procs/ex/Approximation_Quickcheck_Ex.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Decision_Procs/ex/Approximation_Quickcheck_Ex.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -31,7 +31,7 @@
   shows "x > 1 \<Longrightarrow> x \<le> 2 ^ 20 * log 2 x + 1 \<and> (sin x)\<^sup>2 + (cos x)\<^sup>2 = 1"
   using [[quickcheck_approximation_custom_seed = 1]]
   using [[quickcheck_approximation_epsilon = 0.00000001]]
-    --\<open>avoids spurious counterexamples in approximate computation of @{term "(sin x)\<^sup>2 + (cos x)\<^sup>2"}
+    \<comment>\<open>avoids spurious counterexamples in approximate computation of @{term "(sin x)\<^sup>2 + (cos x)\<^sup>2"}
       and therefore avoids expensive failing attempts for certification\<close>
   quickcheck[approximation, expect=counterexample, size=20]
   oops
--- a/src/HOL/Finite_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Finite_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1196,12 +1196,12 @@
 definition card :: "'a set \<Rightarrow> nat" where
   "card = folding.F (\<lambda>_. Suc) 0"
 
-interpretation card!: folding "\<lambda>_. Suc" 0
-where
+interpretation card: folding "\<lambda>_. Suc" 0
+rewrites
   "folding.F (\<lambda>_. Suc) 0 = card"
 proof -
   show "folding (\<lambda>_. Suc)" by standard rule
-  then interpret card!: folding "\<lambda>_. Suc" 0 .
+  then interpret card: folding "\<lambda>_. Suc" 0 .
   from card_def show "folding.F (\<lambda>_. Suc) 0 = card" by rule
 qed
 
--- a/src/HOL/GCD.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/GCD.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -104,7 +104,7 @@
   "is_unit (gcd a b) \<longleftrightarrow> coprime a b"
   by (cases "a = 0 \<and> b = 0") (auto simp add: unit_factor_gcd dest: is_unit_unit_factor)
 
-sublocale gcd!: abel_semigroup gcd
+sublocale gcd: abel_semigroup gcd
 proof
   fix a b c
   show "gcd a b = gcd b a"
@@ -256,7 +256,7 @@
   "unit_factor (lcm a b) = (if a = 0 \<or> b = 0 then 0 else 1)"
   by (simp add: unit_factor_gcd dvd_unit_factor_div lcm_gcd)
 
-sublocale lcm!: abel_semigroup lcm
+sublocale lcm: abel_semigroup lcm
 proof
   fix a b c
   show "lcm a b = lcm b a"
@@ -1971,7 +1971,7 @@
 
 interpretation gcd_lcm_complete_lattice_nat:
   complete_lattice Gcd Lcm gcd Rings.dvd "\<lambda>m n. m dvd n \<and> \<not> n dvd m" lcm 1 "0::nat"
-where "Inf.INFIMUM Gcd A f = Gcd (f ` A :: nat set)"
+rewrites "Inf.INFIMUM Gcd A f = Gcd (f ` A :: nat set)"
   and "Sup.SUPREMUM Lcm A f = Lcm (f ` A)"
 proof -
   show "class.complete_lattice Gcd Lcm gcd Rings.dvd (\<lambda>m n. m dvd n \<and> \<not> n dvd m) lcm 1 (0::nat)"
--- a/src/HOL/Groups.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Groups.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -131,7 +131,7 @@
   assumes add_assoc [algebra_simps, field_simps]: "(a + b) + c = a + (b + c)"
 begin
 
-sublocale add!: semigroup plus
+sublocale add: semigroup plus
   by standard (fact add_assoc)
 
 end
@@ -142,7 +142,7 @@
   assumes add_commute [algebra_simps, field_simps]: "a + b = b + a"
 begin
 
-sublocale add!: abel_semigroup plus
+sublocale add: abel_semigroup plus
   by standard (fact add_commute)
 
 declare add.left_commute [algebra_simps, field_simps]
@@ -159,7 +159,7 @@
   assumes mult_assoc [algebra_simps, field_simps]: "(a * b) * c = a * (b * c)"
 begin
 
-sublocale mult!: semigroup times
+sublocale mult: semigroup times
   by standard (fact mult_assoc)
 
 end
@@ -170,7 +170,7 @@
   assumes mult_commute [algebra_simps, field_simps]: "a * b = b * a"
 begin
 
-sublocale mult!: abel_semigroup times
+sublocale mult: abel_semigroup times
   by standard (fact mult_commute)
 
 declare mult.left_commute [algebra_simps, field_simps]
@@ -188,7 +188,7 @@
     and add_0_right: "a + 0 = a"
 begin
 
-sublocale add!: monoid plus 0
+sublocale add: monoid plus 0
   by standard (fact add_0_left add_0_right)+
 
 end
@@ -203,7 +203,7 @@
 subclass monoid_add
   by standard (simp_all add: add_0 add.commute [of _ 0])
 
-sublocale add!: comm_monoid plus 0
+sublocale add: comm_monoid plus 0
   by standard (simp add: ac_simps)
 
 end
@@ -213,7 +213,7 @@
     and mult_1_right: "a * 1 = a"
 begin
 
-sublocale mult!: monoid times 1
+sublocale mult: monoid times 1
   by standard (fact mult_1_left mult_1_right)+
 
 end
@@ -228,7 +228,7 @@
 subclass monoid_mult
   by standard (simp_all add: mult_1 mult.commute [of _ 1])
 
-sublocale mult!: comm_monoid times 1
+sublocale mult: comm_monoid times 1
   by standard (simp add: ac_simps)
 
 end
--- a/src/HOL/Groups_Big.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Groups_Big.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -471,12 +471,12 @@
 where
   "setsum = comm_monoid_set.F plus 0"
 
-sublocale setsum!: comm_monoid_set plus 0
-where
+sublocale setsum: comm_monoid_set plus 0
+rewrites
   "comm_monoid_set.F plus 0 = setsum"
 proof -
   show "comm_monoid_set plus 0" ..
-  then interpret setsum!: comm_monoid_set plus 0 .
+  then interpret setsum: comm_monoid_set plus 0 .
   from setsum_def show "comm_monoid_set.F plus 0 = setsum" by rule
 qed
 
@@ -1062,12 +1062,12 @@
 where
   "setprod = comm_monoid_set.F times 1"
 
-sublocale setprod!: comm_monoid_set times 1
-where
+sublocale setprod: comm_monoid_set times 1
+rewrites
   "comm_monoid_set.F times 1 = setprod"
 proof -
   show "comm_monoid_set times 1" ..
-  then interpret setprod!: comm_monoid_set times 1 .
+  then interpret setprod: comm_monoid_set times 1 .
   from setprod_def show "comm_monoid_set.F times 1 = setprod" by rule
 qed
 
--- a/src/HOL/Groups_List.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Groups_List.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -65,12 +65,12 @@
 where
   "listsum  = monoid_list.F plus 0"
 
-sublocale listsum!: monoid_list plus 0
-where
+sublocale listsum: monoid_list plus 0
+rewrites
  "monoid_list.F plus 0 = listsum"
 proof -
   show "monoid_list plus 0" ..
-  then interpret listsum!: monoid_list plus 0 .
+  then interpret listsum: monoid_list plus 0 .
   from listsum_def show "monoid_list.F plus 0 = listsum" by rule
 qed
  
@@ -79,22 +79,22 @@
 context comm_monoid_add
 begin
 
-sublocale listsum!: comm_monoid_list plus 0
-where
+sublocale listsum: comm_monoid_list plus 0
+rewrites
   "monoid_list.F plus 0 = listsum"
 proof -
   show "comm_monoid_list plus 0" ..
-  then interpret listsum!: comm_monoid_list plus 0 .
+  then interpret listsum: comm_monoid_list plus 0 .
   from listsum_def show "monoid_list.F plus 0 = listsum" by rule
 qed
 
-sublocale setsum!: comm_monoid_list_set plus 0
-where
+sublocale setsum: comm_monoid_list_set plus 0
+rewrites
   "monoid_list.F plus 0 = listsum"
   and "comm_monoid_set.F plus 0 = setsum"
 proof -
   show "comm_monoid_list_set plus 0" ..
-  then interpret setsum!: comm_monoid_list_set plus 0 .
+  then interpret setsum: comm_monoid_list_set plus 0 .
   from listsum_def show "monoid_list.F plus 0 = listsum" by rule
   from setsum_def show "comm_monoid_set.F plus 0 = setsum" by rule
 qed
@@ -332,12 +332,12 @@
 where
   "listprod  = monoid_list.F times 1"
 
-sublocale listprod!: monoid_list times 1
-where
+sublocale listprod: monoid_list times 1
+rewrites
   "monoid_list.F times 1 = listprod"
 proof -
   show "monoid_list times 1" ..
-  then interpret listprod!: monoid_list times 1 .
+  then interpret listprod: monoid_list times 1 .
   from listprod_def show "monoid_list.F times 1 = listprod" by rule
 qed
 
@@ -346,22 +346,22 @@
 context comm_monoid_mult
 begin
 
-sublocale listprod!: comm_monoid_list times 1
-where
+sublocale listprod: comm_monoid_list times 1
+rewrites
   "monoid_list.F times 1 = listprod"
 proof -
   show "comm_monoid_list times 1" ..
-  then interpret listprod!: comm_monoid_list times 1 .
+  then interpret listprod: comm_monoid_list times 1 .
   from listprod_def show "monoid_list.F times 1 = listprod" by rule
 qed
 
-sublocale setprod!: comm_monoid_list_set times 1
-where
+sublocale setprod: comm_monoid_list_set times 1
+rewrites
   "monoid_list.F times 1 = listprod"
   and "comm_monoid_set.F times 1 = setprod"
 proof -
   show "comm_monoid_list_set times 1" ..
-  then interpret setprod!: comm_monoid_list_set times 1 .
+  then interpret setprod: comm_monoid_list_set times 1 .
   from listprod_def show "monoid_list.F times 1 = listprod" by rule
   from setprod_def show "comm_monoid_set.F times 1 = setprod" by rule
 qed
--- a/src/HOL/HOLCF/Universal.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/HOLCF/Universal.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -836,7 +836,7 @@
 
 end
 
-interpretation compact_basis!:
+interpretation compact_basis:
   ideal_completion below Rep_compact_basis
     "approximants :: 'a::bifinite \<Rightarrow> 'a compact_basis set"
 proof -
--- a/src/HOL/Hahn_Banach/Hahn_Banach.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Hahn_Banach/Hahn_Banach.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -49,9 +49,9 @@
     and "seminorm E p" and "linearform F f"
   assumes fp: "\<forall>x \<in> F. f x \<le> p x"
   shows "\<exists>h. linearform E h \<and> (\<forall>x \<in> F. h x = f x) \<and> (\<forall>x \<in> E. h x \<le> p x)"
-    -- \<open>Let \<open>E\<close> be a vector space, \<open>F\<close> a subspace of \<open>E\<close>, \<open>p\<close> a seminorm on \<open>E\<close>,\<close>
-    -- \<open>and \<open>f\<close> a linear form on \<open>F\<close> such that \<open>f\<close> is bounded by \<open>p\<close>,\<close>
-    -- \<open>then \<open>f\<close> can be extended to a linear form \<open>h\<close> on \<open>E\<close> in a norm-preserving way. \<^smallskip>\<close>
+    \<comment> \<open>Let \<open>E\<close> be a vector space, \<open>F\<close> a subspace of \<open>E\<close>, \<open>p\<close> a seminorm on \<open>E\<close>,\<close>
+    \<comment> \<open>and \<open>f\<close> a linear form on \<open>F\<close> such that \<open>f\<close> is bounded by \<open>p\<close>,\<close>
+    \<comment> \<open>then \<open>f\<close> can be extended to a linear form \<open>h\<close> on \<open>E\<close> in a norm-preserving way. \<^smallskip>\<close>
 proof -
   interpret vectorspace E by fact
   interpret subspace F E by fact
@@ -64,8 +64,8 @@
   {
     fix c assume cM: "c \<in> chains M" and ex: "\<exists>x. x \<in> c"
     have "\<Union>c \<in> M"
-      -- \<open>Show that every non-empty chain \<open>c\<close> of \<open>M\<close> has an upper bound in \<open>M\<close>:\<close>
-      -- \<open>\<open>\<Union>c\<close> is greater than any element of the chain \<open>c\<close>, so it suffices to show \<open>\<Union>c \<in> M\<close>.\<close>
+      \<comment> \<open>Show that every non-empty chain \<open>c\<close> of \<open>M\<close> has an upper bound in \<open>M\<close>:\<close>
+      \<comment> \<open>\<open>\<Union>c\<close> is greater than any element of the chain \<open>c\<close>, so it suffices to show \<open>\<Union>c \<in> M\<close>.\<close>
       unfolding M_def
     proof (rule norm_pres_extensionI)
       let ?H = "domain (\<Union>c)"
@@ -95,9 +95,9 @@
     qed
   }
   then have "\<exists>g \<in> M. \<forall>x \<in> M. g \<subseteq> x \<longrightarrow> x = g"
-  -- \<open>With Zorn's Lemma we can conclude that there is a maximal element in \<open>M\<close>. \<^smallskip>\<close>
+  \<comment> \<open>With Zorn's Lemma we can conclude that there is a maximal element in \<open>M\<close>. \<^smallskip>\<close>
   proof (rule Zorn's_Lemma)
-      -- \<open>We show that \<open>M\<close> is non-empty:\<close>
+      \<comment> \<open>We show that \<open>M\<close> is non-empty:\<close>
     show "graph F f \<in> M"
       unfolding M_def
     proof (rule norm_pres_extensionI2)
@@ -116,18 +116,18 @@
     and HE: "H \<unlhd> E" and FH: "F \<unlhd> H"
     and graphs: "graph F f \<subseteq> graph H h"
     and hp: "\<forall>x \<in> H. h x \<le> p x" unfolding M_def ..
-      -- \<open>\<open>g\<close> is a norm-preserving extension of \<open>f\<close>, in other words:\<close>
-      -- \<open>\<open>g\<close> is the graph of some linear form \<open>h\<close> defined on a subspace \<open>H\<close> of \<open>E\<close>,\<close>
-      -- \<open>and \<open>h\<close> is an extension of \<open>f\<close> that is again bounded by \<open>p\<close>. \<^smallskip>\<close>
+      \<comment> \<open>\<open>g\<close> is a norm-preserving extension of \<open>f\<close>, in other words:\<close>
+      \<comment> \<open>\<open>g\<close> is the graph of some linear form \<open>h\<close> defined on a subspace \<open>H\<close> of \<open>E\<close>,\<close>
+      \<comment> \<open>and \<open>h\<close> is an extension of \<open>f\<close> that is again bounded by \<open>p\<close>. \<^smallskip>\<close>
   from HE E have H: "vectorspace H"
     by (rule subspace.vectorspace)
 
   have HE_eq: "H = E"
-    -- \<open>We show that \<open>h\<close> is defined on whole \<open>E\<close> by classical contradiction. \<^smallskip>\<close>
+    \<comment> \<open>We show that \<open>h\<close> is defined on whole \<open>E\<close> by classical contradiction. \<^smallskip>\<close>
   proof (rule classical)
     assume neq: "H \<noteq> E"
-      -- \<open>Assume \<open>h\<close> is not defined on whole \<open>E\<close>. Then show that \<open>h\<close> can be extended\<close>
-      -- \<open>in a norm-preserving way to a function \<open>h'\<close> with the graph \<open>g'\<close>. \<^smallskip>\<close>
+      \<comment> \<open>Assume \<open>h\<close> is not defined on whole \<open>E\<close>. Then show that \<open>h\<close> can be extended\<close>
+      \<comment> \<open>in a norm-preserving way to a function \<open>h'\<close> with the graph \<open>g'\<close>. \<^smallskip>\<close>
     have "\<exists>g' \<in> M. g \<subseteq> g' \<and> g \<noteq> g'"
     proof -
       from HE have "H \<subseteq> E" ..
@@ -143,7 +143,7 @@
       qed
 
       def H' \<equiv> "H + lin x'"
-        -- \<open>Define \<open>H'\<close> as the direct sum of \<open>H\<close> and the linear closure of \<open>x'\<close>. \<^smallskip>\<close>
+        \<comment> \<open>Define \<open>H'\<close> as the direct sum of \<open>H\<close> and the linear closure of \<open>x'\<close>. \<^smallskip>\<close>
       have HH': "H \<unlhd> H'"
       proof (unfold H'_def)
         from x'E have "vectorspace (lin x')" ..
@@ -153,8 +153,8 @@
       obtain xi where
         xi: "\<forall>y \<in> H. - p (y + x') - h y \<le> xi
           \<and> xi \<le> p (y + x') - h y"
-        -- \<open>Pick a real number \<open>\<xi>\<close> that fulfills certain inequality; this will\<close>
-        -- \<open>be used to establish that \<open>h'\<close> is a norm-preserving extension of \<open>h\<close>.
+        \<comment> \<open>Pick a real number \<open>\<xi>\<close> that fulfills a certain inequality; this will\<close>
+        \<comment> \<open>be used to establish that \<open>h'\<close> is a norm-preserving extension of \<open>h\<close>.
            \label{ex-xi-use}\<^smallskip>\<close>
       proof -
         from H have "\<exists>xi. \<forall>y \<in> H. - p (y + x') - h y \<le> xi
@@ -182,10 +182,10 @@
 
       def h' \<equiv> "\<lambda>x. let (y, a) =
           SOME (y, a). x = y + a \<cdot> x' \<and> y \<in> H in h y + a * xi"
-        -- \<open>Define the extension \<open>h'\<close> of \<open>h\<close> to \<open>H'\<close> using \<open>\<xi>\<close>. \<^smallskip>\<close>
+        \<comment> \<open>Define the extension \<open>h'\<close> of \<open>h\<close> to \<open>H'\<close> using \<open>\<xi>\<close>. \<^smallskip>\<close>
 
       have "g \<subseteq> graph H' h' \<and> g \<noteq> graph H' h'"
-        -- \<open>\<open>h'\<close> is an extension of \<open>h\<close> \dots \<^smallskip>\<close>
+        \<comment> \<open>\<open>h'\<close> is an extension of \<open>h\<close> \dots \<^smallskip>\<close>
       proof
         show "g \<subseteq> graph H' h'"
         proof -
@@ -222,7 +222,7 @@
         qed
       qed
       moreover have "graph H' h' \<in> M"
-        -- \<open>and \<open>h'\<close> is norm-preserving. \<^smallskip>\<close>
+        \<comment> \<open>and \<open>h'\<close> is norm-preserving. \<^smallskip>\<close>
       proof (unfold M_def)
         show "graph H' h' \<in> norm_pres_extensions E p F f"
         proof (rule norm_pres_extensionI2)
@@ -270,7 +270,7 @@
       ultimately show ?thesis ..
     qed
     then have "\<not> (\<forall>x \<in> M. g \<subseteq> x \<longrightarrow> g = x)" by simp
-      -- \<open>So the graph \<open>g\<close> of \<open>h\<close> cannot be maximal. Contradiction! \<^smallskip>\<close>
+      \<comment> \<open>So the graph \<open>g\<close> of \<open>h\<close> cannot be maximal. Contradiction! \<^smallskip>\<close>
     with gx show "H = E" by contradiction
   qed
 
--- a/src/HOL/Imperative_HOL/Heap_Monad.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Imperative_HOL/Heap_Monad.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -404,8 +404,8 @@
     by (simp only: Heap_ord_def Heap_lub_def)
 qed
 
-interpretation heap!: partial_function_definitions Heap_ord Heap_lub
-  where "Heap_lub {} \<equiv> Heap Map.empty"
+interpretation heap: partial_function_definitions Heap_ord Heap_lub
+  rewrites "Heap_lub {} \<equiv> Heap Map.empty"
 by (fact heap_interpretation)(simp add: Heap_lub_empty)
 
 lemma heap_step_admissible: 
--- a/src/HOL/Lattices.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Lattices.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -271,7 +271,7 @@
 context semilattice_inf
 begin
 
-sublocale inf!: semilattice inf
+sublocale inf: semilattice inf
 proof
   fix a b c
   show "(a \<sqinter> b) \<sqinter> c = a \<sqinter> (b \<sqinter> c)"
@@ -282,7 +282,7 @@
     by (rule antisym) (auto simp add: le_inf_iff)
 qed
 
-sublocale inf!: semilattice_order inf less_eq less
+sublocale inf: semilattice_order inf less_eq less
   by standard (auto simp add: le_iff_inf less_le)
 
 lemma inf_assoc: "(x \<sqinter> y) \<sqinter> z = x \<sqinter> (y \<sqinter> z)"
@@ -316,7 +316,7 @@
 context semilattice_sup
 begin
 
-sublocale sup!: semilattice sup
+sublocale sup: semilattice sup
 proof
   fix a b c
   show "(a \<squnion> b) \<squnion> c = a \<squnion> (b \<squnion> c)"
@@ -327,7 +327,7 @@
     by (rule antisym) (auto simp add: le_sup_iff)
 qed
 
-sublocale sup!: semilattice_order sup greater_eq greater
+sublocale sup: semilattice_order sup greater_eq greater
   by standard (auto simp add: le_iff_sup sup.commute less_le)
 
 lemma sup_assoc: "(x \<squnion> y) \<squnion> z = x \<squnion> (y \<squnion> z)"
@@ -484,8 +484,8 @@
 class bounded_semilattice_inf_top = semilattice_inf + order_top
 begin
 
-sublocale inf_top!: semilattice_neutr inf top
-  + inf_top!: semilattice_neutr_order inf top less_eq less
+sublocale inf_top: semilattice_neutr inf top
+  + inf_top: semilattice_neutr_order inf top less_eq less
 proof
   fix x
   show "x \<sqinter> \<top> = x"
@@ -497,8 +497,8 @@
 class bounded_semilattice_sup_bot = semilattice_sup + order_bot
 begin
 
-sublocale sup_bot!: semilattice_neutr sup bot
-  + sup_bot!: semilattice_neutr_order sup bot greater_eq greater
+sublocale sup_bot: semilattice_neutr sup bot
+  + sup_bot: semilattice_neutr_order sup bot greater_eq greater
 proof
   fix x
   show "x \<squnion> \<bottom> = x"
@@ -715,8 +715,8 @@
 context linorder
 begin
 
-sublocale min!: semilattice_order min less_eq less
-  + max!: semilattice_order max greater_eq greater
+sublocale min: semilattice_order min less_eq less
+  + max: semilattice_order max greater_eq greater
   by standard (auto simp add: min_def max_def)
 
 lemma min_le_iff_disj:
--- a/src/HOL/Lattices_Big.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Lattices_Big.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -318,12 +318,12 @@
 where
   "Inf_fin = semilattice_set.F inf"
 
-sublocale Inf_fin!: semilattice_order_set inf less_eq less
-where
+sublocale Inf_fin: semilattice_order_set inf less_eq less
+rewrites
   "semilattice_set.F inf = Inf_fin"
 proof -
   show "semilattice_order_set inf less_eq less" ..
-  then interpret Inf_fin!: semilattice_order_set inf less_eq less .
+  then interpret Inf_fin: semilattice_order_set inf less_eq less .
   from Inf_fin_def show "semilattice_set.F inf = Inf_fin" by rule
 qed
 
@@ -336,12 +336,12 @@
 where
   "Sup_fin = semilattice_set.F sup"
 
-sublocale Sup_fin!: semilattice_order_set sup greater_eq greater
-where
+sublocale Sup_fin: semilattice_order_set sup greater_eq greater
+rewrites
   "semilattice_set.F sup = Sup_fin"
 proof -
   show "semilattice_order_set sup greater_eq greater" ..
-  then interpret Sup_fin!: semilattice_order_set sup greater_eq greater .
+  then interpret Sup_fin: semilattice_order_set sup greater_eq greater .
   from Sup_fin_def show "semilattice_set.F sup = Sup_fin" by rule
 qed
 
@@ -490,16 +490,16 @@
 where
   "Max = semilattice_set.F max"
 
-sublocale Min!: semilattice_order_set min less_eq less
-  + Max!: semilattice_order_set max greater_eq greater
-where
+sublocale Min: semilattice_order_set min less_eq less
+  + Max: semilattice_order_set max greater_eq greater
+rewrites
   "semilattice_set.F min = Min"
   and "semilattice_set.F max = Max"
 proof -
   show "semilattice_order_set min less_eq less" by standard (auto simp add: min_def)
-  then interpret Min!: semilattice_order_set min less_eq less .
+  then interpret Min: semilattice_order_set min less_eq less .
   show "semilattice_order_set max greater_eq greater" by standard (auto simp add: max_def)
-  then interpret Max!: semilattice_order_set max greater_eq greater .
+  then interpret Max: semilattice_order_set max greater_eq greater .
   from Min_def show "semilattice_set.F min = Min" by rule
   from Max_def show "semilattice_set.F max = Max" by rule
 qed
@@ -530,14 +530,14 @@
 lemma dual_Min:
   "linorder.Min greater_eq = Max"
 proof -
-  interpret dual!: linorder greater_eq greater by (fact dual_linorder)
+  interpret dual: linorder greater_eq greater by (fact dual_linorder)
   show ?thesis by (simp add: dual.Min_def dual_min Max_def)
 qed
 
 lemma dual_Max:
   "linorder.Max greater_eq = Min"
 proof -
-  interpret dual!: linorder greater_eq greater by (fact dual_linorder)
+  interpret dual: linorder greater_eq greater by (fact dual_linorder)
   show ?thesis by (simp add: dual.Max_def dual_max Min_def)
 qed
 
--- a/src/HOL/Library/AList.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/AList.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -18,7 +18,7 @@
   to establish the invariant, e.g. for inductive proofs.
 \<close>
 
-subsection \<open>@{text update} and @{text updates}\<close>
+subsection \<open>\<open>update\<close> and \<open>updates\<close>\<close>
 
 qualified primrec update :: "'key \<Rightarrow> 'val \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
 where
@@ -163,7 +163,7 @@
   by (induct xs arbitrary: ys al) (auto split: list.splits)
 
 
-subsection \<open>@{text delete}\<close>
+subsection \<open>\<open>delete\<close>\<close>
 
 qualified definition delete :: "'key \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
   where delete_eq: "delete k = filter (\<lambda>(k', _). k \<noteq> k')"
@@ -215,7 +215,7 @@
   by (simp add: delete_eq)
 
 
-subsection \<open>@{text update_with_aux} and @{text delete_aux}\<close>
+subsection \<open>\<open>update_with_aux\<close> and \<open>delete_aux\<close>\<close>
 
 qualified primrec update_with_aux :: "'val \<Rightarrow> 'key \<Rightarrow> ('val \<Rightarrow> 'val) \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
 where
@@ -296,7 +296,7 @@
 by(cases ts)(auto split: split_if_asm)
 
 
-subsection \<open>@{text restrict}\<close>
+subsection \<open>\<open>restrict\<close>\<close>
 
 qualified definition restrict :: "'key set \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
   where restrict_eq: "restrict A = filter (\<lambda>(k, v). k \<in> A)"
@@ -380,7 +380,7 @@
   by (induct ps) auto
 
 
-subsection \<open>@{text clearjunk}\<close>
+subsection \<open>\<open>clearjunk\<close>\<close>
 
 qualified function clearjunk  :: "('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
 where
@@ -464,7 +464,7 @@
     (simp_all add: clearjunk_delete delete_map assms)
 
 
-subsection \<open>@{text map_ran}\<close>
+subsection \<open>\<open>map_ran\<close>\<close>
 
 definition map_ran :: "('key \<Rightarrow> 'val \<Rightarrow> 'val) \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
   where "map_ran f = map (\<lambda>(k, v). (k, f k v))"
@@ -490,7 +490,7 @@
   by (simp add: map_ran_def split_def clearjunk_map)
 
 
-subsection \<open>@{text merge}\<close>
+subsection \<open>\<open>merge\<close>\<close>
 
 qualified definition merge :: "('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
   where "merge qs ps = foldr (\<lambda>(k, v). update k v) ps qs"
@@ -558,7 +558,7 @@
   by (simp add: merge_conv')
 
 
-subsection \<open>@{text compose}\<close>
+subsection \<open>\<open>compose\<close>\<close>
 
 qualified function compose :: "('key \<times> 'a) list \<Rightarrow> ('a \<times> 'b) list \<Rightarrow> ('key \<times> 'b) list"
 where
@@ -723,7 +723,7 @@
   by (simp add: compose_conv map_comp_None_iff)
 
 
-subsection \<open>@{text map_entry}\<close>
+subsection \<open>\<open>map_entry\<close>\<close>
 
 qualified fun map_entry :: "'key \<Rightarrow> ('val \<Rightarrow> 'val) \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
 where
@@ -745,7 +745,7 @@
   using assms by (induct xs) (auto simp add: dom_map_entry)
 
 
-subsection \<open>@{text map_default}\<close>
+subsection \<open>\<open>map_default\<close>\<close>
 
 fun map_default :: "'key \<Rightarrow> 'val \<Rightarrow> ('val \<Rightarrow> 'val) \<Rightarrow> ('key \<times> 'val) list \<Rightarrow> ('key \<times> 'val) list"
 where
--- a/src/HOL/Library/BigO.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/BigO.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -16,20 +16,20 @@
 
 The main changes in this version are as follows:
 \begin{itemize}
-\item We have eliminated the @{text O} operator on sets. (Most uses of this seem
+\item We have eliminated the \<open>O\<close> operator on sets. (Most uses of this seem
   to be inessential.)
-\item We no longer use @{text "+"} as output syntax for @{text "+o"}
-\item Lemmas involving @{text "sumr"} have been replaced by more general lemmas
-  involving `@{text "setsum"}.
+\item We no longer use \<open>+\<close> as output syntax for \<open>+o\<close>
+\item Lemmas involving \<open>sumr\<close> have been replaced by more general lemmas
+  involving \<open>setsum\<close>.
 \item The library has been expanded, with e.g.~support for expressions of
-  the form @{text "f < g + O(h)"}.
+  the form \<open>f < g + O(h)\<close>.
 \end{itemize}
 
 Note also since the Big O library includes rules that demonstrate set
 inclusion, to use the automated reasoners effectively with the library
-one should redeclare the theorem @{text "subsetI"} as an intro rule,
-rather than as an @{text "intro!"} rule, for example, using
-\isa{\isakeyword{declare}}~@{text "subsetI [del, intro]"}.
+one should redeclare the theorem \<open>subsetI\<close> as an intro rule,
+rather than as an \<open>intro!\<close> rule, for example, using
+\isa{\isakeyword{declare}}~\<open>subsetI [del, intro]\<close>.
 \<close>
 
 subsection \<open>Definitions\<close>
--- a/src/HOL/Library/Boolean_Algebra.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Boolean_Algebra.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -26,10 +26,10 @@
   assumes disj_cancel_right [simp]: "x \<squnion> \<sim> x = \<one>"
 begin
 
-sublocale conj!: abel_semigroup conj
+sublocale conj: abel_semigroup conj
   by standard (fact conj_assoc conj_commute)+
 
-sublocale disj!: abel_semigroup disj
+sublocale disj: abel_semigroup disj
   by standard (fact disj_assoc disj_commute)+
 
 lemmas conj_left_commute = conj.left_commute
@@ -190,7 +190,7 @@
   assumes xor_def: "x \<oplus> y = (x \<sqinter> \<sim> y) \<squnion> (\<sim> x \<sqinter> y)"
 begin
 
-sublocale xor!: abel_semigroup xor
+sublocale xor: abel_semigroup xor
 proof
   fix x y z :: 'a
   let ?t = "(x \<sqinter> y \<sqinter> z) \<squnion> (x \<sqinter> \<sim> y \<sqinter> \<sim> z) \<squnion>
--- a/src/HOL/Library/Cardinality.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Cardinality.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -211,7 +211,7 @@
   fixes card_UNIV :: "'a card_UNIV"
   assumes card_UNIV: "card_UNIV = Phantom('a) CARD('a)"
 
-subsection \<open>Instantiations for @{text "card_UNIV"}\<close>
+subsection \<open>Instantiations for \<open>card_UNIV\<close>\<close>
 
 instantiation nat :: card_UNIV begin
 definition "finite_UNIV = Phantom(nat) False"
@@ -534,7 +534,7 @@
      (\<lambda>_. List.coset xs \<subseteq> set ys))"
 by simp
 
-notepad begin -- "test code setup"
+notepad begin \<comment> "test code setup"
 have "List.coset [True] = set [False] \<and> 
       List.coset [] \<subseteq> List.set [True, False] \<and> 
       finite (List.coset [True])"
--- a/src/HOL/Library/Code_Char.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Code_Char.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -107,7 +107,7 @@
 |  constant "Orderings.less_eq :: String.literal \<Rightarrow> String.literal \<Rightarrow> bool" \<rightharpoonup>
     (SML) "!((_ : string) <= _)"
     and (OCaml) "!((_ : string) <= _)"
-    -- \<open>Order operations for @{typ String.literal} work in Haskell only 
+    \<comment> \<open>Order operations for @{typ String.literal} work in Haskell only 
           if no type class instance needs to be generated, because String = [Char] in Haskell
           and @{typ "char list"} need not have the same order as @{typ String.literal}.\<close>
     and (Haskell) infix 4 "<="
--- a/src/HOL/Library/Code_Target_Nat.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Code_Target_Nat.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -135,7 +135,7 @@
   including integer.lifting by transfer auto
 
 lemma term_of_nat_code [code]:
-  -- \<open>Use @{term Code_Numeral.nat_of_integer} in term reconstruction
+  \<comment> \<open>Use @{term Code_Numeral.nat_of_integer} in term reconstruction
         instead of @{term Code_Target_Nat.Nat} such that reconstructed
         terms can be fed back to the code generator\<close>
   "term_of_class.term_of n =
--- a/src/HOL/Library/Code_Test.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Code_Test.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -131,7 +131,7 @@
   "xml_of_term (Code_Evaluation.Const x ty) = [xml.tagged (STR ''0'') (Some x) (xml_of_typ ty)]"
   "xml_of_term (Code_Evaluation.App t1 t2)  = [xml.tagged (STR ''5'') None [xml.node (xml_of_term t1), xml.node (xml_of_term t2)]]"
   "xml_of_term (Code_Evaluation.Abs x ty t) = [xml.tagged (STR ''4'') (Some x) [xml.node (xml_of_typ ty), xml.node (xml_of_term t)]]"
-  -- \<open>
+  \<comment> \<open>
     FIXME: @{const Code_Evaluation.Free} is used only in @{theory Quickcheck_Narrowing} to represent
     uninstantiated parameters in constructors. Here, we always translate them to @{ML Free} variables.
 \<close>
--- a/src/HOL/Library/ContNotDenum.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/ContNotDenum.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -15,8 +15,8 @@
 uncountable. It is formalised in the Isabelle/Isar theorem proving
 system.
 
-{\em Theorem:} The Continuum @{text "\<real>"} is not denumerable. In other
-words, there does not exist a function @{text "f: \<nat> \<Rightarrow> \<real>"} such that f is
+{\em Theorem:} The Continuum \<open>\<real>\<close> is not denumerable. In other
+words, there does not exist a function \<open>f: \<nat> \<Rightarrow> \<real>\<close> such that f is
 surjective.
 
 {\em Outline:} An elegant informal proof of this result uses Cantor's
@@ -26,8 +26,7 @@
 completeness of the Real numbers and is the foundation for our
 argument. Informally it states that an intersection of countable
 closed intervals (where each successive interval is a subset of the
-last) is non-empty. We then assume a surjective function @{text
-"f: \<nat> \<Rightarrow> \<real>"} exists and find a real x such that x is not in the range of f
+last) is non-empty. We then assume a surjective function \<open>f: \<nat> \<Rightarrow> \<real>\<close> exists and find a real x such that x is not in the range of f
 by generating a sequence of closed intervals then using the NIP.\<close>
 
 theorem real_non_denum: "\<not> (\<exists>f :: nat \<Rightarrow> real. surj f)"
--- a/src/HOL/Library/Convex.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Convex.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -883,7 +883,7 @@
   fix t x y :: real
   assume t: "t > 0" "t < 1" and xy: "x \<in> A" "y \<in> A" "x < y"
   def z \<equiv> "(1 - t) * x + t * y"
-  with `connected A` and xy have ivl: "{x..y} \<subseteq> A" using connected_contains_Icc by blast
+  with \<open>connected A\<close> and xy have ivl: "{x..y} \<subseteq> A" using connected_contains_Icc by blast
   
   from xy t have xz: "z > x" by (simp add: z_def algebra_simps)
   have "y - z = (1 - t) * (y - x)" by (simp add: z_def algebra_simps)
--- a/src/HOL/Library/Countable_Set_Type.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Countable_Set_Type.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -369,11 +369,11 @@
 
 subsection \<open>Additional lemmas\<close>
 
-subsubsection \<open>@{text cempty}\<close>
+subsubsection \<open>\<open>cempty\<close>\<close>
 
 lemma cemptyE [elim!]: "cin a cempty \<Longrightarrow> P" by simp
 
-subsubsection \<open>@{text cinsert}\<close>
+subsubsection \<open>\<open>cinsert\<close>\<close>
 
 lemma countable_insert_iff: "countable (insert x A) \<longleftrightarrow> countable A"
 by (metis Diff_eq_empty_iff countable_empty countable_insert subset_insertI uncountable_minus_countable)
@@ -386,7 +386,7 @@
 lemma mk_disjoint_cinsert: "cin a A \<Longrightarrow> \<exists>B. A = cinsert a B \<and> \<not> cin a B"
   by (rule exI[where x = "cDiff A (csingle a)"]) blast
 
-subsubsection \<open>@{text cimage}\<close>
+subsubsection \<open>\<open>cimage\<close>\<close>
 
 lemma subset_cimage_iff: "csubset_eq B (cimage f A) \<longleftrightarrow> (\<exists>AA. csubset_eq AA A \<and> B = cimage f AA)"
 by transfer (metis countable_subset image_mono mem_Collect_eq subset_imageE) 
--- a/src/HOL/Library/DAList.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/DAList.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -17,7 +17,7 @@
   by (induct xs) auto
 
 
-subsection \<open>Type @{text "('key, 'value) alist" }\<close>
+subsection \<open>Type \<open>('key, 'value) alist\<close>\<close>
 
 typedef ('key, 'value) alist = "{xs :: ('key \<times> 'value) list. (distinct \<circ> map fst) xs}"
   morphisms impl_of Alist
--- a/src/HOL/Library/DAList_Multiset.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/DAList_Multiset.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -206,8 +206,8 @@
 lemma mset_eq [code]: "HOL.equal (m1::'a::equal multiset) m2 \<longleftrightarrow> m1 \<le># m2 \<and> m2 \<le># m1"
   by (metis equal_multiset_def subset_mset.eq_iff)
 
-text \<open>By default the code for @{text "<"} is @{prop"xs < ys \<longleftrightarrow> xs \<le> ys \<and> \<not> xs = ys"}.
-With equality implemented by @{text"\<le>"}, this leads to three calls of  @{text"\<le>"}.
+text \<open>By default the code for \<open><\<close> is @{prop"xs < ys \<longleftrightarrow> xs \<le> ys \<and> \<not> xs = ys"}.
+With equality implemented by \<open>\<le>\<close>, this leads to three calls of  \<open>\<le>\<close>.
 Here is a more efficient version:\<close>
 lemma mset_less[code]: "xs <# (ys :: 'a multiset) \<longleftrightarrow> xs \<le># ys \<and> \<not> ys \<le># xs"
   by (rule subset_mset.less_le_not_le)
--- a/src/HOL/Library/Debug.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Debug.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -36,7 +36,7 @@
 
 code_printing
   constant Debug.trace \<rightharpoonup> (Eval) "Output.tracing"
-| constant Debug.flush \<rightharpoonup> (Eval) "Output.tracing/ (@{make'_string} _)" -- \<open>note indirection via antiquotation\<close>
+| constant Debug.flush \<rightharpoonup> (Eval) "Output.tracing/ (@{make'_string} _)" \<comment> \<open>note indirection via antiquotation\<close>
 | constant Debug.timing \<rightharpoonup> (Eval) "Timing.timeap'_msg"
 
 code_reserved Eval Output Timing
--- a/src/HOL/Library/Extended_Real.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Extended_Real.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -14,7 +14,7 @@
 text \<open>
 
 This should be part of @{theory Extended_Nat} or @{theory Order_Continuity}, but then the
-AFP-entry @{text "Jinja_Thread"} fails, as it does overload certain named from @{theory Complex_Main}.
+AFP-entry \<open>Jinja_Thread\<close> fails, as it overloads certain names from @{theory Complex_Main}.
 
 \<close>
 
@@ -3607,7 +3607,7 @@
   shows "inverse -- x --> inverse x"
 proof (cases x)
   case (real r)
-  with `0 < x` have **: "(\<lambda>x. ereal (inverse x)) -- r --> ereal (inverse r)"
+  with \<open>0 < x\<close> have **: "(\<lambda>x. ereal (inverse x)) -- r --> ereal (inverse r)"
     by (auto intro!: tendsto_inverse)
   from real \<open>0 < x\<close> show ?thesis
     by (auto simp: at_ereal tendsto_compose_filtermap[symmetric] eventually_at_filter
--- a/src/HOL/Library/FSet.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/FSet.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -445,12 +445,12 @@
 
 subsection \<open>Additional lemmas\<close>
 
-subsubsection \<open>@{text fsingleton}\<close>
+subsubsection \<open>\<open>fsingleton\<close>\<close>
 
 lemmas fsingletonE = fsingletonD [elim_format]
 
 
-subsubsection \<open>@{text femepty}\<close>
+subsubsection \<open>\<open>fempty\<close>\<close>
 
 lemma fempty_ffilter[simp]: "ffilter (\<lambda>_. False) A = {||}"
 by transfer auto
@@ -460,7 +460,7 @@
   by simp
 
 
-subsubsection \<open>@{text fset}\<close>
+subsubsection \<open>\<open>fset\<close>\<close>
 
 lemmas fset_simps[simp] = bot_fset.rep_eq finsert.rep_eq
 
@@ -483,7 +483,7 @@
 lemmas minus_fset[simp] = minus_fset.rep_eq
 
 
-subsubsection \<open>@{text filter_fset}\<close>
+subsubsection \<open>\<open>filter_fset\<close>\<close>
 
 lemma subset_ffilter: 
   "ffilter P A |\<subseteq>| ffilter Q A = (\<forall> x. x |\<in>| A \<longrightarrow> P x \<longrightarrow> Q x)"
@@ -499,7 +499,7 @@
   unfolding less_fset_def by (auto simp add: subset_ffilter eq_ffilter)
 
 
-subsubsection \<open>@{text finsert}\<close>
+subsubsection \<open>\<open>finsert\<close>\<close>
 
 (* FIXME, transferred doesn't work here *)
 lemma set_finsert:
@@ -511,7 +511,7 @@
   by (rule_tac x = "A |-| {|a|}" in exI, blast)
 
 
-subsubsection \<open>@{text fimage}\<close>
+subsubsection \<open>\<open>fimage\<close>\<close>
 
 lemma subset_fimage_iff: "(B |\<subseteq>| f|`|A) = (\<exists> AA. AA |\<subseteq>| A \<and> B = f|`|AA)"
 by transfer (metis mem_Collect_eq rev_finite_subset subset_image_iff)
@@ -548,7 +548,7 @@
 end
 
 
-subsubsection \<open>@{text fcard}\<close>
+subsubsection \<open>\<open>fcard\<close>\<close>
 
 (* FIXME: improve transferred to handle bounded meta quantification *)
 
@@ -631,7 +631,7 @@
 by transfer (rule card_psubset)
 
 
-subsubsection \<open>@{text ffold}\<close>
+subsubsection \<open>\<open>ffold\<close>\<close>
 
 (* FIXME: improve transferred to handle bounded meta quantification *)
 
--- a/src/HOL/Library/FinFun.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/FinFun.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -17,7 +17,7 @@
 \<close>
 
 
-subsection \<open>The @{text "map_default"} operation\<close>
+subsection \<open>The \<open>map_default\<close> operation\<close>
 
 definition map_default :: "'b \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> 'a \<Rightarrow> 'b"
 where "map_default b f a \<equiv> case f a of None \<Rightarrow> b | Some b' \<Rightarrow> b'"
@@ -307,7 +307,7 @@
 
 quickcheck_generator finfun constructors: finfun_update_code, "finfun_const :: 'b \<Rightarrow> 'a \<Rightarrow>f 'b"
 
-subsection \<open>@{text "finfun_update"} as instance of @{text "comp_fun_commute"}\<close>
+subsection \<open>\<open>finfun_update\<close> as instance of \<open>comp_fun_commute\<close>\<close>
 
 interpretation finfun_update: comp_fun_commute "\<lambda>a f. f(a :: 'a $:= b')"
   including finfun
@@ -1525,7 +1525,7 @@
 instance by intro_classes (simp add: card_UNIV_finfun_def card_UNIV Let_def card_UNIV_finfun)
 end
 
+text \<open>Deactivate syntax again. Import theory \<open>FinFun_Syntax\<close> to reactivate it.\<close>
+text \<open>Deactivate syntax again. Import theory \<open>FinFun_Syntax\<close> to reactivate it again\<close>
 
 no_type_notation
   finfun ("(_ \<Rightarrow>f /_)" [22, 21] 21)
--- a/src/HOL/Library/Formal_Power_Series.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Formal_Power_Series.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,7 +5,7 @@
 section \<open>A formalization of formal power series\<close>
 
 theory Formal_Power_Series
-imports Complex_Main
+imports Complex_Main "~~/src/HOL/Number_Theory/Euclidean_Algorithm"
 begin
 
 
@@ -83,6 +83,9 @@
 lemma fps_mult_nth: "(f * g) $ n = (\<Sum>i=0..n. f$i * g$(n - i))"
   unfolding fps_times_def by simp
 
+lemma fps_mult_nth_0 [simp]: "(f * g) $ 0 = f $ 0 * g $ 0"
+  unfolding fps_times_def by simp
+
 declare atLeastAtMost_iff [presburger]
 declare Bex_def [presburger]
 declare Ball_def [presburger]
@@ -378,6 +381,12 @@
   "(- numeral k :: 'a :: ring_1 fps) = fps_const (- numeral k)"
   by (simp add: numeral_fps_const)
 
+lemma fps_numeral_nth: "numeral n $ i = (if i = 0 then numeral n else 0)"
+  by (simp add: numeral_fps_const)
+  
+lemma fps_numeral_nth_0 [simp]: "numeral n $ 0 = numeral n"
+  by (simp add: numeral_fps_const)
+
 
 subsection \<open>The eXtractor series X\<close>
 
@@ -423,6 +432,12 @@
     by (simp add: fps_eq_iff)
 qed
 
+lemma X_nth[simp]: "X$n = (if n = 1 then 1 else 0)"
+  by (simp add: X_def)
+
+lemma X_power_nth[simp]: "(X^k) $n = (if n = k then 1 else 0::'a::comm_ring_1)"
+  by (simp add: X_power_iff)
+
 lemma X_power_mult_nth: "(X^k * (f :: 'a::comm_ring_1 fps)) $n = (if n < k then 0 else f $ (n - k))"
   apply (induct k arbitrary: n)
   apply simp
@@ -436,6 +451,347 @@
   by (metis X_power_mult_nth mult.commute)
 
 
+lemma X_neq_fps_const [simp]: "(X :: 'a :: zero_neq_one fps) \<noteq> fps_const c"
+proof
+  assume "(X::'a fps) = fps_const (c::'a)"
+  hence "X$1 = (fps_const (c::'a))$1" by (simp only:)
+  thus False by auto
+qed
+
+lemma X_neq_zero [simp]: "(X :: 'a :: zero_neq_one fps) \<noteq> 0"
+  by (simp only: fps_const_0_eq_0[symmetric] X_neq_fps_const) simp
+
+lemma X_neq_one [simp]: "(X :: 'a :: zero_neq_one fps) \<noteq> 1"
+  by (simp only: fps_const_1_eq_1[symmetric] X_neq_fps_const) simp
+
+lemma X_neq_numeral [simp]: "(X :: 'a :: {semiring_1,zero_neq_one} fps) \<noteq> numeral c"
+  by (simp only: numeral_fps_const X_neq_fps_const) simp
+
+lemma X_pow_eq_X_pow_iff [simp]: 
+  "(X :: ('a :: {comm_ring_1}) fps) ^ m = X ^ n \<longleftrightarrow> m = n"
+proof
+  assume "(X :: 'a fps) ^ m = X ^ n"
+  hence "(X :: 'a fps) ^ m $ m = X ^ n $ m" by (simp only:)
+  thus "m = n" by (simp split: split_if_asm)
+qed simp_all
+  
+
+subsection \<open>Subdegrees\<close>
+  
+definition subdegree :: "('a::zero) fps \<Rightarrow> nat" where
+  "subdegree f = (if f = 0 then 0 else LEAST n. f$n \<noteq> 0)"
+
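The subdegree of a non-zero series is just the index of its first non-zero coefficient (with subdegree 0 = 0 by convention). A minimal Haskell sketch of the same notion on finitely truncated coefficient lists, included purely as an illustration; the names Coeffs and subdegree below are hypothetical and not part of the changeset:

    -- Illustration only: a series truncated to the coefficient list
    -- [f$0, f$1, f$2, ...], with zero coefficients beyond the truncation.
    type Coeffs = [Rational]

    -- Index of the first non-zero coefficient, 0 for the zero series,
    -- mirroring "subdegree f = (if f = 0 then 0 else LEAST n. f $ n /= 0)".
    subdegree :: Coeffs -> Int
    subdegree f
      | all (== 0) f = 0
      | otherwise    = length (takeWhile (== 0) f)

    -- Example: X^2 + 3*X^4 has subdegree 2:
    -- subdegree [0, 0, 1, 0, 3]  ==  2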
+lemma subdegreeI:
+  assumes "f $ d \<noteq> 0" and "\<And>i. i < d \<Longrightarrow> f $ i = 0"
+  shows   "subdegree f = d"
+proof-
+  from assms(1) have "f \<noteq> 0" by auto
+  moreover from assms(1) have "(LEAST i. f $ i \<noteq> 0) = d"
+  proof (rule Least_equality)
+    fix e assume "f $ e \<noteq> 0"
+    with assms(2) have "\<not>(e < d)" by blast
+    thus "e \<ge> d" by simp
+  qed
+  ultimately show ?thesis unfolding subdegree_def by simp
+qed
+
+lemma nth_subdegree_nonzero [simp,intro]: "f \<noteq> 0 \<Longrightarrow> f $ subdegree f \<noteq> 0"
+proof-
+  assume "f \<noteq> 0"
+  hence "subdegree f = (LEAST n. f $ n \<noteq> 0)" by (simp add: subdegree_def)
+  also from \<open>f \<noteq> 0\<close> have "\<exists>n. f$n \<noteq> 0" using fps_nonzero_nth by blast
+  from LeastI_ex[OF this] have "f $ (LEAST n. f $ n \<noteq> 0) \<noteq> 0" .
+  finally show ?thesis .
+qed
+
+lemma nth_less_subdegree_zero [dest]: "n < subdegree f \<Longrightarrow> f $ n = 0"
+proof (cases "f = 0")
+  assume "f \<noteq> 0" and less: "n < subdegree f"
+  note less
+  also from \<open>f \<noteq> 0\<close> have "subdegree f = (LEAST n. f $ n \<noteq> 0)" by (simp add: subdegree_def)
+  finally show "f $ n = 0" using not_less_Least by blast
+qed simp_all
+  
+lemma subdegree_geI:
+  assumes "f \<noteq> 0" "\<And>i. i < n \<Longrightarrow> f$i = 0"
+  shows   "subdegree f \<ge> n"
+proof (rule ccontr)
+  assume "\<not>(subdegree f \<ge> n)"
+  with assms(2) have "f $ subdegree f = 0" by simp
+  moreover from assms(1) have "f $ subdegree f \<noteq> 0" by simp
+  ultimately show False by contradiction
+qed
+
+lemma subdegree_greaterI:
+  assumes "f \<noteq> 0" "\<And>i. i \<le> n \<Longrightarrow> f$i = 0"
+  shows   "subdegree f > n"
+proof (rule ccontr)
+  assume "\<not>(subdegree f > n)"
+  with assms(2) have "f $ subdegree f = 0" by simp
+  moreover from assms(1) have "f $ subdegree f \<noteq> 0" by simp
+  ultimately show False by contradiction
+qed
+
+lemma subdegree_leI:
+  "f $ n \<noteq> 0 \<Longrightarrow> subdegree f \<le> n"
+  by (rule leI) auto
+
+
+lemma subdegree_0 [simp]: "subdegree 0 = 0"
+  by (simp add: subdegree_def)
+
+lemma subdegree_1 [simp]: "subdegree (1 :: ('a :: zero_neq_one) fps) = 0"
+  by (auto intro!: subdegreeI)
+
+lemma subdegree_X [simp]: "subdegree (X :: ('a :: zero_neq_one) fps) = 1"
+  by (auto intro!: subdegreeI simp: X_def)
+
+lemma subdegree_fps_const [simp]: "subdegree (fps_const c) = 0"
+  by (cases "c = 0") (auto intro!: subdegreeI)
+
+lemma subdegree_numeral [simp]: "subdegree (numeral n) = 0"
+  by (simp add: numeral_fps_const)
+
+lemma subdegree_eq_0_iff: "subdegree f = 0 \<longleftrightarrow> f = 0 \<or> f $ 0 \<noteq> 0"
+proof (cases "f = 0")
+  assume "f \<noteq> 0"
+  thus ?thesis
+    using nth_subdegree_nonzero[OF \<open>f \<noteq> 0\<close>] by (fastforce intro!: subdegreeI)
+qed simp_all
+
+lemma subdegree_eq_0 [simp]: "f $ 0 \<noteq> 0 \<Longrightarrow> subdegree f = 0"
+  by (simp add: subdegree_eq_0_iff)
+
+lemma nth_subdegree_mult [simp]:
+  fixes f g :: "('a :: {mult_zero,comm_monoid_add}) fps"
+  shows "(f * g) $ (subdegree f + subdegree g) = f $ subdegree f * g $ subdegree g"
+proof-
+  let ?n = "subdegree f + subdegree g"
+  have "(f * g) $ ?n = (\<Sum>i=0..?n. f$i * g$(?n-i))"
+    by (simp add: fps_mult_nth)
+  also have "... = (\<Sum>i=0..?n. if i = subdegree f then f$i * g$(?n-i) else 0)"
+  proof (intro setsum.cong)
+    fix x assume x: "x \<in> {0..?n}"
+    hence "x = subdegree f \<or> x < subdegree f \<or> ?n - x < subdegree g" by auto
+    thus "f $ x * g $ (?n - x) = (if x = subdegree f then f $ x * g $ (?n - x) else 0)"
+      by (elim disjE conjE) auto
+  qed auto
+  also have "... = f $ subdegree f * g $ subdegree g" by (simp add: setsum.delta)
+  finally show ?thesis .
+qed
+
+lemma subdegree_mult [simp]:
+  assumes "f \<noteq> 0" "g \<noteq> 0"
+  shows "subdegree ((f :: ('a :: {ring_no_zero_divisors}) fps) * g) = subdegree f + subdegree g"
+proof (rule subdegreeI)
+  let ?n = "subdegree f + subdegree g"
+  have "(f * g) $ ?n = (\<Sum>i=0..?n. f$i * g$(?n-i))" by (simp add: fps_mult_nth)
+  also have "... = (\<Sum>i=0..?n. if i = subdegree f then f$i * g$(?n-i) else 0)"
+  proof (intro setsum.cong)
+    fix x assume x: "x \<in> {0..?n}"
+    hence "x = subdegree f \<or> x < subdegree f \<or> ?n - x < subdegree g" by auto
+    thus "f $ x * g $ (?n - x) = (if x = subdegree f then f $ x * g $ (?n - x) else 0)"
+      by (elim disjE conjE) auto
+  qed auto
+  also have "... = f $ subdegree f * g $ subdegree g" by (simp add: setsum.delta)
+  also from assms have "... \<noteq> 0" by auto
+  finally show "(f * g) $ (subdegree f + subdegree g) \<noteq> 0" .
+next
+  fix m assume m: "m < subdegree f + subdegree g"
+  have "(f * g) $ m = (\<Sum>i=0..m. f$i * g$(m-i))" by (simp add: fps_mult_nth) 
+  also have "... = (\<Sum>i=0..m. 0)"
+  proof (rule setsum.cong)
+    fix i assume "i \<in> {0..m}"
+    with m have "i < subdegree f \<or> m - i < subdegree g" by auto
+    thus "f$i * g$(m-i) = 0" by (elim disjE) auto
+  qed auto
+  finally show "(f * g) $ m = 0" by simp
+qed
+
+lemma subdegree_power [simp]:
+  "subdegree ((f :: ('a :: ring_1_no_zero_divisors) fps) ^ n) = n * subdegree f"
+  by (cases "f = 0"; induction n) simp_all
+
+lemma subdegree_uminus [simp]:
+  "subdegree (-(f::('a::group_add) fps)) = subdegree f"
+  by (simp add: subdegree_def)
+
+lemma subdegree_minus_commute [simp]:
+  "subdegree (f-(g::('a::group_add) fps)) = subdegree (g - f)"
+proof -
+  have "f - g = -(g - f)" by simp
+  also have "subdegree ... = subdegree (g - f)" by (simp only: subdegree_uminus)
+  finally show ?thesis .
+qed
+
+lemma subdegree_add_ge:
+  assumes "f \<noteq> -(g :: ('a :: {group_add}) fps)"
+  shows   "subdegree (f + g) \<ge> min (subdegree f) (subdegree g)"
+proof (rule subdegree_geI)
+  from assms show "f + g \<noteq> 0" by (subst (asm) eq_neg_iff_add_eq_0)
+next
+  fix i assume "i < min (subdegree f) (subdegree g)"
+  hence "f $ i = 0" and "g $ i = 0" by auto
+  thus "(f + g) $ i = 0" by force
+qed
+
+lemma subdegree_add_eq1:
+  assumes "f \<noteq> 0"
+  assumes "subdegree f < subdegree (g :: ('a :: {group_add}) fps)"
+  shows   "subdegree (f + g) = subdegree f"
+proof (rule antisym[OF subdegree_leI])
+  from assms show "subdegree (f + g) \<ge> subdegree f"
+    by (intro order.trans[OF min.boundedI subdegree_add_ge]) auto
+  from assms have "f $ subdegree f \<noteq> 0" "g $ subdegree f = 0" by auto
+  thus "(f + g) $ subdegree f \<noteq> 0" by simp
+qed
+
+lemma subdegree_add_eq2:
+  assumes "g \<noteq> 0"
+  assumes "subdegree g < subdegree (f :: ('a :: {ab_group_add}) fps)"
+  shows   "subdegree (f + g) = subdegree g"
+  using subdegree_add_eq1[OF assms] by (simp add: add.commute)
+
+lemma subdegree_diff_eq1:
+  assumes "f \<noteq> 0"
+  assumes "subdegree f < subdegree (g :: ('a :: {ab_group_add}) fps)"
+  shows   "subdegree (f - g) = subdegree f"
+  using subdegree_add_eq1[of f "-g"] assms by (simp add: add.commute)
+
+lemma subdegree_diff_eq2:
+  assumes "g \<noteq> 0"
+  assumes "subdegree g < subdegree (f :: ('a :: {ab_group_add}) fps)"
+  shows   "subdegree (f - g) = subdegree g"
+  using subdegree_add_eq2[of "-g" f] assms by (simp add: add.commute)
+
+lemma subdegree_diff_ge [simp]:
+  assumes "f \<noteq> (g :: ('a :: {group_add}) fps)"
+  shows   "subdegree (f - g) \<ge> min (subdegree f) (subdegree g)"
+  using assms subdegree_add_ge[of f "-g"] by simp
+
+
+
+
+subsection \<open>Shifting and slicing\<close>
+
+definition fps_shift :: "nat \<Rightarrow> 'a fps \<Rightarrow> 'a fps" where
+  "fps_shift n f = Abs_fps (\<lambda>i. f $ (i + n))"
+
+lemma fps_shift_nth [simp]: "fps_shift n f $ i = f $ (i + n)"
+  by (simp add: fps_shift_def)
+
+lemma fps_shift_0 [simp]: "fps_shift 0 f = f"
+  by (intro fps_ext) (simp add: fps_shift_def)
+
+lemma fps_shift_zero [simp]: "fps_shift n 0 = 0"
+  by (intro fps_ext) (simp add: fps_shift_def)
+
+lemma fps_shift_one: "fps_shift n 1 = (if n = 0 then 1 else 0)"
+  by (intro fps_ext) (simp add: fps_shift_def)
+
+lemma fps_shift_fps_const: "fps_shift n (fps_const c) = (if n = 0 then fps_const c else 0)"
+  by (intro fps_ext) (simp add: fps_shift_def)
+
+lemma fps_shift_numeral: "fps_shift n (numeral c) = (if n = 0 then numeral c else 0)"
+  by (simp add: numeral_fps_const fps_shift_fps_const)
+
+lemma fps_shift_X_power [simp]: 
+  "n \<le> m \<Longrightarrow> fps_shift n (X ^ m) = (X ^ (m - n) ::'a::comm_ring_1 fps)"
+  by (intro fps_ext) (auto simp: fps_shift_def ) 
+
+lemma fps_shift_times_X_power:
+  "n \<le> subdegree f \<Longrightarrow> fps_shift n f * X ^ n = (f :: 'a :: comm_ring_1 fps)"
+  by (intro fps_ext) (auto simp: X_power_mult_right_nth nth_less_subdegree_zero)
+
+lemma fps_shift_times_X_power' [simp]:
+  "fps_shift n (f * X^n) = (f :: 'a :: comm_ring_1 fps)"
+  by (intro fps_ext) (auto simp: X_power_mult_right_nth nth_less_subdegree_zero)
+
+lemma fps_shift_times_X_power'':
+  "m \<le> n \<Longrightarrow> fps_shift n (f * X^m) = fps_shift (n - m) (f :: 'a :: comm_ring_1 fps)"
+  by (intro fps_ext) (auto simp: X_power_mult_right_nth nth_less_subdegree_zero)
+
+lemma fps_shift_subdegree [simp]: 
+  "n \<le> subdegree f \<Longrightarrow> subdegree (fps_shift n f) = subdegree (f :: 'a :: comm_ring_1 fps) - n"
+  by (cases "f = 0") (force intro: nth_less_subdegree_zero subdegreeI)+
+
+lemma subdegree_decompose:
+  "f = fps_shift (subdegree f) f * X ^ subdegree (f :: ('a :: comm_ring_1) fps)"
+  by (rule fps_ext) (auto simp: X_power_mult_right_nth)
+
+lemma subdegree_decompose':
+  "n \<le> subdegree (f :: ('a :: comm_ring_1) fps) \<Longrightarrow> f = fps_shift n f * X^n"
+  by (rule fps_ext) (auto simp: X_power_mult_right_nth intro!: nth_less_subdegree_zero)
+
+lemma fps_shift_fps_shift:
+  "fps_shift (m + n) f = fps_shift m (fps_shift n f)"
+  by (rule fps_ext) (simp add: add_ac)
+  
+lemma fps_shift_add:
+  "fps_shift n (f + g) = fps_shift n f + fps_shift n g"
+  by (simp add: fps_eq_iff)
+  
+lemma fps_shift_mult:
+  assumes "n \<le> subdegree (g :: 'b :: {comm_ring_1} fps)"
+  shows   "fps_shift n (h*g) = h * fps_shift n g"
+proof -
+  from assms have "g = fps_shift n g * X^n" by (rule subdegree_decompose')
+  also have "h * ... = (h * fps_shift n g) * X^n" by simp
+  also have "fps_shift n ... = h * fps_shift n g" by simp
+  finally show ?thesis .
+qed
+
+lemma fps_shift_mult_right:
+  assumes "n \<le> subdegree (g :: 'b :: {comm_ring_1} fps)"
+  shows   "fps_shift n (g*h) = h * fps_shift n g"
+  by (subst mult.commute, subst fps_shift_mult) (simp_all add: assms)
+
+lemma nth_subdegree_zero_iff [simp]: "f $ subdegree f = 0 \<longleftrightarrow> f = 0"
+  by (cases "f = 0") auto
+
+lemma fps_shift_subdegree_zero_iff [simp]:
+  "fps_shift (subdegree f) f = 0 \<longleftrightarrow> f = 0"
+  by (subst (1) nth_subdegree_zero_iff[symmetric], cases "f = 0")
+     (simp_all del: nth_subdegree_zero_iff)
+
+
+definition "fps_cutoff n f = Abs_fps (\<lambda>i. if i < n then f$i else 0)"
+
+lemma fps_cutoff_nth [simp]: "fps_cutoff n f $ i = (if i < n then f$i else 0)"
+  unfolding fps_cutoff_def by simp
+
+lemma fps_cutoff_zero_iff: "fps_cutoff n f = 0 \<longleftrightarrow> (f = 0 \<or> n \<le> subdegree f)"
+proof
+  assume A: "fps_cutoff n f = 0"
+  thus "f = 0 \<or> n \<le> subdegree f"
+  proof (cases "f = 0")
+    assume "f \<noteq> 0"
+    with A have "n \<le> subdegree f"
+      by (intro subdegree_geI) (auto simp: fps_eq_iff split: split_if_asm)
+    thus ?thesis ..
+  qed simp
+qed (auto simp: fps_eq_iff intro: nth_less_subdegree_zero)
+
+lemma fps_cutoff_0 [simp]: "fps_cutoff 0 f = 0"
+  by (simp add: fps_eq_iff)
+  
+lemma fps_cutoff_zero [simp]: "fps_cutoff n 0 = 0"
+  by (simp add: fps_eq_iff)
+
+lemma fps_cutoff_one: "fps_cutoff n 1 = (if n = 0 then 0 else 1)"
+  by (simp add: fps_eq_iff)
+
+lemma fps_cutoff_fps_const: "fps_cutoff n (fps_const c) = (if n = 0 then 0 else fps_const c)"
+  by (simp add: fps_eq_iff)  
+
+lemma fps_cutoff_numeral: "fps_cutoff n (numeral c) = (if n = 0 then 0 else numeral c)"
+  by (simp add: numeral_fps_const fps_cutoff_fps_const)
+
+lemma fps_shift_cutoff: 
+  "fps_shift n (f :: ('a :: comm_ring_1) fps) * X^n + fps_cutoff n f = f"
+  by (simp add: fps_eq_iff X_power_mult_right_nth)
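On a truncated coefficient list the decomposition above is nothing but take and drop: fps_shift n discards the first n coefficients, fps_cutoff n keeps only them, and multiplying the shifted part by X^n re-aligns it. A small Haskell check of this reading, again only an illustrative sketch with hypothetical names:

    -- Illustration only: shift, cutoff and the decomposition lemma on a
    -- truncated coefficient list.
    fpsShift, fpsCutoff :: Int -> [Rational] -> [Rational]
    fpsShift n f  = drop n f                              -- fps_shift n f $ i = f $ (i + n)
    fpsCutoff n f = take n f ++ map (const 0) (drop n f)  -- zero from index n onwards

    timesXpow :: Int -> [Rational] -> [Rational]          -- multiply by X^n
    timesXpow n f = replicate n 0 ++ f

    -- fps_shift n f * X^n + fps_cutoff n f = f, checked on one example
    checkDecomposition :: Bool
    checkDecomposition =
      let f = [0, 0, 2, 5, 0, 7]; n = 3
      in  zipWith (+) (timesXpow n (fpsShift n f)) (fpsCutoff n f) == f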
+
+
 subsection \<open>Formal Power series form a metric space\<close>
 
 definition (in dist) "ball x r = {y. dist y x < r}"
@@ -444,18 +800,13 @@
 begin
 
 definition
-  dist_fps_def: "dist (a :: 'a fps) b =
-    (if (\<exists>n. a$n \<noteq> b$n) then inverse (2 ^ (LEAST n. a$n \<noteq> b$n)) else 0)"
+  dist_fps_def: "dist (a :: 'a fps) b = (if a = b then 0 else inverse (2 ^ subdegree (a - b)))"
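In words: two distinct series are close precisely when they agree on a long initial segment of coefficients, the distance being fixed by the first index at which they differ. As a worked instance of the new definition,

\[
  \mathrm{dist}\bigl(X^2,\; X^2 + X^5\bigr)
    \;=\; 2^{-\,\mathrm{subdegree}(-X^5)}
    \;=\; 2^{-5},
\]

so a series and its truncation after the first $n$ coefficients are at distance at most $2^{-n}$; this is exactly what the convergence argument for the partial sums further below exploits.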
 
 lemma dist_fps_ge0: "dist (a :: 'a fps) b \<ge> 0"
   by (simp add: dist_fps_def)
 
 lemma dist_fps_sym: "dist (a :: 'a fps) b = dist b a"
-  apply (auto simp add: dist_fps_def)
-  apply (rule cong[OF refl, where x="(\<lambda>n. a $ n \<noteq> b $ n)"])
-  apply (rule ext)
-  apply auto
-  done
+  by (simp add: dist_fps_def)
 
 instance ..
 
@@ -466,70 +817,47 @@
 
 definition open_fps_def: "open (S :: 'a fps set) = (\<forall>a \<in> S. \<exists>r. r >0 \<and> ball a r \<subseteq> S)"
 
+
 instance
 proof
   show "open S = (\<forall>x\<in>S. \<exists>e>0. \<forall>y. dist y x < e \<longrightarrow> y \<in> S)" for S :: "'a fps set"
     by (auto simp add: open_fps_def ball_def subset_eq)
   show th: "dist a b = 0 \<longleftrightarrow> a = b" for a b :: "'a fps"
-  proof
-    assume "a = b"
-    then have "\<not> (\<exists>n. a $ n \<noteq> b $ n)" by simp
-    then show "dist a b = 0" by (simp add: dist_fps_def)
-  next
-    assume d: "dist a b = 0"
-    then have "\<forall>n. a$n = b$n"
-      by - (rule ccontr, simp add: dist_fps_def)
-    then show "a = b" by (simp add: fps_eq_iff)
-  qed
-  then have th'[simp]: "dist a a = 0" for a :: "'a fps"
-    by simp
+    by (simp add: dist_fps_def split: split_if_asm)
+  then have th'[simp]: "dist a a = 0" for a :: "'a fps" by simp
 
   fix a b c :: "'a fps"
   consider "a = b" | "c = a \<or> c = b" | "a \<noteq> b" "a \<noteq> c" "b \<noteq> c" by blast
   then show "dist a b \<le> dist a c + dist b c"
   proof cases
     case 1
-    then have "dist a b = 0" unfolding th .
-    then show ?thesis
-      using dist_fps_ge0 [of a c] dist_fps_ge0 [of b c] by simp
+    then show ?thesis by (simp add: dist_fps_def)
   next
     case 2
     then show ?thesis
       by (cases "c = a") (simp_all add: th dist_fps_sym)
   next
     case neq: 3
-    def n \<equiv> "\<lambda>a b::'a fps. LEAST n. a$n \<noteq> b$n"
-    then have n': "\<And>m a b. m < n a b \<Longrightarrow> a$m = b$m"
-      by (auto dest: not_less_Least)
-    from neq have dab: "dist a b = inverse (2 ^ n a b)"
-      and dac: "dist a c = inverse (2 ^ n a c)"
-      and dbc: "dist b c = inverse (2 ^ n b c)"
-      by (simp_all add: dist_fps_def n_def fps_eq_iff)
-    from neq have nz: "dist a b \<noteq> 0" "dist a c \<noteq> 0" "dist b c \<noteq> 0"
-      unfolding th by simp_all
-    from nz have pos: "dist a b > 0" "dist a c > 0" "dist b c > 0"
-      using dist_fps_ge0[of a b] dist_fps_ge0[of a c] dist_fps_ge0[of b c]
-      by auto
-    have th1: "\<And>n. (2::real)^n > 0" by auto
     have False if "dist a b > dist a c + dist b c"
     proof -
-      from that have gt: "dist a b > dist a c" "dist a b > dist b c"
-        using pos by auto
-      from gt have gtn: "n a b < n b c" "n a b < n a c"
-        unfolding dab dbc dac by (auto simp add: th1)
-      from n'[OF gtn(2)] n'(1)[OF gtn(1)]
-      have "a $ n a b = b $ n a b" by simp
-      moreover have "a $ n a b \<noteq> b $ n a b"
-         unfolding n_def by (rule LeastI_ex) (insert \<open>a \<noteq> b\<close>, simp add: fps_eq_iff)
-      ultimately show ?thesis by contradiction
+      let ?n = "subdegree (a - b)"
+      from neq have "dist a b > 0" "dist b c > 0" and "dist a c > 0" by (simp_all add: dist_fps_def)
+      with that have "dist a b > dist a c" and "dist a b > dist b c" by simp_all
+      with neq have "?n < subdegree (a - c)" and "?n < subdegree (b - c)"  
+        by (simp_all add: dist_fps_def field_simps)
+      hence "(a - c) $ ?n = 0" and "(b - c) $ ?n = 0" 
+        by (simp_all only: nth_less_subdegree_zero)
+      hence "(a - b) $ ?n = 0" by simp
+      moreover from neq have "(a - b) $ ?n \<noteq> 0" by (intro nth_subdegree_nonzero) simp_all
+      ultimately show False by contradiction
     qed
-    then show ?thesis
-      by (auto simp add: not_le[symmetric])
+    thus ?thesis by (auto simp add: not_le[symmetric])
   qed
 qed
 
 end
 
+
 text \<open>The infinite sums and justification of the notation in textbooks.\<close>
 
 lemma reals_power_lt_ex:
@@ -564,12 +892,6 @@
     using kp by blast
 qed
 
-lemma X_nth[simp]: "X$n = (if n = 1 then 1 else 0)"
-  by (simp add: X_def)
-
-lemma X_power_nth[simp]: "(X^k) $n = (if n = k then 1 else 0::'a::comm_ring_1)"
-  by (simp add: X_power_iff)
-
 lemma fps_sum_rep_nth: "(setsum (\<lambda>i. fps_const(a$i)*X^i) {0..m})$n =
     (if n \<le> m then a$n else 0::'a::comm_ring_1)"
   apply (auto simp add: fps_setsum_nth cond_value_iff cong del: if_weak_cong)
@@ -597,14 +919,12 @@
             using \<open>r > 0\<close> by (simp del: dist_eq_0_iff)
         next
           case False
-          def k \<equiv> "LEAST i. ?s n $ i \<noteq> a $ i"
-          from False have dth: "dist (?s n) a = (1/2)^k"
-            by (auto simp add: dist_fps_def inverse_eq_divide power_divide k_def fps_eq_iff)
-          from False have kn: "k > n"
-            by (auto simp: fps_sum_rep_nth not_le k_def fps_eq_iff
-              split: split_if_asm intro: LeastI2_ex)
-          then have "dist (?s n) a < (1/2)^n"
-            unfolding dth by (simp add: divide_simps)
+          from False have dth: "dist (?s n) a = (1/2)^subdegree (?s n - a)"
+            by (simp add: dist_fps_def field_simps)
+          from False have kn: "subdegree (?s n - a) > n"
+            by (intro subdegree_greaterI) (simp_all add: fps_sum_rep_nth)              
+          then have "dist (?s n) a < (1/2)^n" 
+            by (simp add: field_simps dist_fps_def)
           also have "\<dots> \<le> (1/2)^n0"
             using nn0 by (simp add: divide_simps)
           also have "\<dots> < r"
@@ -634,7 +954,10 @@
 
 definition fps_inverse_def: "inverse f = (if f $ 0 = 0 then 0 else Abs_fps (natfun_inverse f))"
 
-definition fps_divide_def: "divide = (\<lambda>(f::'a fps) g. f * inverse g)"
+definition fps_divide_def:
+  "f div g = (if g = 0 then 0 else 
+     let n = subdegree g; h = fps_shift n g
+     in  fps_shift n (f * inverse h))"
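The algorithm behind this definition: write g = h * X^n with n = subdegree g, so that the constant term h $ 0 is non-zero and h has a genuine inverse, multiply f by that inverse, and shift the product back down by n; the companion fps_mod_def further below keeps the complementary fps_cutoff part instead (multiplied back by h). The following Haskell rendering of the same algorithm on lazy coefficient streams is purely illustrative; the names ofPoly, mulPS, invPS and divPS are hypothetical and not part of the changeset:

    -- Illustration only: division of formal power series as lazy
    -- coefficient streams, constant term first.
    type PS = [Rational]                      -- conceptually infinite

    ofPoly :: [Rational] -> PS                -- pad a polynomial with zeros
    ofPoly p = p ++ repeat 0

    -- Cauchy product: (f0 + x*F) * g = f0*g0 + x*(f0*G + F*g)
    -- (the patterns assume infinite streams, as produced by ofPoly)
    mulPS :: PS -> PS -> PS
    mulPS (f0 : fs) g@(g0 : gs) = f0 * g0 : zipWith (+) (map (f0 *) gs) (mulPS fs g)

    -- Inverse of a series whose constant term is non-zero:
    -- b0 = 1/f0 and the tail B satisfies B = -(F * b) / f0.
    invPS :: PS -> PS
    invPS (f0 : fs) = bs
      where bs = recip f0 : map (\c -> negate c / f0) (mulPS fs bs)

    -- "f div g = fps_shift n (f * inverse (fps_shift n g))", n = subdegree g
    divPS :: PS -> PS -> PS
    divPS f g = drop n (mulPS f (invPS (drop n g)))
      where n = length (takeWhile (== 0) g)   -- subdegree; assumes g is non-zero

    -- Example: (X^2 + X^3) div X^2 = 1 + X:
    -- take 4 (divPS (ofPoly [0, 0, 1, 1]) (ofPoly [0, 0, 1]))  ==  [1, 1, 0, 0]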
 
 instance ..
 
@@ -686,21 +1009,18 @@
 
 lemma fps_inverse_0_iff[simp]: "(inverse f) $ 0 = (0::'a::division_ring) \<longleftrightarrow> f $ 0 = 0"
   by (simp add: fps_inverse_def nonzero_imp_inverse_nonzero)
-
-lemma fps_inverse_eq_0_iff[simp]: "inverse f = (0:: ('a::field) fps) \<longleftrightarrow> f $ 0 = 0"
-  (is "?lhs \<longleftrightarrow> ?rhs")
+  
+lemma fps_inverse_nth_0 [simp]: "inverse f $ 0 = inverse (f $ 0 :: 'a :: division_ring)"
+  by (simp add: fps_inverse_def)
+
+lemma fps_inverse_eq_0_iff[simp]: "inverse f = (0:: ('a::division_ring) fps) \<longleftrightarrow> f $ 0 = 0"
 proof
-  show ?lhs if ?rhs
-    using that by (simp add: fps_inverse_def)
-  show ?rhs if h: ?lhs
-  proof (rule ccontr)
-    assume c: "f $0 \<noteq> 0"
-    from inverse_mult_eq_1[OF c] h show False
-      by simp
-  qed
-qed
-
-lemma fps_inverse_idempotent[intro]:
+  assume A: "inverse f = 0"
+  have "0 = inverse f $ 0" by (subst A) simp
+  thus "f $ 0 = 0" by simp
+qed (simp add: fps_inverse_def)
+
+lemma fps_inverse_idempotent[intro, simp]:
   assumes f0: "f$0 \<noteq> (0::'a::field)"
   shows "inverse (inverse f) = f"
 proof -
@@ -713,11 +1033,17 @@
 qed
 
 lemma fps_inverse_unique:
-  assumes f0: "f$0 \<noteq> (0::'a::field)"
-    and fg: "f*g = 1"
-  shows "inverse f = g"
+  assumes fg: "(f :: 'a :: field fps) * g = 1"
+  shows   "inverse f = g"
 proof -
-  from inverse_mult_eq_1[OF f0] fg
+  have f0: "f $ 0 \<noteq> 0"
+  proof
+    assume "f $ 0 = 0"
+    hence "0 = (f * g) $ 0" by simp
+    also from fg have "(f * g) $ 0 = 1" by simp
+    finally show False by simp
+  qed
+  from inverse_mult_eq_1[OF this] fg
   have th0: "inverse f * f = g * f"
     by (simp add: ac_simps)
   then show ?thesis
@@ -755,12 +1081,399 @@
     done
 qed
 
+lemma fps_inverse_mult: "inverse (f * g :: 'a::field fps) = inverse f * inverse g"
+proof (cases "f$0 = 0 \<or> g$0 = 0")
+  assume "\<not>(f$0 = 0 \<or> g$0 = 0)"
+  hence [simp]: "f$0 \<noteq> 0" "g$0 \<noteq> 0" by simp_all
+  show ?thesis
+  proof (rule fps_inverse_unique)
+    have "f * g * (inverse f * inverse g) = (inverse f * f) * (inverse g * g)" by simp
+    also have "... = 1" by (subst (1 2) inverse_mult_eq_1) simp_all
+    finally show "f * g * (inverse f * inverse g) = 1" .
+  qed
+next
+  assume A: "f$0 = 0 \<or> g$0 = 0"
+  hence "inverse (f * g) = 0" by simp
+  also from A have "... = inverse f * inverse g" by auto
+  finally show "inverse (f * g) = inverse f * inverse g" .
+qed
+  
+
 lemma fps_inverse_gp: "inverse (Abs_fps(\<lambda>n. (1::'a::field))) =
     Abs_fps (\<lambda>n. if n= 0 then 1 else if n=1 then - 1 else 0)"
   apply (rule fps_inverse_unique)
   apply (simp_all add: fps_eq_iff fps_mult_nth setsum_zero_lemma)
   done
 
+lemma subdegree_inverse [simp]: "subdegree (inverse (f::'a::field fps)) = 0"
+proof (cases "f$0 = 0")
+  assume nz: "f$0 \<noteq> 0"
+  hence "subdegree (inverse f) + subdegree f = subdegree (inverse f * f)"
+    by (subst subdegree_mult) auto
+  also from nz have "subdegree f = 0" by (simp add: subdegree_eq_0_iff)
+  also from nz have "inverse f * f = 1" by (rule inverse_mult_eq_1)
+  finally show "subdegree (inverse f) = 0" by simp
+qed (simp_all add: fps_inverse_def)
+
+lemma fps_is_unit_iff [simp]: "(f :: 'a :: field fps) dvd 1 \<longleftrightarrow> f $ 0 \<noteq> 0"
+proof
+  assume "f dvd 1"
+  then obtain g where "1 = f * g" by (elim dvdE)
+  from this[symmetric] have "(f*g) $ 0 = 1" by simp
+  thus "f $ 0 \<noteq> 0" by auto
+next
+  assume A: "f $ 0 \<noteq> 0"
+  thus "f dvd 1" by (simp add: inverse_mult_eq_1[OF A, symmetric])
+qed
+
+lemma subdegree_eq_0' [simp]: "(f :: 'a :: field fps) dvd 1 \<Longrightarrow> subdegree f = 0"
+  by simp
+
+lemma fps_unit_dvd [simp]: "(f $ 0 :: 'a :: field) \<noteq> 0 \<Longrightarrow> f dvd g"
+  by (rule dvd_trans, subst fps_is_unit_iff) simp_all
+
+
+
+instantiation fps :: (field) ring_div
+begin
+
+definition fps_mod_def:
+  "f mod g = (if g = 0 then f else
+     let n = subdegree g; h = fps_shift n g 
+     in  fps_cutoff n (f * inverse h) * h)"
+
+lemma fps_mod_eq_zero: 
+  assumes "g \<noteq> 0" and "subdegree f \<ge> subdegree g"
+  shows   "f mod g = 0"
+  using assms by (cases "f = 0") (auto simp: fps_cutoff_zero_iff fps_mod_def Let_def)
+
+lemma fps_times_divide_eq: 
+  assumes "g \<noteq> 0" and "subdegree f \<ge> subdegree (g :: 'a fps)"
+  shows   "f div g * g = f"
+proof (cases "f = 0")
+  assume nz: "f \<noteq> 0"
+  def n \<equiv> "subdegree g"
+  def h \<equiv> "fps_shift n g"
+  from assms have [simp]: "h $ 0 \<noteq> 0" unfolding h_def by (simp add: n_def)
+  
+  from assms nz have "f div g * g = fps_shift n (f * inverse h) * g"
+    by (simp add: fps_divide_def Let_def h_def n_def)
+  also have "... = fps_shift n (f * inverse h) * X^n * h" unfolding h_def n_def
+    by (subst subdegree_decompose[of g]) simp
+  also have "fps_shift n (f * inverse h) * X^n = f * inverse h"
+    by (rule fps_shift_times_X_power) (simp_all add: nz assms n_def)
+  also have "... * h = f * (inverse h * h)" by simp
+  also have "inverse h * h = 1" by (rule inverse_mult_eq_1) simp
+  finally show ?thesis by simp
+qed (simp_all add: fps_divide_def Let_def)
+
+lemma 
+  assumes "g$0 \<noteq> 0"
+  shows   fps_divide_unit: "f div g = f * inverse g" and fps_mod_unit [simp]: "f mod g = 0"
+proof -
+  from assms have [simp]: "subdegree g = 0" by (simp add: subdegree_eq_0_iff)
+  from assms show "f div g = f * inverse g" 
+    by (auto simp: fps_divide_def Let_def subdegree_eq_0_iff)
+  from assms show "f mod g = 0" by (intro fps_mod_eq_zero) auto
+qed
+
+context
+begin
+private lemma fps_divide_cancel_aux1:
+  assumes "h$0 \<noteq> (0 :: 'a :: field)"
+  shows   "(h * f) div (h * g) = f div g"
+proof (cases "g = 0")
+  assume "g \<noteq> 0"
+  from assms have "h \<noteq> 0" by auto
+  note nz [simp] = \<open>g \<noteq> 0\<close> \<open>h \<noteq> 0\<close>
+  from assms have [simp]: "subdegree h = 0" by (simp add: subdegree_eq_0_iff)
+  
+  have "(h * f) div (h * g) = 
+          fps_shift (subdegree g) (h * f * inverse (fps_shift (subdegree g) (h*g)))"
+    by (simp add: fps_divide_def Let_def)
+  also have "h * f * inverse (fps_shift (subdegree g) (h*g)) = 
+               (inverse h * h) * f * inverse (fps_shift (subdegree g) g)"
+    by (subst fps_shift_mult) (simp_all add: algebra_simps fps_inverse_mult)
+  also from assms have "inverse h * h = 1" by (rule inverse_mult_eq_1)
+  finally show "(h * f) div (h * g) = f div g" by (simp_all add: fps_divide_def Let_def)
+qed (simp_all add: fps_divide_def)
+
+private lemma fps_divide_cancel_aux2:
+  "(f * X^m) div (g * X^m) = f div (g :: 'a :: field fps)"
+proof (cases "g = 0")
+  assume [simp]: "g \<noteq> 0"
+  have "(f * X^m) div (g * X^m) = 
+          fps_shift (subdegree g + m) (f*inverse (fps_shift (subdegree g + m) (g*X^m))*X^m)"
+    by (simp add: fps_divide_def Let_def algebra_simps)
+  also have "... = f div g"
+    by (simp add: fps_shift_times_X_power'' fps_divide_def Let_def)
+  finally show ?thesis .
+qed (simp_all add: fps_divide_def)
+
+instance proof
+  fix f g :: "'a fps"
+  def n \<equiv> "subdegree g"
+  def h \<equiv> "fps_shift n g"
+  
+  show "f div g * g + f mod g = f"
+  proof (cases "g = 0 \<or> f = 0")
+    assume "\<not>(g = 0 \<or> f = 0)"
+    hence nz [simp]: "f \<noteq> 0" "g \<noteq> 0" by simp_all
+    show ?thesis
+    proof (rule disjE[OF le_less_linear])
+      assume "subdegree f \<ge> subdegree g"
+      with nz show ?thesis by (simp add: fps_mod_eq_zero fps_times_divide_eq)
+    next
+      assume "subdegree f < subdegree g"
+      have g_decomp: "g = h * X^n" unfolding h_def n_def by (rule subdegree_decompose)
+      have "f div g * g + f mod g = 
+              fps_shift n (f * inverse h) * g + fps_cutoff n (f * inverse h) * h" 
+        by (simp add: fps_mod_def fps_divide_def Let_def n_def h_def)
+      also have "... = h * (fps_shift n (f * inverse h) * X^n + fps_cutoff n (f * inverse h))"
+        by (subst g_decomp) (simp add: algebra_simps)
+      also have "... = f * (inverse h * h)"
+        by (subst fps_shift_cutoff) simp
+      also have "inverse h * h = 1" by (rule inverse_mult_eq_1) (simp add: h_def n_def)
+      finally show ?thesis by simp
+    qed
+  qed (auto simp: fps_mod_def fps_divide_def Let_def)
+next
+
+  fix f g h :: "'a fps"
+  assume "h \<noteq> 0"
+  show "(h * f) div (h * g) = f div g"
+  proof -
+    def m \<equiv> "subdegree h"
+    def h' \<equiv> "fps_shift m h"
+    have h_decomp: "h = h' * X ^ m" unfolding h'_def m_def by (rule subdegree_decompose)
+    from \<open>h \<noteq> 0\<close> have [simp]: "h'$0 \<noteq> 0" by (simp add: h'_def m_def)
+    have "(h * f) div (h * g) = (h' * f * X^m) div (h' * g * X^m)"
+      by (simp add: h_decomp algebra_simps)
+    also have "... = f div g" by (simp add: fps_divide_cancel_aux1 fps_divide_cancel_aux2)
+    finally show ?thesis .
+  qed
+
+next
+  fix f g h :: "'a fps"
+  assume [simp]: "h \<noteq> 0"
+  def n \<equiv> "subdegree h"
+  def h' \<equiv> "fps_shift n h"
+  note dfs = n_def h'_def
+  have "(f + g * h) div h = fps_shift n (f * inverse h') + fps_shift n (g * (h * inverse h'))"
+    by (simp add: fps_divide_def Let_def dfs[symmetric] algebra_simps fps_shift_add)
+  also have "h * inverse h' = (inverse h' * h') * X^n"
+    by (subst subdegree_decompose) (simp_all add: dfs)
+  also have "... = X^n" by (subst inverse_mult_eq_1) (simp_all add: dfs)
+  also have "fps_shift n (g * X^n) = g" by simp
+  also have "fps_shift n (f * inverse h') = f div h" 
+    by (simp add: fps_divide_def Let_def dfs)
+  finally show "(f + g * h) div h = g + f div h" by simp
+qed (auto simp: fps_divide_def fps_mod_def Let_def)
+
+end
+end
+
+lemma subdegree_mod:
+  assumes "f \<noteq> 0" "subdegree f < subdegree g"
+  shows   "subdegree (f mod g) = subdegree f"
+proof (cases "f div g * g = 0")
+  assume "f div g * g \<noteq> 0"
+  hence [simp]: "f div g \<noteq> 0" "g \<noteq> 0" by auto
+  from mod_div_equality[of f g] have "f mod g = f - f div g * g" by (simp add: algebra_simps)
+  also from assms have "subdegree ... = subdegree f"
+    by (intro subdegree_diff_eq1) simp_all
+  finally show ?thesis .
+next
+  assume zero: "f div g * g = 0"
+  from mod_div_equality[of f g] have "f mod g = f - f div g * g" by (simp add: algebra_simps)
+  also note zero
+  finally show ?thesis by simp
+qed
+
+lemma fps_divide_nth_0 [simp]: "g $ 0 \<noteq> 0 \<Longrightarrow> (f div g) $ 0 = f $ 0 / (g $ 0 :: _ :: field)"
+  by (simp add: fps_divide_unit divide_inverse)
+
+
+lemma dvd_imp_subdegree_le: 
+  "(f :: 'a :: idom fps) dvd g \<Longrightarrow> g \<noteq> 0 \<Longrightarrow> subdegree f \<le> subdegree g"
+  by (auto elim: dvdE)
+
+lemma fps_dvd_iff: 
+  assumes "(f :: 'a :: field fps) \<noteq> 0" "g \<noteq> 0"
+  shows   "f dvd g \<longleftrightarrow> subdegree f \<le> subdegree g"
+proof
+  assume "subdegree f \<le> subdegree g"
+  with assms have "g mod f = 0" 
+    by (simp add: fps_mod_def Let_def fps_cutoff_zero_iff)
+  thus "f dvd g" by (simp add: dvd_eq_mod_eq_0)
+qed (simp add: assms dvd_imp_subdegree_le)
+
+lemma fps_const_inverse: "inverse (fps_const (a::'a::field)) = fps_const (inverse a)"
+  by (cases "a \<noteq> 0", rule fps_inverse_unique) (auto simp: fps_eq_iff)
+
+lemma fps_const_divide: "fps_const (x :: _ :: field) / fps_const y = fps_const (x / y)"
+  by (cases "y = 0") (simp_all add: fps_divide_unit fps_const_inverse divide_inverse)
+
+lemma inverse_fps_numeral: 
+  "inverse (numeral n :: ('a :: field_char_0) fps) = fps_const (inverse (numeral n))"
+  by (intro fps_inverse_unique fps_ext) (simp_all add: fps_numeral_nth)
+
+
+
+
+instantiation fps :: (field) normalization_semidom
+begin
+
+definition fps_unit_factor_def [simp]: 
+  "unit_factor f = fps_shift (subdegree f) f"
+
+definition fps_normalize_def [simp]:
+  "normalize f = (if f = 0 then 0 else X ^ subdegree f)"
+
+instance proof
+  fix f :: "'a fps"
+  show "unit_factor f * normalize f = f"
+    by (simp add: fps_shift_times_X_power)
+next
+  fix f g :: "'a fps"
+  show "unit_factor (f * g) = unit_factor f * unit_factor g"
+  proof (cases "f = 0 \<or> g = 0")
+    assume "\<not>(f = 0 \<or> g = 0)"
+    thus "unit_factor (f * g) = unit_factor f * unit_factor g"
+    unfolding fps_unit_factor_def
+      by (auto simp: fps_shift_fps_shift fps_shift_mult fps_shift_mult_right)
+  qed auto
+qed auto
+
+end
+
+instance fps :: (field) algebraic_semidom ..
+
+
+subsection \<open>Formal power series form a Euclidean ring\<close>
+
+instantiation fps :: (field) euclidean_ring
+begin
+
+definition fps_euclidean_size_def: 
+  "euclidean_size f = (if f = 0 then 0 else Suc (subdegree f))"
+
+instance proof
+  fix f g :: "'a fps" assume [simp]: "g \<noteq> 0"
+  show "euclidean_size f \<le> euclidean_size (f * g)"
+    by (cases "f = 0") (auto simp: fps_euclidean_size_def)
+  show "euclidean_size (f mod g) < euclidean_size g"
+    apply (cases "f = 0", simp add: fps_euclidean_size_def)
+    apply (rule disjE[OF le_less_linear[of "subdegree g" "subdegree f"]])
+    apply (simp_all add: fps_mod_eq_zero fps_euclidean_size_def subdegree_mod)
+    done
+qed
+
+end
+
+instantiation fps :: (field) euclidean_ring_gcd
+begin
+definition fps_gcd_def: "(gcd :: 'a fps \<Rightarrow> _) = gcd_eucl"
+definition fps_lcm_def: "(lcm :: 'a fps \<Rightarrow> _) = lcm_eucl"
+definition fps_Gcd_def: "(Gcd :: 'a fps set \<Rightarrow> _) = Gcd_eucl"
+definition fps_Lcm_def: "(Lcm :: 'a fps set \<Rightarrow> _) = Lcm_eucl"
+instance by intro_classes (simp_all add: fps_gcd_def fps_lcm_def fps_Gcd_def fps_Lcm_def)
+end
+
+lemma fps_gcd:
+  assumes [simp]: "f \<noteq> 0" "g \<noteq> 0"
+  shows   "gcd f g = X ^ min (subdegree f) (subdegree g)"
+proof -
+  let ?m = "min (subdegree f) (subdegree g)"
+  show "gcd f g = X ^ ?m"
+  proof (rule sym, rule gcdI)
+    fix d assume "d dvd f" "d dvd g"
+    thus "d dvd X ^ ?m" by (cases "d = 0") (auto simp: fps_dvd_iff)
+  qed (simp_all add: fps_dvd_iff)
+qed
+
+lemma fps_gcd_altdef: "gcd (f :: 'a :: field fps) g = 
+  (if f = 0 \<and> g = 0 then 0 else
+   if f = 0 then X ^ subdegree g else 
+   if g = 0 then X ^ subdegree f else 
+     X ^ min (subdegree f) (subdegree g))"
+  by (simp add: fps_gcd)
+
+lemma fps_lcm:
+  assumes [simp]: "f \<noteq> 0" "g \<noteq> 0"
+  shows   "lcm f g = X ^ max (subdegree f) (subdegree g)"
+proof -
+  let ?m = "max (subdegree f) (subdegree g)"
+  show "lcm f g = X ^ ?m"
+  proof (rule sym, rule lcmI)
+    fix d assume "f dvd d" "g dvd d"
+    thus "X ^ ?m dvd d" by (cases "d = 0") (auto simp: fps_dvd_iff)
+  qed (simp_all add: fps_dvd_iff)
+qed
+
+lemma fps_lcm_altdef: "lcm (f :: 'a :: field fps) g = 
+  (if f = 0 \<or> g = 0 then 0 else X ^ max (subdegree f) (subdegree g))"
+  by (simp add: fps_lcm)
+
+lemma fps_Gcd:
+  assumes "A - {0} \<noteq> {}"
+  shows   "Gcd A = X ^ (INF f:A-{0}. subdegree f)"
+proof (rule sym, rule GcdI)
+  fix f assume "f \<in> A"
+  thus "X ^ (INF f:A - {0}. subdegree f) dvd f"
+    by (cases "f = 0") (auto simp: fps_dvd_iff intro!: cINF_lower)
+next
+  fix d assume d: "\<And>f. f \<in> A \<Longrightarrow> d dvd f"
+  from assms obtain f where "f \<in> A - {0}" by auto
+  with d[of f] have [simp]: "d \<noteq> 0" by auto
+  from d assms have "subdegree d \<le> (INF f:A-{0}. subdegree f)"
+    by (intro cINF_greatest) (auto simp: fps_dvd_iff[symmetric])
+  with d assms show "d dvd X ^ (INF f:A-{0}. subdegree f)" by (simp add: fps_dvd_iff)
+qed simp_all
+
+lemma fps_Gcd_altdef: "Gcd (A :: 'a :: field fps set) = 
+  (if A \<subseteq> {0} then 0 else X ^ (INF f:A-{0}. subdegree f))"
+  using fps_Gcd by auto
+
+lemma fps_Lcm:
+  assumes "A \<noteq> {}" "0 \<notin> A" "bdd_above (subdegree`A)"
+  shows   "Lcm A = X ^ (SUP f:A. subdegree f)"
+proof (rule sym, rule LcmI)
+  fix f assume "f \<in> A"
+  moreover from assms(3) have "bdd_above (subdegree ` A)" by auto
+  ultimately show "f dvd X ^ (SUP f:A. subdegree f)" using assms(2)
+    by (cases "f = 0") (auto simp: fps_dvd_iff intro!: cSUP_upper)
+next
+  fix d assume d: "\<And>f. f \<in> A \<Longrightarrow> f dvd d"
+  from assms obtain f where f: "f \<in> A" "f \<noteq> 0" by auto
+  show "X ^ (SUP f:A. subdegree f) dvd d"
+  proof (cases "d = 0")
+    assume "d \<noteq> 0"
+    moreover from d have "\<And>f. f \<in> A \<Longrightarrow> f \<noteq> 0 \<Longrightarrow> f dvd d" by blast
+    ultimately have "subdegree d \<ge> (SUP f:A. subdegree f)" using assms
+      by (intro cSUP_least) (auto simp: fps_dvd_iff)
+    with \<open>d \<noteq> 0\<close> show ?thesis by (simp add: fps_dvd_iff)
+  qed simp_all
+qed simp_all
+
+lemma fps_Lcm_altdef:
+  "Lcm (A :: 'a :: field fps set) = 
+     (if 0 \<in> A \<or> \<not>bdd_above (subdegree`A) then 0 else
+      if A = {} then 1 else X ^ (SUP f:A. subdegree f))"
+proof (cases "bdd_above (subdegree`A)")
+  assume unbounded: "\<not>bdd_above (subdegree`A)"
+  have "Lcm A = 0"
+  proof (rule ccontr)
+    assume "Lcm A \<noteq> 0"
+    from unbounded obtain f where f: "f \<in> A" "subdegree (Lcm A) < subdegree f"
+      unfolding bdd_above_def by (auto simp: not_le)
+    moreover from this and `Lcm A \<noteq> 0` have "subdegree f \<le> subdegree (Lcm A)"
+      by (intro dvd_imp_subdegree_le) simp_all
+    ultimately show False by simp
+  qed
+  with unbounded show ?thesis by simp
+qed (simp_all add: fps_Lcm)
+
 
 subsection \<open>Formal Derivatives, and the MacLaurin theorem around 0\<close>
 
@@ -1065,32 +1778,7 @@
 lemma fps_inverse_power:
   fixes a :: "'a::field fps"
   shows "inverse (a^n) = inverse a ^ n"
-proof (cases "a$0 = 0")
-  case True
-  then have eq: "inverse a = 0"
-    by (simp add: fps_inverse_def)
-  consider "n = 0" | "n > 0" by blast
-  then show ?thesis
-  proof cases
-    case 1
-    then show ?thesis by simp
-  next
-    case 2
-    from startsby_zero_power[OF True this] eq show ?thesis
-      by (simp add: fps_inverse_def)
-  qed
-next
-  case False
-  show ?thesis
-    apply (rule fps_inverse_unique)
-    apply (simp add: False)
-    unfolding power_mult_distrib[symmetric]
-    apply (rule ssubst[where t = "a * inverse a" and s= 1])
-    apply simp_all
-    apply (subst mult.commute)
-    apply (rule inverse_mult_eq_1[OF False])
-    done
-qed
+  by (induction n) (simp_all add: fps_inverse_mult)
 
 lemma fps_deriv_power:
   "fps_deriv (a ^ n) = fps_const (of_nat n :: 'a::comm_ring_1) * fps_deriv a * a ^ (n - 1)"
@@ -1124,50 +1812,12 @@
     by (simp add: field_simps)
 qed
 
-lemma fps_inverse_mult:
-  fixes a :: "'a::field fps"
-  shows "inverse (a * b) = inverse a * inverse b"
-proof -
-  consider "a $ 0 = 0" | "b $ 0 = 0" | "a $ 0 \<noteq> 0" "b $ 0 \<noteq> 0"
-    by blast
-  then show ?thesis
-  proof cases
-    case a: 1
-    then have "(a * b) $ 0 = 0"
-      by (simp add: fps_mult_nth)
-    with a have th: "inverse a = 0" "inverse (a * b) = 0"
-      by simp_all
-    show ?thesis
-      unfolding th by simp
-  next
-    case b: 2
-    then have "(a * b) $ 0 = 0"
-      by (simp add: fps_mult_nth)
-    with b have th: "inverse b = 0" "inverse (a * b) = 0"
-      by simp_all
-    show ?thesis
-      unfolding th by simp
-  next
-    case ab: 3
-    then have ab0:"(a * b) $ 0 \<noteq> 0"
-      by (simp add: fps_mult_nth)
-    from inverse_mult_eq_1[OF ab0]
-    have "inverse (a*b) * (a*b) * inverse a * inverse b = 1 * inverse a * inverse b"
-      by simp
-    then have "inverse (a*b) * (inverse a * a) * (inverse b * b) = inverse a * inverse b"
-      by (simp add: field_simps)
-    then show ?thesis
-      using inverse_mult_eq_1[OF ab(1)] inverse_mult_eq_1[OF ab(2)] by simp
-  qed
-qed
-
 lemma fps_inverse_deriv':
   fixes a :: "'a::field fps"
   assumes a0: "a $ 0 \<noteq> 0"
   shows "fps_deriv (inverse a) = - fps_deriv a / a\<^sup>2"
-  using fps_inverse_deriv[OF a0]
-  unfolding power2_eq_square fps_divide_def fps_inverse_mult
-  by simp
+  using fps_inverse_deriv[OF a0] a0
+  by (simp add: fps_divide_unit power2_eq_square fps_inverse_mult)
 
 lemma inverse_mult_eq_1':
   assumes f0: "f$0 \<noteq> (0::'a::field)"
@@ -1178,8 +1828,8 @@
   fixes a :: "'a::field fps"
   assumes a0: "b$0 \<noteq> 0"
   shows "fps_deriv (a / b) = (fps_deriv a * b - a * fps_deriv b) / b\<^sup>2"
-  using fps_inverse_deriv[OF a0]
-  by (simp add: fps_divide_def field_simps
+  using fps_inverse_deriv[OF a0] a0
+  by (simp add: fps_divide_unit field_simps
     power2_eq_square fps_inverse_mult inverse_mult_eq_1'[OF a0])
 
 
@@ -1231,6 +1881,9 @@
 lemma fps_compose_nth: "(a oo b)$n = setsum (\<lambda>i. a$i * (b^i$n)) {0..n}"
   by (simp add: fps_compose_def)
 
+lemma fps_compose_nth_0 [simp]: "(f oo g) $ 0 = f $ 0"
+  by (simp add: fps_compose_nth)
+
 lemma fps_compose_X[simp]: "a oo X = (a :: 'a::comm_ring_1 fps)"
   by (simp add: fps_ext fps_compose_def mult_delta_right setsum.delta')
 
@@ -1323,7 +1976,7 @@
 
 subsubsection \<open>Rule 3\<close>
 
-text \<open>Rule 3 is trivial and is given by @{text fps_times_def}.\<close>
+text \<open>Rule 3 is trivial and is given by \<open>fps_times_def\<close>.\<close>
 
 
 subsubsection \<open>Rule 5 --- summation and "division" by (1 - X)\<close>
@@ -2097,8 +2750,8 @@
     by (simp add: fps_deriv_power ac_simps del: power_Suc)
   then have "?iw * fps_deriv ?r * ?w = ?iw * fps_deriv a"
     by simp
-  then have "fps_deriv ?r * (?iw * ?w) = fps_deriv a / ?w"
-    by (simp add: fps_divide_def)
+  with a0 r0 have "fps_deriv ?r * (?iw * ?w) = fps_deriv a / ?w"
+    by (subst fps_divide_unit) (auto simp del: of_nat_Suc)
   then show ?thesis unfolding th0 by simp
 qed
 
@@ -2172,8 +2825,8 @@
 qed
 *)
 
-lemma fps_divide_1[simp]: "(a :: 'a::field fps) / 1 = a"
-  by (simp add: fps_divide_def)
+lemma fps_divide_1 [simp]: "(a :: 'a::field fps) / 1 = a"
+  by (fact divide_1)
 
 lemma radical_divide:
   fixes a :: "'a::field_char_0 fps"
@@ -2197,7 +2850,7 @@
     from that have "?r (a/b) $ 0 = (?r a / ?r b)$0"
       by simp
     then show ?thesis
-      using k a0 b0 rb0' by (simp add: fps_divide_def fps_mult_nth fps_inverse_def divide_inverse)
+      using k a0 b0 rb0' by (simp add: fps_divide_unit fps_mult_nth fps_inverse_def divide_inverse)
   qed
   show ?rhs if ?lhs
   proof -
@@ -2207,13 +2860,14 @@
       by (simp add: \<open>?lhs\<close> power_divide ra0 rb0)
     from a0 b0 ra0' rb0' kp \<open>?lhs\<close>
     have th1: "r k ((a / b) $ 0) = (fps_radical r k a / fps_radical r k b) $ 0"
-      by (simp add: fps_divide_def fps_mult_nth fps_inverse_def divide_inverse)
+      by (simp add: fps_divide_unit fps_mult_nth fps_inverse_def divide_inverse)
     from a0 b0 ra0' rb0' kp have ab0': "(a / b) $ 0 \<noteq> 0"
-      by (simp add: fps_divide_def fps_mult_nth fps_inverse_def nonzero_imp_inverse_nonzero)
+      by (simp add: fps_divide_unit fps_mult_nth fps_inverse_def nonzero_imp_inverse_nonzero)
     note tha[simp] = iffD1[OF power_radical[where r=r and k=h], OF a0 ra0[unfolded k], unfolded k[symmetric]]
     note thb[simp] = iffD1[OF power_radical[where r=r and k=h], OF b0 rb0[unfolded k], unfolded k[symmetric]]
-    have th2: "(?r a / ?r b)^k = a/b"
-      by (simp add: fps_divide_def power_mult_distrib fps_inverse_power[symmetric])
+    from b0 rb0' have th2: "(?r a / ?r b)^k = a/b"
+      by (simp add: fps_divide_unit power_mult_distrib fps_inverse_power[symmetric])
+      
     from iffD1[OF radical_unique[where r=r and a="?r a / ?r b" and b="a/b" and k=h], symmetric, unfolded k[symmetric], OF th0 th1 ab0' th2]
     show ?thesis .
   qed
@@ -2597,8 +3251,7 @@
   assumes c0: "(c$0 :: 'a::field) = 0"
     and b0: "b$0 \<noteq> 0"
   shows "(a/b) oo c = (a oo c) / (b oo c)"
-    unfolding fps_divide_def fps_compose_mult_distrib[OF c0]
-    fps_inverse_compose[OF c0 b0] ..
+    using b0 c0 by (simp add: fps_divide_unit fps_inverse_compose fps_compose_mult_distrib)
 
 lemma gp:
   assumes a0: "a$0 = (0::'a::field)"
@@ -2820,8 +3473,8 @@
     unfolding fps_compose_deriv[OF a0] .
   then have "(?d ?ia oo a) * ?d a * inverse (?d a) = ?d b * inverse (?d a)"
     by simp
-  then have "(?d ?ia oo a) * (inverse (?d a) * ?d a) = ?d b / ?d a"
-    by (simp add: fps_divide_def)
+  with a1 have "(?d ?ia oo a) * (inverse (?d a) * ?d a) = ?d b / ?d a"
+    by (simp add: fps_divide_unit)
   then have "(?d ?ia oo a) oo ?iXa =  (?d b / ?d a) oo ?iXa"
     unfolding inverse_mult_eq_1[OF da0] by simp
   then have "?d ?ia oo (a oo ?iXa) =  (?d b / ?d a) oo ?iXa"
@@ -2893,10 +3546,8 @@
 
 lemma E_neg: "E (- a) = inverse (E (a::'a::field_char_0))"
 proof -
-  from E_add_mult[of a "- a"] have th0: "E a * E (- a) = 1"
-    by (simp )
-  have th1: "E a $ 0 \<noteq> 0" by simp
-  from fps_inverse_unique[OF th1 th0] show ?thesis by simp
+  from E_add_mult[of a "- a"] have th0: "E a * E (- a) = 1" by simp
+  from fps_inverse_unique[OF th0] show ?thesis by simp
 qed
 
 lemma E_nth_deriv[simp]: "fps_nth_deriv n (E (a::'a::field_char_0)) = (fps_const a)^n * (E a)"
@@ -2919,12 +3570,6 @@
   from fps_inv_right[OF b0 b1] show "(E a - 1) oo fps_inv (E a - 1) = X" .
 qed
 
-lemma fps_const_inverse: "a \<noteq> 0 \<Longrightarrow> inverse (fps_const (a::'a::field)) = fps_const (inverse a)"
-  apply (auto simp add: fps_eq_iff fps_inverse_def)
-  apply (case_tac n)
-  apply auto
-  done
-
 lemma E_power_mult: "(E (c::'a::field_char_0))^n = E (of_nat n * c)"
   by (induct n) (auto simp add: field_simps E_add_mult)
 
@@ -3105,7 +3750,7 @@
     unfolding fps_binomial_deriv
     by (simp add: fps_divide_def field_simps)
   also have "\<dots> = (fps_const (c + d)/ (1 + X)) * ?P"
-    by (simp add: field_simps fps_divide_def fps_const_add[symmetric] del: fps_const_add)
+    by (simp add: field_simps fps_divide_unit fps_const_add[symmetric] del: fps_const_add)
   finally have th0: "fps_deriv ?P = fps_const (c+d) * ?P / (1 + X)"
     by (simp add: fps_divide_def)
   have "?P = fps_const (?P$0) * ?b (c + d)"
@@ -3525,13 +4170,14 @@
 lemma fps_tan_deriv: "fps_deriv (fps_tan c) = fps_const c / (fps_cos c)\<^sup>2"
 proof -
   have th0: "fps_cos c $ 0 \<noteq> 0" by (simp add: fps_cos_def)
-  show ?thesis
-    using fps_sin_cos_sum_of_squares[of c]
-    apply (simp add: fps_tan_def fps_divide_deriv[OF th0] fps_sin_deriv fps_cos_deriv
-      fps_const_neg[symmetric] field_simps power2_eq_square del: fps_const_neg)
-    unfolding distrib_left[symmetric]
-    apply simp
-    done
+  from this have "fps_cos c \<noteq> 0" by (intro notI) simp
+  hence "fps_deriv (fps_tan c) = 
+           fps_const c * (fps_cos c^2 + fps_sin c^2) / (fps_cos c^2)"
+    by (simp add: fps_tan_def fps_divide_deriv power2_eq_square algebra_simps 
+                  fps_sin_deriv fps_cos_deriv fps_const_neg[symmetric] div_mult_swap
+             del: fps_const_neg)
+  also note fps_sin_cos_sum_of_squares
+  finally show ?thesis by simp
 qed
 
 text \<open>Connection to E c over the complex numbers --- Euler and de Moivre.\<close>
@@ -3560,7 +4206,7 @@
   unfolding minus_mult_right Eii_sin_cos by (simp add: fps_sin_even fps_cos_odd)
 
 lemma fps_const_minus: "fps_const (c::'a::group_add) - fps_const d = fps_const (c - d)"
-  by (simp add: fps_eq_iff fps_const_def)
+  by (fact fps_const_sub)
 
 lemma fps_numeral_fps_const: "numeral i = fps_const (numeral i :: 'a::comm_ring_1)"
   by (fact numeral_fps_const) (* FIXME: duplicate *)
@@ -3571,7 +4217,7 @@
     by (simp add: numeral_fps_const)
   show ?thesis
     unfolding Eii_sin_cos minus_mult_commute
-    by (simp add: fps_sin_even fps_cos_odd numeral_fps_const fps_divide_def fps_const_inverse th)
+    by (simp add: fps_sin_even fps_cos_odd numeral_fps_const fps_divide_unit fps_const_inverse th)
 qed
 
 lemma fps_sin_Eii: "fps_sin c = (E (ii * c) - E (- ii * c)) / fps_const (2*ii)"
@@ -3580,13 +4226,13 @@
     by (simp add: fps_eq_iff numeral_fps_const)
   show ?thesis
     unfolding Eii_sin_cos minus_mult_commute
-    by (simp add: fps_sin_even fps_cos_odd fps_divide_def fps_const_inverse th)
+    by (simp add: fps_sin_even fps_cos_odd fps_divide_unit fps_const_inverse th)
 qed
 
 lemma fps_tan_Eii:
   "fps_tan c = (E (ii * c) - E (- ii * c)) / (fps_const ii * (E (ii * c) + E (- ii * c)))"
   unfolding fps_tan_def fps_sin_Eii fps_cos_Eii mult_minus_left E_neg
-  apply (simp add: fps_divide_def fps_inverse_mult fps_const_mult[symmetric] fps_const_inverse del: fps_const_mult)
+  apply (simp add: fps_divide_unit fps_inverse_mult fps_const_mult[symmetric] fps_const_inverse del: fps_const_mult)
   apply simp
   done
 
@@ -3719,11 +4365,11 @@
   shows "f $ j = g $ j"
 proof (rule ccontr)
   assume "f $ j \<noteq> g $ j"
-  then have "\<exists>n. f $ n \<noteq> g $ n" by auto
-  with assms have "i < (LEAST n. f $ n \<noteq> g $ n)"
+  hence "f \<noteq> g" by auto
+  with assms have "i < subdegree (f - g)"
     by (simp add: split_if_asm dist_fps_def)
   also have "\<dots> \<le> j"
-    using \<open>f $ j \<noteq> g $ j\<close> by (auto intro: Least_le)
+    using \<open>f $ j \<noteq> g $ j\<close> by (intro subdegree_leI) simp_all
   finally show False using \<open>j \<le> i\<close> by simp
 qed
 
@@ -3735,12 +4381,11 @@
   then show ?thesis by simp
 next
   case False
-  then have "\<exists>n. f $ n \<noteq> g $ n" by (simp add: fps_eq_iff)
-  with assms have "dist f g = inverse (2 ^ (LEAST n. f $ n \<noteq> g $ n))"
+  with assms have "dist f g = inverse (2 ^ subdegree (f - g))"
     by (simp add: split_if_asm dist_fps_def)
   moreover
-  from assms \<open>\<exists>n. f $ n \<noteq> g $ n\<close> have "i < (LEAST n. f $ n \<noteq> g $ n)"
-    by (metis (mono_tags) LeastI not_less)
+  from assms and False have "i < subdegree (f - g)"
+    by (intro subdegree_greaterI) simp_all
   ultimately show ?thesis by simp
 qed
 
@@ -3767,15 +4412,10 @@
     show "X ----> Abs_fps (\<lambda>i. X (M i) $ i)"
       unfolding tendsto_iff
     proof safe
-      fix e::real assume "0 < e"
-      with LIMSEQ_inverse_realpow_zero[of 2, simplified, simplified filterlim_iff,
-        THEN spec, of "\<lambda>x. x < e"]
-      have "eventually (\<lambda>i. inverse (2 ^ i) < e) sequentially"
-        unfolding eventually_nhds
-        apply clarsimp
-        apply (rule FalseE)
-        apply auto -- \<open>slow\<close>
-        done
+      fix e::real assume e: "0 < e"
+      have "(\<lambda>n. inverse (2 ^ n) :: real) ----> 0" by (rule LIMSEQ_inverse_realpow_zero) simp_all
+      from this and e have "eventually (\<lambda>i. inverse (2 ^ i) < e) sequentially"
+        by (rule order_tendstoD)
       then obtain i where "inverse (2 ^ i) < e"
         by (auto simp: eventually_sequentially)
       have "eventually (\<lambda>x. M i \<le> x) sequentially"
--- a/src/HOL/Library/FuncSet.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/FuncSet.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -226,7 +226,7 @@
 
 subsection \<open>Bijections Between Sets\<close>
 
-text \<open>The definition of @{const bij_betw} is in @{text "Fun.thy"}, but most of
+text \<open>The definition of @{const bij_betw} is in \<open>Fun.thy\<close>, but most of
 the theorems belong here, or need at least @{term Hilbert_Choice}.\<close>
 
 lemma bij_betwI:
--- a/src/HOL/Library/Function_Growth.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Function_Growth.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -12,39 +12,39 @@
 text \<open>
   When comparing growth of functions in computer science, it is common to adhere
   to Landau Symbols (``O-Notation'').  However, these come at the cost of notational
-  oddities, particularly writing @{text "f = O(g)"} for @{text "f \<in> O(g)"} etc.
+  oddities, particularly writing \<open>f = O(g)\<close> for \<open>f \<in> O(g)\<close> etc.
   
   Here we suggest a different way, following Hardy (G.~H.~Hardy and J.~E.~Littlewood,
   Some problems of Diophantine approximation, Acta Mathematica 37 (1914), p.~225).
-  We establish a quasi order relation @{text "\<lesssim>"} on functions such that
-  @{text "f \<lesssim> g \<longleftrightarrow> f \<in> O(g)"}.  From a didactic point of view, this does not only
+  We establish a quasi order relation \<open>\<lesssim>\<close> on functions such that
+  \<open>f \<lesssim> g \<longleftrightarrow> f \<in> O(g)\<close>.  From a didactic point of view, this does not only
   avoid the notational oddities mentioned above but also emphasizes the key insight
   of a growth hierarchy of functions:
-  @{text "(\<lambda>n. 0) \<lesssim> (\<lambda>n. k) \<lesssim> Discrete.log \<lesssim> Discrete.sqrt \<lesssim> id \<lesssim> \<dots>"}.
+  \<open>(\<lambda>n. 0) \<lesssim> (\<lambda>n. k) \<lesssim> Discrete.log \<lesssim> Discrete.sqrt \<lesssim> id \<lesssim> \<dots>\<close>.
 \<close>
 
 subsection \<open>Model\<close>
 
 text \<open>
-  Our growth functions are of type @{text "\<nat> \<Rightarrow> \<nat>"}.  This is different
-  to the usual conventions for Landau symbols for which @{text "\<real> \<Rightarrow> \<real>"}
-  would be appropriate, but we argue that @{text "\<real> \<Rightarrow> \<real>"} is more
+  Our growth functions are of type \<open>\<nat> \<Rightarrow> \<nat>\<close>.  This is different
+  to the usual conventions for Landau symbols for which \<open>\<real> \<Rightarrow> \<real>\<close>
+  would be appropriate, but we argue that \<open>\<real> \<Rightarrow> \<real>\<close> is more
   appropriate for analysis, whereas our setting is discrete.
 
-  Note that we also restrict the additional coefficients to @{text \<nat>}, something
+  Note that we also restrict the additional coefficients to \<open>\<nat>\<close>, something
   we discuss at the particular definitions.
 \<close>
 
-subsection \<open>The @{text "\<lesssim>"} relation\<close>
+subsection \<open>The \<open>\<lesssim>\<close> relation\<close>
 
 definition less_eq_fun :: "(nat \<Rightarrow> nat) \<Rightarrow> (nat \<Rightarrow> nat) \<Rightarrow> bool" (infix "\<lesssim>" 50)
 where
   "f \<lesssim> g \<longleftrightarrow> (\<exists>c>0. \<exists>n. \<forall>m>n. f m \<le> c * g m)"
 
 text \<open>
-  This yields @{text "f \<lesssim> g \<longleftrightarrow> f \<in> O(g)"}.  Note that @{text c} is restricted to
-  @{text \<nat>}.  This does not pose any problems since if @{text "f \<in> O(g)"} holds for
-  a @{text "c \<in> \<real>"}, it also holds for @{text "\<lceil>c\<rceil> \<in> \<nat>"} by transitivity.
+  This yields \<open>f \<lesssim> g \<longleftrightarrow> f \<in> O(g)\<close>.  Note that \<open>c\<close> is restricted to
+  \<open>\<nat>\<close>.  This does not pose any problems since if \<open>f \<in> O(g)\<close> holds for
+  a \<open>c \<in> \<real>\<close>, it also holds for \<open>\<lceil>c\<rceil> \<in> \<nat>\<close> by transitivity.
 \<close>
 
 lemma less_eq_funI [intro?]:
@@ -68,7 +68,7 @@
   using assms unfolding less_eq_fun_def linorder_not_le [symmetric] by blast
 
 
-subsection \<open>The @{text "\<approx>"} relation, the equivalence relation induced by @{text "\<lesssim>"}\<close>
+subsection \<open>The \<open>\<approx>\<close> relation, the equivalence relation induced by \<open>\<lesssim>\<close>\<close>
 
 definition equiv_fun :: "(nat \<Rightarrow> nat) \<Rightarrow> (nat \<Rightarrow> nat) \<Rightarrow> bool" (infix "\<cong>" 50)
 where
@@ -76,8 +76,8 @@
     (\<exists>c\<^sub>1>0. \<exists>c\<^sub>2>0. \<exists>n. \<forall>m>n. f m \<le> c\<^sub>1 * g m \<and> g m \<le> c\<^sub>2 * f m)"
 
 text \<open>
-  This yields @{text "f \<cong> g \<longleftrightarrow> f \<in> \<Theta>(g)"}.  Concerning @{text "c\<^sub>1"} and @{text "c\<^sub>2"}
-  restricted to @{typ nat}, see note above on @{text "(\<lesssim>)"}.
+  This yields \<open>f \<cong> g \<longleftrightarrow> f \<in> \<Theta>(g)\<close>.  Concerning \<open>c\<^sub>1\<close> and \<open>c\<^sub>2\<close>
+  restricted to @{typ nat}, see note above on \<open>(\<lesssim>)\<close>.
 \<close>
 
 lemma equiv_funI [intro?]:
@@ -105,7 +105,7 @@
   using assms unfolding equiv_fun_def linorder_not_le [symmetric] by blast
 
 
-subsection \<open>The @{text "\<prec>"} relation, the strict part of @{text "\<lesssim>"}\<close>
+subsection \<open>The \<open>\<prec>\<close> relation, the strict part of \<open>\<lesssim>\<close>\<close>
 
 definition less_fun :: "(nat \<Rightarrow> nat) \<Rightarrow> (nat \<Rightarrow> nat) \<Rightarrow> bool" (infix "\<prec>" 50)
 where
@@ -147,18 +147,18 @@
   using assms unfolding less_fun_def linorder_not_less [symmetric] by blast
 
 text \<open>
-  I did not find a proof for @{text "f \<prec> g \<longleftrightarrow> f \<in> o(g)"}.  Maybe this only
-  holds if @{text f} and/or @{text g} are of a certain class of functions.
-  However @{text "f \<in> o(g) \<longrightarrow> f \<prec> g"} is provable, and this yields a
+  I did not find a proof for \<open>f \<prec> g \<longleftrightarrow> f \<in> o(g)\<close>.  Maybe this only
+  holds if \<open>f\<close> and/or \<open>g\<close> are of a certain class of functions.
+  However \<open>f \<in> o(g) \<longrightarrow> f \<prec> g\<close> is provable, and this yields a
   handy introduction rule.
 
-  Note that D. Knuth ignores @{text o} altogether.  So what \dots
+  Note that D. Knuth ignores \<open>o\<close> altogether.  So what \dots
 
-  Something still has to be said about the coefficient @{text c} in
-  the definition of @{text "(\<prec>)"}.  In the typical definition of @{text o},
-  it occurs on the \emph{right} hand side of the @{text "(>)"}.  The reason
-  is that the situation is dual to the definition of @{text O}: the definition
-  works since @{text c} may become arbitrary small.  Since this is not possible
+  Something still has to be said about the coefficient \<open>c\<close> in
+  the definition of \<open>(\<prec>)\<close>.  In the typical definition of \<open>o\<close>,
+  it occurs on the \emph{right} hand side of the \<open>(>)\<close>.  The reason
+  is that the situation is dual to the definition of \<open>O\<close>: the definition
+  works since \<open>c\<close> may become arbitrarily small.  Since this is not possible
   within @{term \<nat>}, we push the coefficient to the left hand side instead such
   that it becomes arbitrarily big.
 \<close>
@@ -187,12 +187,12 @@
 qed
 
 
-subsection \<open>@{text "\<lesssim>"} is a preorder\<close>
+subsection \<open>\<open>\<lesssim>\<close> is a preorder\<close>
 
-text \<open>This yields all lemmas relating @{text "\<lesssim>"}, @{text "\<prec>"} and @{text "\<cong>"}.\<close>
+text \<open>This yields all lemmas relating \<open>\<lesssim>\<close>, \<open>\<prec>\<close> and \<open>\<cong>\<close>.\<close>
 
 interpretation fun_order: preorder_equiv less_eq_fun less_fun
-  where "preorder_equiv.equiv less_eq_fun = equiv_fun"
+  rewrites "preorder_equiv.equiv less_eq_fun = equiv_fun"
 proof -
   interpret preorder: preorder_equiv less_eq_fun less_fun
   proof
--- a/src/HOL/Library/Fundamental_Theorem_Algebra.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Fundamental_Theorem_Algebra.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -149,7 +149,7 @@
     unfolding linorder_not_le[symmetric] by blast
 qed
 
-text \<open>Hence we can always reduce modulus of @{text "1 + b z^n"} if nonzero\<close>
+text \<open>Hence we can always reduce modulus of \<open>1 + b z^n\<close> if nonzero\<close>
 lemma reduce_poly_simple:
   assumes b: "b \<noteq> 0"
     and n: "n \<noteq> 0"
--- a/src/HOL/Library/Groups_Big_Fun.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Groups_Big_Fun.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -20,7 +20,7 @@
 where
   expand_set: "G g = comm_monoid_set.F f 1 g {a. g a \<noteq> 1}"
 
-interpretation F!: comm_monoid_set f 1
+interpretation F: comm_monoid_set f 1
   ..
 
 lemma expand_superset:
@@ -225,13 +225,13 @@
 where
   "Sum_any = comm_monoid_fun.G plus 0"
 
-permanent_interpretation Sum_any!: comm_monoid_fun plus 0
+permanent_interpretation Sum_any: comm_monoid_fun plus 0
 where
   "comm_monoid_fun.G plus 0 = Sum_any" and
   "comm_monoid_set.F plus 0 = setsum"
 proof -
   show "comm_monoid_fun plus 0" ..
-  then interpret Sum_any!: comm_monoid_fun plus 0 .
+  then interpret Sum_any: comm_monoid_fun plus 0 .
   from Sum_any_def show "comm_monoid_fun.G plus 0 = Sum_any" by rule
   from setsum_def show "comm_monoid_set.F plus 0 = setsum" by rule
 qed
@@ -298,13 +298,13 @@
 where
   "Prod_any = comm_monoid_fun.G times 1"
 
-permanent_interpretation Prod_any!: comm_monoid_fun times 1
+permanent_interpretation Prod_any: comm_monoid_fun times 1
 where
   "comm_monoid_fun.G times 1 = Prod_any" and
   "comm_monoid_set.F times 1 = setprod"
 proof -
   show "comm_monoid_fun times 1" ..
-  then interpret Prod_any!: comm_monoid_fun times 1 .
+  then interpret Prod_any: comm_monoid_fun times 1 .
   from Prod_any_def show "comm_monoid_fun.G times 1 = Prod_any" by rule
   from setprod_def show "comm_monoid_set.F times 1 = setprod" by rule
 qed
--- a/src/HOL/Library/Infinite_Set.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Infinite_Set.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -13,7 +13,7 @@
 text \<open>
   Some elementary facts about infinite sets, mostly by Stephan Merz.
   Beware! Because "infinite" merely abbreviates a negation, these
-  lemmas may not work well with @{text "blast"}.
+  lemmas may not work well with \<open>blast\<close>.
 \<close>
 
 abbreviation infinite :: "'a set \<Rightarrow> bool"
@@ -96,7 +96,7 @@
 
 text \<open>
   For a set of natural numbers to be infinite, it is enough to know
-  that for any number larger than some @{text k}, there is some larger
+  that for any number larger than some \<open>k\<close>, there is some larger
   number that is an element of the set.
 \<close>
 
--- a/src/HOL/Library/Liminf_Limsup.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Liminf_Limsup.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -30,7 +30,7 @@
   shows "(INF i : A. INF j : B. f i j) = (INF p : A \<times> B. f (fst p) (snd p))"
   by (rule antisym) (auto intro!: INF_greatest INF_lower2)
 
-subsubsection \<open>@{text Liminf} and @{text Limsup}\<close>
+subsubsection \<open>\<open>Liminf\<close> and \<open>Limsup\<close>\<close>
 
 definition Liminf :: "'a filter \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'b :: complete_lattice" where
   "Liminf F f = (SUP P:{P. eventually P F}. INF x:{x. P x}. f x)"
--- a/src/HOL/Library/ListVector.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/ListVector.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -18,7 +18,7 @@
 lemma scale1[simp]: "(1::'a::monoid_mult) *\<^sub>s xs = xs"
 by (induct xs) simp_all
 
-subsection \<open>@{text"+"} and @{text"-"}\<close>
+subsection \<open>\<open>+\<close> and \<open>-\<close>\<close>
 
 fun zipwith0 :: "('a::zero \<Rightarrow> 'b::zero \<Rightarrow> 'c) \<Rightarrow> 'a list \<Rightarrow> 'b list \<Rightarrow> 'c list"
 where
--- a/src/HOL/Library/Lub_Glb.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Lub_Glb.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -17,7 +17,7 @@
   where "x <=* S = (ALL y: S. x \<le> y)"
 
 
-subsection \<open>Rules for the Relations @{text "*<="} and @{text "<=*"}\<close>
+subsection \<open>Rules for the Relations \<open>*<=\<close> and \<open><=*\<close>\<close>
 
 lemma setleI: "ALL y: S. y \<le> x \<Longrightarrow> S *<= x"
   by (simp add: setle_def)
--- a/src/HOL/Library/Mapping.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Mapping.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -10,7 +10,7 @@
 
 subsection \<open>Parametricity transfer rules\<close>
 
-lemma map_of_foldr: -- \<open>FIXME move\<close>
+lemma map_of_foldr: \<comment> \<open>FIXME move\<close>
   "map_of xs = foldr (\<lambda>(k, v) m. m(k \<mapsto> v)) xs Map.empty"
   using map_add_map_of_foldr [of Map.empty] by auto
 
@@ -107,7 +107,7 @@
   is "\<lambda>m k. m k" parametric lookup_parametric .
 
 declare [[code drop: Mapping.lookup]]
-setup \<open>Code.add_default_eqn @{thm Mapping.lookup.abs_eq}\<close> -- \<open>FIXME lifting\<close>
+setup \<open>Code.add_default_eqn @{thm Mapping.lookup.abs_eq}\<close> \<comment> \<open>FIXME lifting\<close>
 
 lift_definition update :: "'a \<Rightarrow> 'b \<Rightarrow> ('a, 'b) mapping \<Rightarrow> ('a, 'b) mapping"
   is "\<lambda>k v m. m(k \<mapsto> v)" parametric update_parametric .
--- a/src/HOL/Library/Multiset.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Multiset.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -809,12 +809,11 @@
 text \<open>
   A note on code generation: When defining some function containing a
   subterm @{term "fold_mset F"}, code generation is not automatic. When
-  interpreting locale @{text left_commutative} with @{text F}, the
+  interpreting locale \<open>left_commutative\<close> with \<open>F\<close>, the
   would-be code thms for @{const fold_mset} become thms like
-  @{term "fold_mset F z {#} = z"} where @{text F} is not a pattern but
+  @{term "fold_mset F z {#} = z"} where \<open>F\<close> is not a pattern but
   contains defined symbols, i.e.\ is not a code thm. Hence a separate
-  constant with its own code thms needs to be introduced for @{text
-  F}. See the image operator below.
+  constant with its own code thms needs to be introduced for \<open>F\<close>. See the image operator below.
 \<close>
 
 
@@ -1059,8 +1058,8 @@
 where
   "mset_set = folding.F (\<lambda>x M. {#x#} + M) {#}"
 
-interpretation mset_set!: folding "\<lambda>x M. {#x#} + M" "{#}"
-where
+interpretation mset_set: folding "\<lambda>x M. {#x#} + M" "{#}"
+rewrites
   "folding.F (\<lambda>x M. {#x#} + M) {#} = mset_set"
 proof -
   interpret comp_fun_commute "\<lambda>x M. {#x#} + M"
@@ -1083,7 +1082,7 @@
   qed
   then show "PROP ?P" "PROP ?Q" "PROP ?R"
   by (auto elim!: Set.set_insert)
-qed -- \<open>TODO: maybe define @{const mset_set} also in terms of @{const Abs_multiset}\<close>
+qed \<comment> \<open>TODO: maybe define @{const mset_set} also in terms of @{const Abs_multiset}\<close>
 
 lemma elem_mset_set[simp, intro]: "finite A \<Longrightarrow> x \<in># mset_set A \<longleftrightarrow> x \<in> A"
   by (induct A rule: finite_induct) simp_all
@@ -1222,8 +1221,8 @@
 definition msetsum :: "'a multiset \<Rightarrow> 'a"
   where "msetsum = comm_monoid_mset.F plus 0"
 
-sublocale msetsum!: comm_monoid_mset plus 0
-  where "comm_monoid_mset.F plus 0 = msetsum"
+sublocale msetsum: comm_monoid_mset plus 0
+  rewrites "comm_monoid_mset.F plus 0 = msetsum"
 proof -
   show "comm_monoid_mset plus 0" ..
   from msetsum_def show "comm_monoid_mset.F plus 0 = msetsum" ..
@@ -1280,8 +1279,8 @@
 definition msetprod :: "'a multiset \<Rightarrow> 'a"
   where "msetprod = comm_monoid_mset.F times 1"
 
-sublocale msetprod!: comm_monoid_mset times 1
-  where "comm_monoid_mset.F times 1 = msetprod"
+sublocale msetprod: comm_monoid_mset times 1
+  rewrites "comm_monoid_mset.F times 1 = msetprod"
 proof -
   show "comm_monoid_mset times 1" ..
   show "comm_monoid_mset.F times 1 = msetprod" using msetprod_def ..
@@ -1349,7 +1348,7 @@
 
 text \<open>
   This lemma shows which properties suffice to show that a function
-  @{text "f"} with @{text "f xs = ys"} behaves like sort.
+  \<open>f\<close> with \<open>f xs = ys\<close> behaves like sort.
 \<close>
 
 lemma properties_for_sort_key:
@@ -2106,7 +2105,7 @@
 
 declare sorted_list_of_multiset_mset [code]
 
-lemma [code]: -- \<open>not very efficient, but representation-ignorant!\<close>
+lemma [code]: \<comment> \<open>not very efficient, but representation-ignorant!\<close>
   "mset_set A = mset (sorted_list_of_set A)"
   apply (cases "finite A")
   apply simp_all
--- a/src/HOL/Library/Numeral_Type.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Numeral_Type.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -182,7 +182,7 @@
 subsection \<open>Ring class instances\<close>
 
 text \<open>
-  Unfortunately @{text ring_1} instance is not possible for
+  Unfortunately \<open>ring_1\<close> instance is not possible for
   @{typ num1}, since 0 and 1 are not distinct.
 \<close>
 
--- a/src/HOL/Library/Old_Datatype.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Old_Datatype.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -21,7 +21,7 @@
   morphisms Rep_Node Abs_Node
   unfolding Node_def by auto
 
-text\<open>Datatypes will be represented by sets of type @{text node}\<close>
+text\<open>Datatypes will be represented by sets of type \<open>node\<close>\<close>
 
 type_synonym 'a item        = "('a, unit) node set"
 type_synonym ('a, 'b) dtree = "('a, 'b) node set"
--- a/src/HOL/Library/Old_SMT.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Old_SMT.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -51,7 +51,7 @@
 definition weight :: "int \<Rightarrow> bool \<Rightarrow> bool" where "weight _ P = P"
 
 text \<open>
-Weights must be non-negative.  The value @{text 0} is equivalent to providing
+Weights must be non-negative.  The value \<open>0\<close> is equivalent to providing
 no weight at all.
 
 Weights should only be used at quantifiers and only inside triggers (if the
@@ -150,7 +150,7 @@
 
 text \<open>
 The current configuration can be printed by the command
-@{text old_smt_status}, which shows the values of most options.
+\<open>old_smt_status\<close>, which shows the values of most options.
 \<close>
 
 
@@ -158,14 +158,14 @@
 subsection \<open>General configuration options\<close>
 
 text \<open>
-The option @{text old_smt_solver} can be used to change the target SMT
-solver.  The possible values can be obtained from the @{text old_smt_status}
+The option \<open>old_smt_solver\<close> can be used to change the target SMT
+solver.  The possible values can be obtained from the \<open>old_smt_status\<close>
 command.
 
 Due to licensing restrictions, Yices and Z3 are not installed/enabled
 by default.  Z3 is free for non-commercial applications and can be enabled
-by setting the @{text OLD_Z3_NON_COMMERCIAL} environment variable to
-@{text yes}.
+by setting the \<open>OLD_Z3_NON_COMMERCIAL\<close> environment variable to
+\<open>yes\<close>.
 \<close>
 
 declare [[ old_smt_solver = z3 ]]
@@ -242,7 +242,7 @@
 subsection \<open>Certificates\<close>
 
 text \<open>
-By setting the option @{text old_smt_certificates} to the name of a file,
+By setting the option \<open>old_smt_certificates\<close> to the name of a file,
 all following applications of an SMT solver are cached in that file.
 Any further application of the same SMT solver (using the very same
 configuration) re-uses the cached certificate instead of invoking the
@@ -250,7 +250,7 @@
 
 The filename should be given as an explicit path.  It is good
 practice to use the name of the current theory (with ending
-@{text ".certs"} instead of @{text ".thy"}) as the certificates file.
+\<open>.certs\<close> instead of \<open>.thy\<close>) as the certificates file.
 Certificate files should be used at most once in a certain theory context,
 to avoid race conditions with other concurrent accesses.
 \<close>
@@ -258,11 +258,11 @@
 declare [[ old_smt_certificates = "" ]]
 
 text \<open>
-The option @{text old_smt_read_only_certificates} controls whether only
+The option \<open>old_smt_read_only_certificates\<close> controls whether only
 stored certificates should be used or invocation of an SMT solver
-is allowed.  When set to @{text true}, no SMT solver will ever be
+is allowed.  When set to \<open>true\<close>, no SMT solver will ever be
 invoked and only the existing certificates found in the configured
-cache are used;  when set to @{text false} and there is no cached
+cache are used;  when set to \<open>false\<close> and there is no cached
 certificate for some proposition, then the configured SMT solver is
 invoked.
 \<close>
@@ -275,7 +275,7 @@
 
 text \<open>
 The SMT method, when applied, traces important information.  To
-make it entirely silent, set the following option to @{text false}.
+make it entirely silent, set the following option to \<open>false\<close>.
 \<close>
 
 declare [[ old_smt_verbose = true ]]
@@ -283,7 +283,7 @@
 text \<open>
 For tracing the generated problem file given to the SMT solver as
 well as the returned result of the solver, the option
-@{text old_smt_trace} should be set to @{text true}.
+\<open>old_smt_trace\<close> should be set to \<open>true\<close>.
 \<close>
 
 declare [[ old_smt_trace = false ]]
@@ -292,7 +292,7 @@
 From the set of assumptions given to the SMT solver, those assumptions
 used in the proof are traced when the following option is set to
 @{term true}.  This only works for Z3 when it runs in non-oracle mode
-(see options @{text old_smt_solver} and @{text old_smt_oracle} above).
+(see options \<open>old_smt_solver\<close> and \<open>old_smt_oracle\<close> above).
 \<close>
 
 declare [[ old_smt_trace_used_facts = false ]]
@@ -304,9 +304,9 @@
 text \<open>
 Several proof rules of Z3 are not very well documented.  There are two
 lemma groups which can turn failing Z3 proof reconstruction attempts
-into succeeding ones: the facts in @{text z3_rule} are tried prior to
+into succeeding ones: the facts in \<open>z3_rule\<close> are tried prior to
 any implemented reconstruction procedure for all uncertain Z3 proof
-rules;  the facts in @{text z3_simp} are only fed to invocations of
+rules;  the facts in \<open>z3_simp\<close> are only fed to invocations of
 the simplifier when reconstructing theory-specific proof steps.
 \<close>
 
--- a/src/HOL/Library/Omega_Words_Fun.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Omega_Words_Fun.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -529,20 +529,20 @@
 proof -
   have "\<exists>k. range (suffix k x) \<subseteq> limit x"
   proof -
-    -- "The set of letters that are not in the limit is certainly finite."
+    \<comment> "The set of letters that are not in the limit is certainly finite."
     from fin have "finite (range x - limit x)"
       by simp
-    -- "Moreover, any such letter occurs only finitely often"
+    \<comment> "Moreover, any such letter occurs only finitely often"
     moreover
     have "\<forall>a \<in> range x - limit x. finite (x -` {a})"
       by (auto simp add: limit_vimage)
-    -- "Thus, there are only finitely many occurrences of such letters."
+    \<comment> "Thus, there are only finitely many occurrences of such letters."
     ultimately have "finite (UN a : range x - limit x. x -` {a})"
       by (blast intro: finite_UN_I)
-    -- "Therefore these occurrences are within some initial interval."
+    \<comment> "Therefore these occurrences are within some initial interval."
     then obtain k where "(UN a : range x - limit x. x -` {a}) \<subseteq> {..<k}"
       by (blast dest: finite_nat_bounded)
-    -- "This is just the bound we are looking for."
+    \<comment> "This is just the bound we are looking for."
     hence "\<forall>m. k \<le> m \<longrightarrow> x m \<in> limit x"
       by (auto simp add: limit_vimage)
     hence "range (suffix k x) \<subseteq> limit x"
@@ -624,11 +624,11 @@
     fix a assume a: "a \<in> set w"
     then obtain k where k: "k < length w \<and> w!k = a"
       by (auto simp add: set_conv_nth)
-    -- "the following bound is terrible, but it simplifies the proof"
+    \<comment> "the following bound is terrible, but it simplifies the proof"
     from nempty k have "\<forall>m. w\<^sup>\<omega> ((Suc m)*(length w) + k) = a"
       by (simp add: mod_add_left_eq)
     moreover
-    -- "why is the following so hard to prove??"
+    \<comment> "why is the following so hard to prove??"
     have "\<forall>m. m < (Suc m)*(length w) + k"
     proof
       fix m
@@ -661,7 +661,7 @@
 text \<open>
   The converse relation is not true in general: $f(a)$ can be in the
   limit of $f \circ w$ even though $a$ is not in the limit of $w$.
-  However, @{text limit} commutes with renaming if the function is
+  However, \<open>limit\<close> commutes with renaming if the function is
   injective. More generally, if $f(a)$ is the image of only finitely
   many elements, some of these must be in the limit of $w$.
 \<close>
@@ -672,21 +672,21 @@
   shows "\<exists>a \<in> (f -` {x}). a \<in> limit w"
 proof (rule ccontr)
   assume contra: "\<not> ?thesis"
-  -- "hence, every element in the pre-image occurs only finitely often"
+  \<comment> "hence, every element in the pre-image occurs only finitely often"
   then have "\<forall>a \<in> (f -` {x}). finite {n. w n = a}"
     by (simp add: limit_def Inf_many_def)
-  -- "so there are only finitely many occurrences of any such element"
+  \<comment> "so there are only finitely many occurrences of any such element"
   with fin have "finite (\<Union> a \<in> (f -` {x}). {n. w n = a})"
     by auto
-  -- \<open>these are precisely those positions where $x$ occurs in $f \circ w$\<close>
+  \<comment> \<open>these are precisely those positions where $x$ occurs in $f \circ w$\<close>
   moreover
   have "(\<Union> a \<in> (f -` {x}). {n. w n = a}) = {n. f(w n) = x}"
     by auto
   ultimately
-  -- "so $x$ can occur only finitely often in the translated word"
+  \<comment> "so $x$ can occur only finitely often in the translated word"
   have "finite {n. f(w n) = x}"
     by simp
-  -- \<open>\ldots\ which yields a contradiction\<close>
+  \<comment> \<open>\ldots\ which yields a contradiction\<close>
   with x show "False"
     by (simp add: limit_def Inf_many_def)
 qed
--- a/src/HOL/Library/Order_Continuity.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Order_Continuity.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -29,8 +29,8 @@
   done
 
 text \<open>
-  The name @{text continuous} is already taken in @{text "Complex_Main"}, so we use
-  @{text "sup_continuous"} and @{text "inf_continuous"}. These names appear sometimes in literature
+  The name \<open>continuous\<close> is already taken in \<open>Complex_Main\<close>, so we use
+  \<open>sup_continuous\<close> and \<open>inf_continuous\<close>. These names appear sometimes in literature
   and have the advantage that these names are duals.
 \<close>
 
--- a/src/HOL/Library/Permutation.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Permutation.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -116,7 +116,7 @@
   apply (safe intro!: perm_append2)
   apply (rule append_perm_imp_perm)
   apply (rule perm_append_swap [THEN perm.trans])
-    -- \<open>the previous step helps this @{text blast} call succeed quickly\<close>
+    \<comment> \<open>the previous step helps this \<open>blast\<close> call succeed quickly\<close>
   apply (blast intro: perm_append_swap)
   done
 
--- a/src/HOL/Library/Polynomial.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Polynomial.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -50,7 +50,7 @@
   "tl (x ## xs) = xs"
   by (simp add: cCons_def)
 
-subsection \<open>Definition of type @{text poly}\<close>
+subsection \<open>Definition of type \<open>poly\<close>\<close>
 
 typedef (overloaded) 'a poly = "{f :: nat \<Rightarrow> 'a::zero. \<forall>\<^sub>\<infinity> n. f n = 0}"
   morphisms coeff Abs_poly by (auto intro!: ALL_MOST)
@@ -440,7 +440,7 @@
 
 definition poly :: "'a::comm_semiring_0 poly \<Rightarrow> 'a \<Rightarrow> 'a"
 where
-  "poly p = fold_coeffs (\<lambda>a f x. a + x * f x) p (\<lambda>x. 0)" -- \<open>The Horner Schema\<close>
+  "poly p = fold_coeffs (\<lambda>a f x. a + x * f x) p (\<lambda>x. 0)" \<comment> \<open>The Horner Schema\<close>
 
 lemma poly_0 [simp]:
   "poly 0 x = 0"
@@ -1887,7 +1887,7 @@
     by (rule poly_dvd_antisym)
 qed
 
-interpretation gcd_poly!: abel_semigroup "gcd :: _ poly \<Rightarrow> _"
+interpretation gcd_poly: abel_semigroup "gcd :: _ poly \<Rightarrow> _"
 proof
   fix x y z :: "'a poly"
   show "gcd (gcd x y) z = gcd x (gcd y z)"
--- a/src/HOL/Library/Predicate_Compile_Alternative_Defs.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Predicate_Compile_Alternative_Defs.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -177,7 +177,7 @@
 
 section \<open>Alternative list definitions\<close>
 
-subsection \<open>Alternative rules for @{text length}\<close>
+subsection \<open>Alternative rules for \<open>length\<close>\<close>
 
 definition size_list' :: "'a list => nat"
 where "size_list' = size"
@@ -191,7 +191,7 @@
 declare size_list'_def[symmetric, code_pred_inline]
 
 
-subsection \<open>Alternative rules for @{text list_all2}\<close>
+subsection \<open>Alternative rules for \<open>list_all2\<close>\<close>
 
 lemma list_all2_NilI [code_pred_intro]: "list_all2 P [] []"
 by auto
--- a/src/HOL/Library/Quotient_Type.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Quotient_Type.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -14,8 +14,8 @@
 
 subsection \<open>Equivalence relations and quotient types\<close>
 
-text \<open>Type class @{text equiv} models equivalence relations
-  @{text "\<sim> :: 'a \<Rightarrow> 'a \<Rightarrow> bool"}.\<close>
+text \<open>Type class \<open>equiv\<close> models equivalence relations
+  \<open>\<sim> :: 'a \<Rightarrow> 'a \<Rightarrow> bool\<close>.\<close>
 
 class eqv =
   fixes eqv :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infixl "\<sim>" 50)
@@ -57,7 +57,7 @@
 
 end
 
-text \<open>The quotient type @{text "'a quot"} consists of all \emph{equivalence
+text \<open>The quotient type \<open>'a quot\<close> consists of all \emph{equivalence
   classes} over elements of the base type @{typ 'a}.\<close>
 
 definition (in eqv) "quot = {{x. a \<sim> x} | a. True}"
--- a/src/HOL/Library/RBT_Impl.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/RBT_Impl.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -10,7 +10,7 @@
 begin
 
 text \<open>
-  For applications, you should use theory @{text RBT} which defines
+  For applications, you should use theory \<open>RBT\<close> which defines
   an abstract type of red-black tree obeying the invariant.
 \<close>
 
@@ -305,7 +305,7 @@
   "inv1 Empty = True"
 | "inv1 (Branch c lt k v rt) \<longleftrightarrow> inv1 lt \<and> inv1 rt \<and> (c = B \<or> color_of lt = B \<and> color_of rt = B)"
 
-primrec inv1l :: "('a, 'b) rbt \<Rightarrow> bool" -- \<open>Weaker version\<close>
+primrec inv1l :: "('a, 'b) rbt \<Rightarrow> bool" \<comment> \<open>Weaker version\<close>
 where
   "inv1l Empty = True"
 | "inv1l (Branch c l k v r) = (inv1 l \<and> inv1 r)"
--- a/src/HOL/Library/RBT_Mapping.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/RBT_Mapping.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -100,11 +100,11 @@
 text \<open>
   The type @{typ "('k, 'v) RBT_Impl.rbt"} denotes red-black trees with
   keys of type @{typ "'k"} and values of type @{typ "'v"}. To function
-  properly, the key type musorted belong to the @{text "linorder"}
+  properly, the key type must belong to the \<open>linorder\<close>
   class.
 
   A value @{term t} of this type is a valid red-black tree if it
-  satisfies the invariant @{text "is_rbt t"}.  The abstract type @{typ
+  satisfies the invariant \<open>is_rbt t\<close>.  The abstract type @{typ
   "('k, 'v) rbt"} always obeys this invariant, and for this reason you
   should only use this in our application.  Going back to @{typ "('k,
   'v) RBT_Impl.rbt"} may be necessary in proofs if not yet proven
@@ -155,25 +155,25 @@
 
 text \<open>
   \noindent
-  @{thm Empty_is_rbt}\hfill(@{text "Empty_is_rbt"})
+  @{thm Empty_is_rbt}\hfill(\<open>Empty_is_rbt\<close>)
 
   \noindent
-  @{thm rbt_insert_is_rbt}\hfill(@{text "rbt_insert_is_rbt"})
+  @{thm rbt_insert_is_rbt}\hfill(\<open>rbt_insert_is_rbt\<close>)
 
   \noindent
-  @{thm rbt_delete_is_rbt}\hfill(@{text "delete_is_rbt"})
+  @{thm rbt_delete_is_rbt}\hfill(\<open>delete_is_rbt\<close>)
 
   \noindent
-  @{thm rbt_bulkload_is_rbt}\hfill(@{text "bulkload_is_rbt"})
+  @{thm rbt_bulkload_is_rbt}\hfill(\<open>bulkload_is_rbt\<close>)
 
   \noindent
-  @{thm rbt_map_entry_is_rbt}\hfill(@{text "map_entry_is_rbt"})
+  @{thm rbt_map_entry_is_rbt}\hfill(\<open>map_entry_is_rbt\<close>)
 
   \noindent
-  @{thm map_is_rbt}\hfill(@{text "map_is_rbt"})
+  @{thm map_is_rbt}\hfill(\<open>map_is_rbt\<close>)
 
   \noindent
-  @{thm rbt_union_is_rbt}\hfill(@{text "union_is_rbt"})
+  @{thm rbt_union_is_rbt}\hfill(\<open>union_is_rbt\<close>)
 \<close>
 
 
@@ -181,27 +181,27 @@
 
 text \<open>
   \noindent
-  \underline{@{text "lookup_empty"}}
+  \underline{\<open>lookup_empty\<close>}
   @{thm [display] lookup_empty}
   \vspace{1ex}
 
   \noindent
-  \underline{@{text "lookup_insert"}}
+  \underline{\<open>lookup_insert\<close>}
   @{thm [display] lookup_insert}
   \vspace{1ex}
 
   \noindent
-  \underline{@{text "lookup_delete"}}
+  \underline{\<open>lookup_delete\<close>}
   @{thm [display] lookup_delete}
   \vspace{1ex}
 
   \noindent
-  \underline{@{text "lookup_bulkload"}}
+  \underline{\<open>lookup_bulkload\<close>}
   @{thm [display] lookup_bulkload}
   \vspace{1ex}
 
   \noindent
-  \underline{@{text "lookup_map"}}
+  \underline{\<open>lookup_map\<close>}
   @{thm [display] lookup_map}
   \vspace{1ex}
 \<close>
--- a/src/HOL/Library/Ramsey.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Ramsey.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -120,7 +120,7 @@
 subsubsection \<open>``Axiom'' of Dependent Choice\<close>
 
 primrec choice :: "('a => bool) => ('a * 'a) set => nat => 'a" where
-  --\<open>An integer-indexed chain of choices\<close>
+  \<comment>\<open>An integer-indexed chain of choices\<close>
     choice_0:   "choice P r 0 = (SOME x. P x)"
   | choice_Suc: "choice P r (Suc n) = (SOME y. P y & (choice P r n, y) \<in> r)"
 
@@ -157,7 +157,7 @@
 subsubsection \<open>Partitions of a Set\<close>
 
 definition part :: "nat => nat => 'a set => ('a set => nat) => bool"
-  --\<open>the function @{term f} partitions the @{term r}-subsets of the typically
+  \<comment>\<open>the function @{term f} partitions the @{term r}-subsets of the typically
        infinite set @{term Y} into @{term s} distinct categories.\<close>
 where
   "part r s Y f = (\<forall>X. X \<subseteq> Y & finite X & card X = r --> f X < s)"
--- a/src/HOL/Library/Saturated.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Saturated.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -214,8 +214,8 @@
 
 end
 
-interpretation Inf_sat!: semilattice_neutr_set min "top :: 'a::len sat"
-where
+interpretation Inf_sat: semilattice_neutr_set min "top :: 'a::len sat"
+rewrites
   "semilattice_neutr_set.F min (top :: 'a sat) = Inf"
 proof -
   show "semilattice_neutr_set min (top :: 'a sat)"
@@ -224,8 +224,8 @@
     by (simp add: Inf_sat_def)
 qed
 
-interpretation Sup_sat!: semilattice_neutr_set max "bot :: 'a::len sat"
-where
+interpretation Sup_sat: semilattice_neutr_set max "bot :: 'a::len sat"
+rewrites
   "semilattice_neutr_set.F max (bot :: 'a sat) = Sup"
 proof -
   show "semilattice_neutr_set max (bot :: 'a sat)"
--- a/src/HOL/Library/Set_Algebras.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Set_Algebras.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -11,7 +11,7 @@
 text \<open>
   This library lifts operations like addition and multiplication to
   sets.  It was designed to support asymptotic calculations. See the
-  comments at the top of theory @{text BigO}.
+  comments at the top of theory \<open>BigO\<close>.
 \<close>
 
 instantiation set :: (plus) plus
--- a/src/HOL/Library/State_Monad.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/State_Monad.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -32,26 +32,26 @@
 
   \begin{description}
 
-    \item[transformations] with type signature @{text "\<sigma> \<Rightarrow> \<sigma>'"},
+    \item[transformations] with type signature \<open>\<sigma> \<Rightarrow> \<sigma>'\<close>,
       transforming a state.
 
-    \item[``yielding'' transformations] with type signature @{text "\<sigma>
-      \<Rightarrow> \<alpha> \<times> \<sigma>'"}, ``yielding'' a side result while transforming a
+    \item[``yielding'' transformations] with type signature \<open>\<sigma>
+      \<Rightarrow> \<alpha> \<times> \<sigma>'\<close>, ``yielding'' a side result while transforming a
       state.
 
-    \item[queries] with type signature @{text "\<sigma> \<Rightarrow> \<alpha>"}, computing a
+    \item[queries] with type signature \<open>\<sigma> \<Rightarrow> \<alpha>\<close>, computing a
       result dependent on a state.
 
   \end{description}
 
-  By convention we write @{text "\<sigma>"} for types representing states and
-  @{text "\<alpha>"}, @{text "\<beta>"}, @{text "\<gamma>"}, @{text "\<dots>"} for types
+  By convention we write \<open>\<sigma>\<close> for types representing states and
+  \<open>\<alpha>\<close>, \<open>\<beta>\<close>, \<open>\<gamma>\<close>, \<open>\<dots>\<close> for types
   representing side results.  Type changes due to transformations are
   not excluded in our scenario.
 
-  We aim to assert that values of any state type @{text "\<sigma>"} are used
+  We aim to assert that values of any state type \<open>\<sigma>\<close> are used
   in a single-threaded way: after application of a transformation on a
-  value of type @{text "\<sigma>"}, the former value should not be used
+  value of type \<open>\<sigma>\<close>, the former value should not be used
   again.  To achieve this, we use a set of monad combinators:
 \<close>
 
--- a/src/HOL/Library/Sublist_Order.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Sublist_Order.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -11,8 +11,8 @@
 
 text \<open>
   This theory defines sublist ordering on lists.
-  A list @{text ys} is a sublist of a list @{text xs},
-  iff one obtains @{text ys} by erasing some elements from @{text xs}.
+  A list \<open>ys\<close> is a sublist of a list \<open>xs\<close>,
+  iff one obtains \<open>ys\<close> by erasing some elements from \<open>xs\<close>.
 \<close>
 
 subsection \<open>Definitions and basic lemmas\<close>
--- a/src/HOL/Library/Tree.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Library/Tree.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -146,7 +146,7 @@
   (heap l \<and> heap r \<and> (\<forall>x \<in> set_tree l \<union> set_tree r. m \<le> x))"
 
 
-subsection "Function @{text mirror}"
+subsection "Function \<open>mirror\<close>"
 
 fun mirror :: "'a tree \<Rightarrow> 'a tree" where
 "mirror \<langle>\<rangle> = Leaf" |
--- a/src/HOL/List.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/List.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5150,8 +5150,8 @@
 definition sorted_list_of_set :: "'a set \<Rightarrow> 'a list" where
   "sorted_list_of_set = folding.F insort []"
 
-sublocale sorted_list_of_set!: folding insort Nil
-where
+sublocale sorted_list_of_set: folding insort Nil
+rewrites
   "folding.F insort [] = sorted_list_of_set"
 proof -
   interpret comp_fun_commute insort by (fact comp_fun_commute_insort)
--- a/src/HOL/Multivariate_Analysis/Complex_Analysis_Basics.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/Complex_Analysis_Basics.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,7 +5,7 @@
 section \<open>Complex Analysis Basics\<close>
 
 theory Complex_Analysis_Basics
-imports  "~~/src/HOL/Multivariate_Analysis/Cartesian_Euclidean_Space"
+imports Cartesian_Euclidean_Space
 begin
 
 
--- a/src/HOL/Multivariate_Analysis/Complex_Transcendental.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/Complex_Transcendental.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5,7 +5,7 @@
 section \<open>Complex Transcendental Functions\<close>
 
 theory Complex_Transcendental
-imports  "~~/src/HOL/Multivariate_Analysis/Complex_Analysis_Basics"
+imports Complex_Analysis_Basics
 begin
 
 lemma cmod_add_real_less:
--- a/src/HOL/Multivariate_Analysis/Derivative.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/Derivative.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -6,7 +6,7 @@
 section \<open>Multivariate calculus in Euclidean space\<close>
 
 theory Derivative
-imports Brouwer_Fixpoint Operator_Norm "~~/src/HOL/Multivariate_Analysis/Uniform_Limit"
+imports Brouwer_Fixpoint Operator_Norm Uniform_Limit
 begin
 
 lemma netlimit_at_vector: (* TODO: move *)
--- a/src/HOL/Multivariate_Analysis/Extended_Real_Limits.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/Extended_Real_Limits.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -8,7 +8,10 @@
 section \<open>Limits on the Extended real number line\<close>
 
 theory Extended_Real_Limits
-  imports Topology_Euclidean_Space "~~/src/HOL/Library/Extended_Real" "~~/src/HOL/Library/Indicator_Function"
+imports
+  Topology_Euclidean_Space
+  "~~/src/HOL/Library/Extended_Real"
+  "~~/src/HOL/Library/Indicator_Function"
 begin
 
 lemma compact_UNIV:
--- a/src/HOL/Multivariate_Analysis/PolyRoots.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/PolyRoots.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,12 +1,11 @@
-section \<open>polynomial functions: extremal behaviour and root counts\<close>
-
 (*  Author: John Harrison and Valentina Bruno
     Ported from "hol_light/Multivariate/complexes.ml" by L C Paulson
 *)
 
+section \<open>polynomial functions: extremal behaviour and root counts\<close>
+
 theory PolyRoots
 imports Complex_Main
-
 begin
 
 subsection\<open>Geometric progressions\<close>
--- a/src/HOL/Multivariate_Analysis/Weierstrass.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/Weierstrass.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,8 +1,7 @@
-section\<open>Bernstein-Weierstrass and Stone-Weierstrass Theorems\<close>
+section \<open>Bernstein-Weierstrass and Stone-Weierstrass Theorems\<close>
 
 theory Weierstrass
 imports Uniform_Limit Path_Connected
-
 begin
 
 (*Power.thy:*)
--- a/src/HOL/Multivariate_Analysis/ex/Approximations.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Multivariate_Analysis/ex/Approximations.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1,7 +1,7 @@
 section \<open>Binary Approximations to Constants\<close>
 
 theory Approximations
-imports "~~/src/HOL/Multivariate_Analysis/Complex_Transcendental"
+imports Complex_Transcendental
 begin
 
 declare of_real_numeral [simp]
--- a/src/HOL/Nominal/Examples/Class1.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Nominal/Examples/Class1.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -5189,7 +5189,7 @@
 using a2 a1
 by (induct) (auto)
 
-text {* congruence rules for \<longrightarrow>\<^sub>a* *}
+text {* congruence rules for \<open>\<longrightarrow>\<^sub>a*\<close> *}
 
 lemma ax_do_not_a_star_reduce:
   shows "Ax x a \<longrightarrow>\<^sub>a* M \<Longrightarrow> M = Ax x a"
--- a/src/HOL/Number_Theory/Euclidean_Algorithm.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Number_Theory/Euclidean_Algorithm.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -238,7 +238,7 @@
   shows "c = gcd a b"
   by (rule associated_eqI) (auto simp: assms intro: gcd_greatest)
 
-sublocale gcd!: abel_semigroup gcd
+sublocale gcd: abel_semigroup gcd
 proof
   fix a b c 
   show "gcd (gcd a b) c = gcd a (gcd b c)"
@@ -790,7 +790,7 @@
   shows "c = lcm a b"
   by (rule associated_eqI) (auto simp: assms intro: lcm_least)
 
-sublocale lcm!: abel_semigroup lcm ..
+sublocale lcm: abel_semigroup lcm ..
 
 lemma dvd_lcm_D1:
   "lcm m n dvd k \<Longrightarrow> m dvd k"
--- a/src/HOL/Orderings.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Orderings.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -191,7 +191,7 @@
 lemma less_le: "x < y \<longleftrightarrow> x \<le> y \<and> x \<noteq> y"
   by (auto simp add: less_le_not_le intro: antisym)
 
-sublocale order!: ordering less_eq less +  dual_order!: ordering greater_eq greater
+sublocale order: ordering less_eq less +  dual_order: ordering greater_eq greater
   by standard (auto intro: antisym order_trans simp add: less_le)
 
 
@@ -1181,7 +1181,7 @@
   assumes bot_least: "\<bottom> \<le> a"
 begin
 
-sublocale bot!: ordering_top greater_eq greater bot
+sublocale bot: ordering_top greater_eq greater bot
   by standard (fact bot_least)
 
 lemma le_bot:
@@ -1209,7 +1209,7 @@
   assumes top_greatest: "a \<le> \<top>"
 begin
 
-sublocale top!: ordering_top less_eq less top
+sublocale top: ordering_top less_eq less top
   by standard (fact top_greatest)
 
 lemma top_le:
--- a/src/HOL/Partial_Function.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Partial_Function.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -281,14 +281,14 @@
 lemma antisymP_flat_ord: "antisymP (flat_ord a)"
 by(rule antisymI)(auto dest: flat_ord_antisym)
 
-interpretation tailrec!:
+interpretation tailrec:
   partial_function_definitions "flat_ord undefined" "flat_lub undefined"
-  where "flat_lub undefined {} \<equiv> undefined"
+  rewrites "flat_lub undefined {} \<equiv> undefined"
 by (rule flat_interpretation)(simp add: flat_lub_def)
 
-interpretation option!:
+interpretation option:
   partial_function_definitions "flat_ord None" "flat_lub None"
-  where "flat_lub None {} \<equiv> None"
+  rewrites "flat_lub None {} \<equiv> None"
 by (rule flat_interpretation)(simp add: flat_lub_def)
 
 
--- a/src/HOL/Probability/Binary_Product_Measure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Binary_Product_Measure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -322,7 +322,7 @@
 
 subsection {* Binary products of $\sigma$-finite emeasure spaces *}
 
-locale pair_sigma_finite = M1: sigma_finite_measure M1 + M2: sigma_finite_measure M2
+locale pair_sigma_finite = M1?: sigma_finite_measure M1 + M2?: sigma_finite_measure M2
   for M1 :: "'a measure" and M2 :: "'b measure"
 
 lemma (in pair_sigma_finite) measurable_emeasure_Pair1:
@@ -379,7 +379,7 @@
   qed
 qed
 
-sublocale pair_sigma_finite \<subseteq> P: sigma_finite_measure "M1 \<Otimes>\<^sub>M M2"
+sublocale pair_sigma_finite \<subseteq> P?: sigma_finite_measure "M1 \<Otimes>\<^sub>M M2"
 proof
   from M1.sigma_finite_countable guess F1 ..
   moreover from M2.sigma_finite_countable guess F2 ..
--- a/src/HOL/Probability/Finite_Product_Measure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Finite_Product_Measure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -829,7 +829,7 @@
   fixes M :: "'i \<Rightarrow> 'a measure"
   assumes sigma_finite_measures: "\<And>i. sigma_finite_measure (M i)"
 
-sublocale product_sigma_finite \<subseteq> M: sigma_finite_measure "M i" for i
+sublocale product_sigma_finite \<subseteq> M?: sigma_finite_measure "M i" for i
   by (rule sigma_finite_measures)
 
 locale finite_product_sigma_finite = product_sigma_finite M for M :: "'i \<Rightarrow> 'a measure" +
--- a/src/HOL/Probability/Giry_Monad.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Giry_Monad.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -133,7 +133,7 @@
 locale pair_subprob_space = 
   pair_sigma_finite M1 M2 + M1: subprob_space M1 + M2: subprob_space M2 for M1 M2
 
-sublocale pair_subprob_space \<subseteq> P: subprob_space "M1 \<Otimes>\<^sub>M M2"
+sublocale pair_subprob_space \<subseteq> P?: subprob_space "M1 \<Otimes>\<^sub>M M2"
 proof
   have "\<And>a b. \<lbrakk>a \<ge> 0; b \<ge> 0; a \<le> 1; b \<le> 1\<rbrakk> \<Longrightarrow> a * b \<le> (1::ereal)"
     by (metis monoid_mult_class.mult.left_neutral dual_order.trans ereal_mult_right_mono)
--- a/src/HOL/Probability/Infinite_Product_Measure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Infinite_Product_Measure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -51,7 +51,7 @@
     emeasure (Pi\<^sub>M I M) (emb I J (Pi\<^sub>E J X)) = (\<Prod> i\<in>J. emeasure (M i) (X i))"
   by (subst emeasure_PiM_emb') (auto intro!: emeasure_PiM)
 
-sublocale product_prob_space \<subseteq> P: prob_space "Pi\<^sub>M I M"
+sublocale product_prob_space \<subseteq> P?: prob_space "Pi\<^sub>M I M"
 proof
   have *: "emb I {} {\<lambda>x. undefined} = space (PiM I M)"
     by (auto simp: prod_emb_def space_PiM)
--- a/src/HOL/Probability/Lebesgue_Measure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Lebesgue_Measure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -709,7 +709,7 @@
 lemma lborel_distr_plus: "distr lborel borel (op + c) = (lborel :: real measure)"
   by (subst lborel_real_affine[of 1 c]) (auto simp: density_1 one_ereal_def[symmetric])
 
-interpretation lborel!: sigma_finite_measure lborel
+interpretation lborel: sigma_finite_measure lborel
   by (rule sigma_finite_lborel)
 
 interpretation lborel_pair: pair_sigma_finite lborel lborel ..
--- a/src/HOL/Probability/Probability_Mass_Function.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Probability_Mass_Function.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -113,10 +113,10 @@
 lemma prob_space_measure_pmf: "prob_space (measure_pmf p)"
   using pmf.measure_pmf[of p] by auto
 
-interpretation measure_pmf!: prob_space "measure_pmf M" for M
+interpretation measure_pmf: prob_space "measure_pmf M" for M
   by (rule prob_space_measure_pmf)
 
-interpretation measure_pmf!: subprob_space "measure_pmf M" for M
+interpretation measure_pmf: subprob_space "measure_pmf M" for M
   by (rule prob_space_imp_subprob_space) unfold_locales
 
 lemma subprob_space_measure_pmf: "subprob_space (measure_pmf x)"
--- a/src/HOL/Probability/Probability_Measure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Probability_Measure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -46,7 +46,7 @@
 lemma prob_space_distrD:
   assumes f: "f \<in> measurable M N" and M: "prob_space (distr M N f)" shows "prob_space M"
 proof
-  interpret M!: prob_space "distr M N f" by fact
+  interpret M: prob_space "distr M N f" by fact
   have "f -` space N \<inter> space M = space M"
     using f[THEN measurable_space] by auto
   then show "emeasure M (space M) = 1"
@@ -461,7 +461,7 @@
 
 locale pair_prob_space = pair_sigma_finite M1 M2 + M1: prob_space M1 + M2: prob_space M2 for M1 M2
 
-sublocale pair_prob_space \<subseteq> P: prob_space "M1 \<Otimes>\<^sub>M M2"
+sublocale pair_prob_space \<subseteq> P?: prob_space "M1 \<Otimes>\<^sub>M M2"
 proof
   show "emeasure (M1 \<Otimes>\<^sub>M M2) (space (M1 \<Otimes>\<^sub>M M2)) = 1"
     by (simp add: M2.emeasure_pair_measure_Times M1.emeasure_space_1 M2.emeasure_space_1 space_pair_measure)
@@ -471,7 +471,7 @@
   fixes I :: "'i set"
   assumes prob_space: "\<And>i. prob_space (M i)"
 
-sublocale product_prob_space \<subseteq> M: prob_space "M i" for i
+sublocale product_prob_space \<subseteq> M?: prob_space "M i" for i
   by (rule prob_space)
 
 locale finite_product_prob_space = finite_product_sigma_finite M I + product_prob_space M I for M I
--- a/src/HOL/Probability/Projective_Family.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Projective_Family.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -97,7 +97,7 @@
     by simp
 qed (force simp: generator.simps prod_emb_empty[symmetric])
 
-interpretation generator!: algebra "space (PiM I M)" generator
+interpretation generator: algebra "space (PiM I M)" generator
   by (rule algebra_generator)
 
 lemma sets_PiM_generator: "sets (PiM I M) = sigma_sets (space (PiM I M)) generator"
@@ -407,7 +407,7 @@
 definition CI :: "nat set \<Rightarrow> (nat \<Rightarrow> 'a) measure" where
   "CI J = distr (C 0 (up_to J) (\<lambda>x. undefined)) (PiM J M) (\<lambda>f. restrict f J)"
 
-sublocale PF!: projective_family UNIV CI
+sublocale PF: projective_family UNIV CI
   unfolding projective_family_def
 proof safe
   show "finite J \<Longrightarrow> prob_space (CI J)" for J
@@ -460,7 +460,7 @@
   also have "\<dots> \<le> (INF i. C 0 i (\<lambda>x. undefined) (X i))"
   proof (intro INF_greatest)
     fix n
-    interpret C!: prob_space "C 0 n (\<lambda>x. undefined)"
+    interpret C: prob_space "C 0 n (\<lambda>x. undefined)"
       by (rule prob_space_C) simp
     show "(INF i. CI (J i) (X' i)) \<le> C 0 n (\<lambda>x. undefined) (X n)"
     proof cases
@@ -606,9 +606,9 @@
       using count by (auto simp: t_def)
     then have inj_t_J: "inj_on t (J i)" for i
       by (rule subset_inj_on) auto
-    interpret IT!: Ionescu_Tulcea "\<lambda>i \<omega>. M (f i)" "\<lambda>i. M (f i)"
+    interpret IT: Ionescu_Tulcea "\<lambda>i \<omega>. M (f i)" "\<lambda>i. M (f i)"
       by standard auto
-    interpret Mf!: product_prob_space "\<lambda>x. M (f x)" UNIV
+    interpret Mf: product_prob_space "\<lambda>x. M (f x)" UNIV
       by standard
     have C_eq_PiM: "IT.C 0 n (\<lambda>_. undefined) = PiM {0..<n} (\<lambda>x. M (f x))" for n
     proof (induction n)
--- a/src/HOL/Probability/Projective_Limit.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Projective_Limit.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -469,7 +469,7 @@
 hide_const (open) domain
 hide_const (open) basis_finmap
 
-sublocale polish_projective \<subseteq> P!: prob_space lim
+sublocale polish_projective \<subseteq> P: prob_space lim
 proof
   have *: "emb I {} {\<lambda>x. undefined} = space (\<Pi>\<^sub>M i\<in>I. borel)"
     by (auto simp: prod_emb_def space_PiM)
--- a/src/HOL/Probability/Radon_Nikodym.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Radon_Nikodym.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -935,7 +935,7 @@
     by (auto intro: density_cong)
 next
   assume eq: "density M f = density M g"
-  interpret f!: sigma_finite_measure "density M f" by fact
+  interpret f: sigma_finite_measure "density M f" by fact
   from f.sigma_finite_incseq guess A . note cover = this
 
   have "AE x in M. \<forall>i. x \<in> A i \<longrightarrow> f x = g x"
--- a/src/HOL/Probability/Sigma_Algebra.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Probability/Sigma_Algebra.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -1479,7 +1479,7 @@
 lemma measure_space: "measure_space (space M) (sets M) (emeasure M)"
   by (cases M) (auto simp: space_def sets_def emeasure_def Abs_measure_inverse)
 
-interpretation sets!: sigma_algebra "space M" "sets M" for M :: "'a measure"
+interpretation sets: sigma_algebra "space M" "sets M" for M :: "'a measure"
   using measure_space[of M] by (auto simp: measure_space_def)
 
 definition measure_of :: "'a set \<Rightarrow> 'a set set \<Rightarrow> ('a set \<Rightarrow> ereal) \<Rightarrow> 'a measure" where
--- a/src/HOL/Statespace/state_space.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Statespace/state_space.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -642,7 +642,7 @@
 val renames = Scan.optional (@{keyword "["} |-- Parse.!!! (Parse.list1 rename --| @{keyword "]"})) [];
 
 val parent =
-  Parse_Spec.locale_prefix false --
+  Parse_Spec.locale_prefix --
   ((type_insts -- Parse.xname) || (Parse.xname >> pair [])) -- renames
     >> (fn ((prefix, (insts, name)), renames) => (prefix, (insts, name, renames)));
 
--- a/src/HOL/TPTP/atp_problem_import.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/TPTP/atp_problem_import.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -82,7 +82,7 @@
     val nondefs = pseudo_defs @ nondefs
     val state = Proof.init ctxt
     val params =
-      [("card", "1\<emdash>100"),
+      [("card", "1-100"),
        ("box", "false"),
        ("max_threads", "1"),
        ("batch_size", "5"),
--- a/src/HOL/Tools/Nitpick/nitpick_commands.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Tools/Nitpick/nitpick_commands.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -34,9 +34,9 @@
 type raw_param = string * string list
 
 val default_default_params =
-  [("card", "1\<emdash>10"),
+  [("card", "1-10"),
    ("iter", "0,1,2,4,8,12,16,20,24,28"),
-   ("bits", "1\<emdash>10"),
+   ("bits", "1-10"),
    ("bisim_depth", "9"),
    ("box", "smart"),
    ("finitize", "smart"),
@@ -138,7 +138,7 @@
   Data.map o fold (AList.update (op =)) o normalize_raw_param
 val default_raw_params = Data.get
 
-fun is_punctuation s = (s = "," orelse s = "-" orelse s = "\<emdash>")
+fun is_punctuation s = (s = "," orelse s = "-")
 
 fun stringify_raw_param_value [] = ""
   | stringify_raw_param_value [s] = s
@@ -177,7 +177,7 @@
       let
         val (k1, k2) =
           (case space_explode "-" s of
-             [s] => the_default (s, s) (first_field "\<emdash>" s)
+             [s] => (s, s)
            | ["", s2] => ("-" ^ s2, "-" ^ s2)
            | [s1, s2] => (s1, s2)
            | _ => raise Option.Option)
--- a/src/HOL/Tools/SMT/smt_systems.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Tools/SMT/smt_systems.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -101,13 +101,12 @@
   command = make_command "VERIT",
   options = (fn ctxt => [
     "--proof-version=1",
-    "--proof=-",
     "--proof-prune",
     "--proof-merge",
     "--disable-print-success",
     "--disable-banner",
     "--max-time=" ^ string_of_int (Real.ceil (Config.get ctxt SMT_Config.timeout))]),
-  smt_options = [],
+  smt_options = [(":produce-proofs", "true")],
   default_max_relevant = 200 (* FUDGE *),
   outcome = on_first_non_unsupported_line (outcome_of "unsat" "sat"
     "warning : proof_done: status is still open"),
--- a/src/HOL/Tools/Sledgehammer/async_manager_legacy.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/Tools/Sledgehammer/async_manager_legacy.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -24,7 +24,7 @@
       Runtime.debugging NONE body () handle exn =>
         if Exn.is_interrupt exn then ()
         else writeln ("## INTERNAL ERROR ##\n" ^ Runtime.exn_message exn),
-      Simple_Thread.attributes
+      Standard_Thread.attributes
         {name = "async_manager", stack_limit = NONE, interrupts = interrupts});
 
 fun implode_message (workers, work) =
@@ -108,7 +108,7 @@
               NONE
             else
               let
-                val _ = List.app (Simple_Thread.interrupt_unsynchronized o #1) canceling
+                val _ = List.app (Standard_Thread.interrupt_unsynchronized o #1) canceling
                 val canceling' = filter (Thread.isActive o #1) canceling
                 val state' = make_state manager timeout_heap' active canceling' messages
               in SOME (map #2 timeout_threads, state') end
--- a/src/HOL/ex/LocaleTest2.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/ex/LocaleTest2.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -469,7 +469,7 @@
 subsubsection \<open>Total order @{text "<="} on @{typ int}\<close>
 
 interpretation int: dpo "op <= :: [int, int] => bool"
-  where "(dpo.less (op <=) (x::int) y) = (x < y)"
+  rewrites "(dpo.less (op <=) (x::int) y) = (x < y)"
   txt \<open>We give interpretation for less, but not @{text is_inf} and @{text is_sub}.\<close>
 proof -
   show "dpo (op <= :: [int, int] => bool)"
@@ -488,7 +488,7 @@
   apply (rule int.abs_test) done
 
 interpretation int: dlat "op <= :: [int, int] => bool"
-  where meet_eq: "dlat.meet (op <=) (x::int) y = min x y"
+  rewrites meet_eq: "dlat.meet (op <=) (x::int) y = min x y"
     and join_eq: "dlat.join (op <=) (x::int) y = max x y"
 proof -
   show "dlat (op <= :: [int, int] => bool)"
@@ -525,7 +525,7 @@
 subsubsection \<open>Total order @{text "<="} on @{typ nat}\<close>
 
 interpretation nat: dpo "op <= :: [nat, nat] => bool"
-  where "dpo.less (op <=) (x::nat) y = (x < y)"
+  rewrites "dpo.less (op <=) (x::nat) y = (x < y)"
   txt \<open>We give interpretation for less, but not @{text is_inf} and @{text is_sub}.\<close>
 proof -
   show "dpo (op <= :: [nat, nat] => bool)"
@@ -539,7 +539,7 @@
 qed
 
 interpretation nat: dlat "op <= :: [nat, nat] => bool"
-  where "dlat.meet (op <=) (x::nat) y = min x y"
+  rewrites "dlat.meet (op <=) (x::nat) y = min x y"
     and "dlat.join (op <=) (x::nat) y = max x y"
 proof -
   show "dlat (op <= :: [nat, nat] => bool)"
@@ -576,7 +576,7 @@
 subsubsection \<open>Lattice @{text "dvd"} on @{typ nat}\<close>
 
 interpretation nat_dvd: dpo "op dvd :: [nat, nat] => bool"
-  where "dpo.less (op dvd) (x::nat) y = (x dvd y & x ~= y)"
+  rewrites "dpo.less (op dvd) (x::nat) y = (x dvd y & x ~= y)"
   txt \<open>We give interpretation for less, but not @{text is_inf} and @{text is_sub}.\<close>
 proof -
   show "dpo (op dvd :: [nat, nat] => bool)"
@@ -590,7 +590,7 @@
 qed
 
 interpretation nat_dvd: dlat "op dvd :: [nat, nat] => bool"
-  where "dlat.meet (op dvd) (x::nat) y = gcd x y"
+  rewrites "dlat.meet (op dvd) (x::nat) y = gcd x y"
     and "dlat.join (op dvd) (x::nat) y = lcm x y"
 proof -
   show "dlat (op dvd :: [nat, nat] => bool)"
@@ -826,7 +826,7 @@
 proof -
   have "hom one +++ zero = hom one +++ hom one"
     by (simp add: hom_mult [symmetric] del: hom_mult)
-  then show ?thesis by (simp del: r_one)
+  then show ?thesis by (simp del: sum.r_one)
 qed
 
 end
@@ -835,7 +835,7 @@
 subsubsection \<open>Interpretation of Functions\<close>
 
 interpretation Dfun: Dmonoid "op o" "id :: 'a => 'a"
-  where "Dmonoid.unit (op o) id f = bij (f::'a => 'a)"
+  rewrites "Dmonoid.unit (op o) id f = bij (f::'a => 'a)"
 (*    and "Dmonoid.inv (op o) id" = "inv :: ('a => 'a) => ('a => 'a)" *)
 proof -
   show "Dmonoid op o (id :: 'a => 'a)" proof qed (simp_all add: o_assoc)
@@ -885,7 +885,7 @@
   by rule simp
 
 interpretation Dfun: Dgrp "op o" "id :: unit => unit"
-  where "Dmonoid.inv (op o) id f = inv (f :: unit => unit)"
+  rewrites "Dmonoid.inv (op o) id f = inv (f :: unit => unit)"
 proof -
   have "Dmonoid op o (id :: 'a => 'a)" ..
   note Dmonoid = this
--- a/src/HOL/ex/Tarski.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/HOL/ex/Tarski.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -119,7 +119,7 @@
 locale CL = S +
   assumes cl_co:  "cl : CompleteLattice"
 
-sublocale CL < po: PO
+sublocale CL < po?: PO
 apply (simp_all add: A_def r_def)
 apply unfold_locales
 using cl_co unfolding CompleteLattice_def by auto
@@ -130,7 +130,7 @@
   assumes f_cl:  "(cl,f) : CLF_set" (*was the equivalent "f : CLF_set``{cl}"*)
   defines P_def: "P == fix f A"
 
-sublocale CLF < cl: CL
+sublocale CLF < cl?: CL
 apply (simp_all add: A_def r_def)
 apply unfold_locales
 using f_cl unfolding CLF_set_def by auto
--- a/src/Pure/Concurrent/bash.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/bash.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -31,7 +31,7 @@
     val _ = cleanup_files ();
 
     val system_thread =
-      Simple_Thread.fork {name = "bash", stack_limit = NONE, interrupts = false} (fn () =>
+      Standard_Thread.fork {name = "bash", stack_limit = NONE, interrupts = false} (fn () =>
         Multithreading.with_attributes Multithreading.private_interrupts (fn _ =>
           let
             val _ = File.write script_path script;
@@ -83,7 +83,7 @@
           in () end;
 
     fun cleanup () =
-     (Simple_Thread.interrupt_unsynchronized system_thread;
+     (Standard_Thread.interrupt_unsynchronized system_thread;
       cleanup_files ());
   in
     let
--- a/src/Pure/Concurrent/bash_windows.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/bash_windows.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -35,7 +35,7 @@
     val _ = cleanup_files ();
 
     val system_thread =
-      Simple_Thread.fork {name = "bash", stack_limit = NONE, interrupts = false} (fn () =>
+      Standard_Thread.fork {name = "bash", stack_limit = NONE, interrupts = false} (fn () =>
         Multithreading.with_attributes Multithreading.private_interrupts (fn _ =>
           let
             val _ = File.write script_path script;
@@ -74,7 +74,7 @@
           in () end;
 
     fun cleanup () =
-     (Simple_Thread.interrupt_unsynchronized system_thread;
+     (Standard_Thread.interrupt_unsynchronized system_thread;
       cleanup_files ());
   in
     let
--- a/src/Pure/Concurrent/consumer_thread.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/consumer_thread.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -32,7 +32,7 @@
   private var active = true
   private val mailbox = Mailbox[Option[Consumer_Thread.Request[A]]]
 
-  private val thread = Simple_Thread.fork(name, daemon) { main_loop(Nil) }
+  private val thread = Standard_Thread.fork(name, daemon) { main_loop(Nil) }
   def is_active: Boolean = active && thread.isAlive
 
   private def failure(exn: Throwable): Unit =
--- a/src/Pure/Concurrent/event_timer.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/event_timer.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -105,7 +105,7 @@
 fun manager_check manager =
   if is_some manager andalso Thread.isActive (the manager) then manager
   else
-    SOME (Simple_Thread.fork {name = "event_timer", stack_limit = NONE, interrupts = false}
+    SOME (Standard_Thread.fork {name = "event_timer", stack_limit = NONE, interrupts = false}
       manager_loop);
 
 fun shutdown () =
--- a/src/Pure/Concurrent/future.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/future.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -184,14 +184,14 @@
   let
     val running = Task_Queue.cancel (! queue) group;
     val _ = running |> List.app (fn thread =>
-      if Simple_Thread.is_self thread then ()
-      else Simple_Thread.interrupt_unsynchronized thread);
+      if Standard_Thread.is_self thread then ()
+      else Standard_Thread.interrupt_unsynchronized thread);
   in running end;
 
 fun cancel_all () = (*requires SYNCHRONIZED*)
   let
     val (groups, threads) = Task_Queue.cancel_all (! queue);
-    val _ = List.app Simple_Thread.interrupt_unsynchronized threads;
+    val _ = List.app Standard_Thread.interrupt_unsynchronized threads;
   in groups end;
 
 fun cancel_later group = (*requires SYNCHRONIZED*)
@@ -264,7 +264,7 @@
       Real.floor (Options.default_real "threads_stack_limit" * 1024.0 * 1024.0 * 1024.0);
     val stack_limit = if threads_stack_limit <= 0 then NONE else SOME threads_stack_limit;
     val worker =
-      Simple_Thread.fork {name = "worker", stack_limit = stack_limit, interrupts = false}
+      Standard_Thread.fork {name = "worker", stack_limit = stack_limit, interrupts = false}
         (fn () => worker_loop name);
   in Unsynchronized.change workers (cons (worker, Unsynchronized.ref Working)) end
   handle Fail msg => Multithreading.tracing 0 (fn () => "SCHEDULER: " ^ msg);
@@ -367,7 +367,7 @@
   if scheduler_active () then ()
   else
     scheduler :=
-      SOME (Simple_Thread.fork {name = "scheduler", stack_limit = NONE, interrupts = false}
+      SOME (Standard_Thread.fork {name = "scheduler", stack_limit = NONE, interrupts = false}
         scheduler_loop));
 
 
--- a/src/Pure/Concurrent/future.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/future.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -2,31 +2,24 @@
     Module:     PIDE
     Author:     Makarius
 
-Value-oriented parallel execution via futures and promises in Scala -- with
-signatures as in Isabelle/ML.
+Value-oriented parallel execution via futures and promises.
 */
 
 package isabelle
 
 
-import scala.util.{Success, Failure}
-import scala.concurrent.{ExecutionContext, ExecutionContextExecutor,
-  Future => Scala_Future, Promise => Scala_Promise, Await}
-import scala.concurrent.duration.Duration
+import java.util.concurrent.Callable
 
 
+/* futures and promises */
+
 object Future
 {
-  lazy val execution_context: ExecutionContextExecutor =
-    ExecutionContext.fromExecutorService(Simple_Thread.default_pool)
-
-  def value[A](x: A): Future[A] = new Finished_Future(x)
-
-  def fork[A](body: => A): Future[A] =
-    new Pending_Future(Scala_Future[A](body)(execution_context))
-
-  def promise[A]: Promise[A] =
-    new Promise_Future[A](Scala_Promise[A]())
+  def value[A](x: A): Future[A] = new Value_Future(x)
+  def fork[A](body: => A): Future[A] = new Task_Future[A](body)
+  def promise[A]: Promise[A] = new Promise_Future[A]
+  def thread[A](name: String = "", daemon: Boolean = false)(body: => A): Future[A] =
+    new Thread_Future[A](name, daemon, body)
 }
 
 trait Future[A]
@@ -34,8 +27,10 @@
   def peek: Option[Exn.Result[A]]
   def is_finished: Boolean = peek.isDefined
   def get_finished: A = { require(is_finished); Exn.release(peek.get) }
-  def join: A
+  def join_result: Exn.Result[A]
+  def join: A = Exn.release(join_result)
   def map[B](f: A => B): Future[B] = Future.fork { f(join) }
+  def cancel: Unit
 
   override def toString: String =
     peek match {
@@ -47,46 +42,103 @@
 
 trait Promise[A] extends Future[A]
 {
-  def cancel: Unit
   def fulfill_result(res: Exn.Result[A]): Unit
   def fulfill(x: A): Unit
 }
 
 
-private class Finished_Future[A](x: A) extends Future[A]
+/* value future */
+
+private class Value_Future[A](x: A) extends Future[A]
 {
   val peek: Option[Exn.Result[A]] = Some(Exn.Res(x))
-  val join: A = x
+  def join_result: Exn.Result[A] = peek.get
+  def cancel {}
 }
 
-private class Pending_Future[A](future: Scala_Future[A]) extends Future[A]
+
+/* task future via thread pool */
+
+private class Task_Future[A](body: => A) extends Future[A]
 {
+  private sealed abstract class Status
+  private case object Ready extends Status
+  private case class Running(thread: Thread) extends Status
+  private case object Terminated extends Status
+  private case class Finished(result: Exn.Result[A]) extends Status
+
+  private val status = Synchronized[Status](Ready)
+
   def peek: Option[Exn.Result[A]] =
-    future.value match {
-      case Some(Success(x)) => Some(Exn.Res(x))
-      case Some(Failure(e)) => Some(Exn.Exn(e))
-      case None => None
+    status.value match {
+      case Finished(result) => Some(result)
+      case _ => None
+    }
+
+  private def try_run()
+  {
+    val do_run =
+      status.change_result {
+        case Ready => (true, Running(Thread.currentThread))
+        case st => (false, st)
+      }
+    if (do_run) {
+      val result = Exn.capture(body)
+      status.change(_ => Terminated)
+      status.change(_ => Finished(if (Thread.interrupted) Exn.Exn(Exn.Interrupt()) else result))
     }
-  override def is_finished: Boolean = future.isCompleted
+  }
+  private val task = Standard_Thread.pool.submit(new Callable[A] { def call = body })
 
-  def join: A = Await.result(future, Duration.Inf)
-  override def map[B](f: A => B): Future[B] =
-    new Pending_Future[B](future.map(f)(Future.execution_context))
+  def join_result: Exn.Result[A] =
+  {
+    try_run()
+    status.guarded_access {
+      case st @ Finished(result) => Some((result, st))
+      case _ => None
+    }
+  }
+
+  def cancel =
+  {
+    status.change {
+      case Ready => task.cancel(false); Finished(Exn.Exn(Exn.Interrupt()))
+      case st @ Running(thread) => thread.interrupt; st
+      case st => st
+    }
+  }
 }
 
-private class Promise_Future[A](promise: Scala_Promise[A])
-  extends Pending_Future(promise.future) with Promise[A]
+
+/* promise future */
+
+private class Promise_Future[A] extends Promise[A]
 {
-  override def is_finished: Boolean = promise.isCompleted
+  private val state = Synchronized[Option[Exn.Result[A]]](None)
+  def peek: Option[Exn.Result[A]] = state.value
+
+  def join_result: Exn.Result[A] =
+    state.guarded_access(st => if (st.isEmpty) None else Some((st.get, st)))
+
+  def fulfill_result(result: Exn.Result[A]): Unit =
+    state.change(st => if (st.isEmpty) Some(result) else throw new IllegalStateException)
+
+  def fulfill(x: A): Unit = fulfill_result(Exn.Res(x))
 
   def cancel: Unit =
-    try { fulfill_result(Exn.Exn(Exn.Interrupt())) }
-    catch { case _: IllegalStateException => }
+    state.change(st => if (st.isEmpty) Some(Exn.Exn(Exn.Interrupt())) else st)
+}
+
+
+/* thread future */
 
-  def fulfill_result(res: Exn.Result[A]): Unit =
-    res match {
-      case Exn.Res(x) => promise.success(x)
-      case Exn.Exn(e) => promise.failure(e)
-    }
-  def fulfill(x: A): Unit = promise.success(x)
+private class Thread_Future[A](name: String, daemon: Boolean, body: => A) extends Future[A]
+{
+  private val result = Future.promise[A]
+  private val thread =
+    Standard_Thread.fork(name, daemon) { result.fulfill_result(Exn.capture(body)) }
+
+  def peek: Option[Exn.Result[A]] = result.peek
+  def join_result: Exn.Result[A] = result.join_result
+  def cancel: Unit = thread.interrupt
 }
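
A hypothetical usage sketch of the reworked Future API above (fork/thread/promise, join_result/join, cancel); it is not part of the changeset and assumes the isabelle package (Pure.jar) is on the classpath.

  package isabelle

  object Future_Sketch {
    def main(args: Array[String]): Unit = {
      val task = Future.fork { 6 * 7 }                      // runs on Standard_Thread.pool
      val thr  = Future.thread(name = "sketch") { "done" }  // runs on a dedicated thread
      val p    = Future.promise[Int]

      p.fulfill(1)
      println(task.join)      // 42 (join releases join_result)
      println(thr.join)       // done
      println(p.join_result)  // Exn.Res(1)
      p.cancel                // no effect: the promise is already fulfilled
    }
  }
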
--- a/src/Pure/Concurrent/mailbox.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/mailbox.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -17,7 +17,7 @@
 
 class Mailbox[A] private()
 {
-  private val mailbox = Synchronized(List.empty[A])
+  private val mailbox = Synchronized[List[A]](Nil)
   override def toString: String = mailbox.value.reverse.mkString("Mailbox(", ",", ")")
 
   def send(msg: A): Unit = mailbox.change(msg :: _)
--- a/src/Pure/Concurrent/par_list.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/par_list.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -8,38 +8,34 @@
 package isabelle
 
 
-import java.util.concurrent.{Future => JFuture, CancellationException}
-
-
 object Par_List
 {
   def managed_results[A, B](f: A => B, xs: List[A]): List[Exn.Result[B]] =
     if (xs.isEmpty || xs.tail.isEmpty) xs.map(x => Exn.capture { f(x) })
     else {
-      val state = Synchronized((List.empty[JFuture[Exn.Result[B]]], false))
+      val state = Synchronized[(List[Future[B]], Boolean)]((Nil, false))
 
       def cancel_other(self: Int = -1): Unit =
-        state.change { case (tasks, canceled) =>
+        state.change { case (futures, canceled) =>
           if (!canceled) {
-            for ((task, i) <- tasks.iterator.zipWithIndex if i != self)
-              task.cancel(true)
+            for ((future, i) <- futures.iterator.zipWithIndex if i != self)
+              future.cancel
           }
-          (tasks, true)
+          (futures, true)
         }
 
       try {
         state.change(_ =>
           (xs.iterator.zipWithIndex.map({ case (x, self) =>
-            Simple_Thread.submit_task {
-              val result = Exn.capture { f(x) }
-              result match { case Exn.Exn(_) => cancel_other(self) case _ => }
-              result
+            Future.fork {
+              Exn.capture { f(x) } match {
+                case Exn.Exn(exn) => cancel_other(self); throw exn
+                case Exn.Res(res) => res
+              }
             }
           }).toList, false))
 
-        state.value._1.map(future =>
-          try { future.get }
-          catch { case _: CancellationException => Exn.Exn(Exn.Interrupt()): Exn.Result[B] })
+        state.value._1.map(_.join_result)
       }
       finally { cancel_other() }
     }
@@ -65,4 +61,3 @@
   def exists[A](P: A => Boolean, xs: List[A]): Boolean = find_some(P, xs).isDefined
   def forall[A](P: A => Boolean, xs: List[A]): Boolean = !exists((x: A) => !P(x), xs)
 }
-
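
A hypothetical illustration (not part of the changeset) of managed_results above, which now forks isabelle futures instead of raw pool tasks: the first failing job cancels its siblings, and every job reports back as an Exn.Result. Assumes the isabelle package is on the classpath.

  package isabelle

  object Par_List_Sketch {
    def main(args: Array[String]): Unit = {
      val results =
        Par_List.managed_results(
          (n: Int) => if (n == 2) sys.error("boom") else { Thread.sleep(50); n * n },
          List(1, 2, 3, 4))
      results.foreach(println)  // Exn.Res(...) for finished jobs, Exn.Exn(...) for failed/cancelled ones
    }
  }
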
--- a/src/Pure/Concurrent/simple_thread.ML	Tue Nov 10 14:18:41 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-(*  Title:      Pure/Concurrent/simple_thread.ML
-    Author:     Makarius
-
-Simplified thread operations.
-*)
-
-signature SIMPLE_THREAD =
-sig
-  val is_self: Thread.thread -> bool
-  val get_name: unit -> string option
-  val the_name: unit -> string
-  type params = {name: string, stack_limit: int option, interrupts: bool}
-  val attributes: params -> Thread.threadAttribute list
-  val fork: params -> (unit -> unit) -> Thread.thread
-  val join: Thread.thread -> unit
-  val interrupt_unsynchronized: Thread.thread -> unit
-end;
-
-structure Simple_Thread: SIMPLE_THREAD =
-struct
-
-(* self *)
-
-fun is_self thread = Thread.equal (Thread.self (), thread);
-
-
-(* unique name *)
-
-local
-  val tag = Universal.tag () : string Universal.tag;
-  val count = Counter.make ();
-in
-
-fun get_name () = Thread.getLocal tag;
-
-fun the_name () =
-  (case get_name () of
-    NONE => raise Fail "Unknown thread name"
-  | SOME name => name);
-
-fun set_name base =
-  Thread.setLocal (tag, base ^ "/" ^ string_of_int (count ()));
-
-end;
-
-
-(* fork *)
-
-type params = {name: string, stack_limit: int option, interrupts: bool};
-
-fun attributes ({stack_limit, interrupts, ...}: params) =
-  ML_Stack.limit stack_limit @
-  (if interrupts then Multithreading.public_interrupts else Multithreading.no_interrupts);
-
-fun fork (params: params) body =
-  Thread.fork (fn () =>
-    print_exception_trace General.exnMessage tracing (fn () =>
-      (set_name (#name params); body ())
-        handle exn => if Exn.is_interrupt exn then () (*sic!*) else reraise exn),
-    attributes params);
-
-
-(* join *)
-
-fun join thread =
-  while Thread.isActive thread
-  do OS.Process.sleep (seconds 0.1);
-
-
-(* interrupt *)
-
-fun interrupt_unsynchronized thread =
-  Thread.interrupt thread handle Thread _ => ();
-
-end;
--- a/src/Pure/Concurrent/simple_thread.scala	Tue Nov 10 14:18:41 2015 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*  Title:      Pure/Concurrent/simple_thread.scala
-    Module:     PIDE
-    Author:     Makarius
-
-Simplified thread operations.
-*/
-
-package isabelle
-
-
-import java.lang.Thread
-import java.util.concurrent.{Callable, Future => JFuture, ThreadPoolExecutor,
-  TimeUnit, LinkedBlockingQueue}
-
-
-object Simple_Thread
-{
-  /* plain thread */
-
-  def fork(name: String = "", daemon: Boolean = false)(body: => Unit): Thread =
-  {
-    val thread =
-      if (name == null || name == "") new Thread() { override def run = body }
-      else new Thread(name) { override def run = body }
-    thread.setDaemon(daemon)
-    thread.start
-    thread
-  }
-
-
-  /* future result via thread */
-
-  def future[A](name: String = "", daemon: Boolean = false)(body: => A): (Thread, Future[A]) =
-  {
-    val result = Future.promise[A]
-    val thread = fork(name, daemon) { result.fulfill_result(Exn.capture(body)) }
-    (thread, result)
-  }
-
-
-  /* thread pool */
-
-  lazy val default_pool =
-    {
-      val m = Properties.Value.Int.unapply(System.getProperty("isabelle.threads", "0")) getOrElse 0
-      val n = if (m > 0) m else (Runtime.getRuntime.availableProcessors max 1) min 8
-      new ThreadPoolExecutor(n, n, 2500L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable])
-    }
-
-  def submit_task[A](body: => A): JFuture[A] =
-    default_pool.submit(new Callable[A] { def call = body })
-
-
-  /* delayed events */
-
-  final class Delay private [Simple_Thread](
-    first: Boolean, delay: => Time, cancel: () => Unit, event: => Unit)
-  {
-    private var running: Option[Event_Timer.Request] = None
-
-    private def run: Unit =
-    {
-      val do_run = synchronized {
-        if (running.isDefined) { running = None; true } else false
-      }
-      if (do_run) event
-    }
-
-    def invoke(): Unit = synchronized
-    {
-      val new_run =
-        running match {
-          case Some(request) => if (first) false else { request.cancel; cancel(); true }
-          case None => true
-        }
-      if (new_run)
-        running = Some(Event_Timer.request(Time.now() + delay)(run))
-    }
-
-    def revoke(): Unit = synchronized
-    {
-      running match {
-        case Some(request) => request.cancel; cancel(); running = None
-        case None => cancel()
-      }
-    }
-
-    def postpone(alt_delay: Time): Unit = synchronized
-    {
-      running match {
-        case Some(request) =>
-          val alt_time = Time.now() + alt_delay
-          if (request.time < alt_time && request.cancel) {
-            cancel()
-            running = Some(Event_Timer.request(alt_time)(run))
-          }
-          else cancel()
-        case None => cancel()
-      }
-    }
-  }
-
-  // delayed event after first invocation
-  def delay_first(delay: => Time, cancel: () => Unit = () => ())(event: => Unit): Delay =
-    new Delay(true, delay, cancel, event)
-
-  // delayed event after last invocation
-  def delay_last(delay: => Time, cancel: () => Unit = () => ())(event: => Unit): Delay =
-    new Delay(false, delay, cancel, event)
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Concurrent/standard_thread.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -0,0 +1,75 @@
+(*  Title:      Pure/Concurrent/standard_thread.ML
+    Author:     Makarius
+
+Standard thread operations.
+*)
+
+signature STANDARD_THREAD =
+sig
+  val is_self: Thread.thread -> bool
+  val get_name: unit -> string option
+  val the_name: unit -> string
+  type params = {name: string, stack_limit: int option, interrupts: bool}
+  val attributes: params -> Thread.threadAttribute list
+  val fork: params -> (unit -> unit) -> Thread.thread
+  val join: Thread.thread -> unit
+  val interrupt_unsynchronized: Thread.thread -> unit
+end;
+
+structure Standard_Thread: STANDARD_THREAD =
+struct
+
+(* self *)
+
+fun is_self thread = Thread.equal (Thread.self (), thread);
+
+
+(* unique name *)
+
+local
+  val tag = Universal.tag () : string Universal.tag;
+  val count = Counter.make ();
+in
+
+fun get_name () = Thread.getLocal tag;
+
+fun the_name () =
+  (case get_name () of
+    NONE => raise Fail "Unknown thread name"
+  | SOME name => name);
+
+fun set_name base =
+  Thread.setLocal (tag, base ^ "/" ^ string_of_int (count ()));
+
+end;
+
+
+(* fork *)
+
+type params = {name: string, stack_limit: int option, interrupts: bool};
+
+fun attributes ({stack_limit, interrupts, ...}: params) =
+  ML_Stack.limit stack_limit @
+  (if interrupts then Multithreading.public_interrupts else Multithreading.no_interrupts);
+
+fun fork (params: params) body =
+  Thread.fork (fn () =>
+    print_exception_trace General.exnMessage tracing (fn () =>
+      (set_name (#name params); body ())
+        handle exn => if Exn.is_interrupt exn then () (*sic!*) else reraise exn),
+    attributes params);
+
+
+(* join *)
+
+fun join thread =
+  while Thread.isActive thread
+  do OS.Process.sleep (seconds 0.1);
+
+
+(* interrupt *)
+
+fun interrupt_unsynchronized thread =
+  Thread.interrupt thread handle Thread _ => ();
+
+end;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Concurrent/standard_thread.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -0,0 +1,101 @@
+/*  Title:      Pure/Concurrent/standard_thread.scala
+    Module:     PIDE
+    Author:     Makarius
+
+Standard thread operations.
+*/
+
+package isabelle
+
+
+import java.lang.Thread
+import java.util.concurrent.{ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue}
+
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
+
+
+object Standard_Thread
+{
+  /* fork */
+
+  def fork(name: String = "", daemon: Boolean = false)(body: => Unit): Thread =
+  {
+    val thread =
+      if (name == null || name == "") new Thread() { override def run = body }
+      else new Thread(name) { override def run = body }
+    thread.setDaemon(daemon)
+    thread.start
+    thread
+  }
+
+
+  /* pool */
+
+  lazy val pool: ThreadPoolExecutor =
+    {
+      val m = Properties.Value.Int.unapply(System.getProperty("isabelle.threads", "0")) getOrElse 0
+      val n = if (m > 0) m else (Runtime.getRuntime.availableProcessors max 1) min 8
+      new ThreadPoolExecutor(n, n, 2500L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable])
+    }
+
+  lazy val execution_context: ExecutionContextExecutor =
+    ExecutionContext.fromExecutorService(pool)
+
+
+  /* delayed events */
+
+  final class Delay private [Standard_Thread](
+    first: Boolean, delay: => Time, cancel: () => Unit, event: => Unit)
+  {
+    private var running: Option[Event_Timer.Request] = None
+
+    private def run: Unit =
+    {
+      val do_run = synchronized {
+        if (running.isDefined) { running = None; true } else false
+      }
+      if (do_run) event
+    }
+
+    def invoke(): Unit = synchronized
+    {
+      val new_run =
+        running match {
+          case Some(request) => if (first) false else { request.cancel; cancel(); true }
+          case None => cancel(); true
+        }
+      if (new_run)
+        running = Some(Event_Timer.request(Time.now() + delay)(run))
+    }
+
+    def revoke(): Unit = synchronized
+    {
+      running match {
+        case Some(request) => request.cancel; cancel(); running = None
+        case None => cancel()
+      }
+    }
+
+    def postpone(alt_delay: Time): Unit = synchronized
+    {
+      running match {
+        case Some(request) =>
+          val alt_time = Time.now() + alt_delay
+          if (request.time < alt_time && request.cancel) {
+            cancel()
+            running = Some(Event_Timer.request(alt_time)(run))
+          }
+          else cancel()
+        case None => cancel()
+      }
+    }
+  }
+
+  // delayed event after first invocation
+  def delay_first(delay: => Time, cancel: () => Unit = () => ())(event: => Unit): Delay =
+    new Delay(true, delay, cancel, event)
+
+  // delayed event after last invocation
+  def delay_last(delay: => Time, cancel: () => Unit = () => ())(event: => Unit): Delay =
+    new Delay(false, delay, cancel, event)
+}
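
A hypothetical usage sketch (not part of the changeset) of the new Standard_Thread module above: a named daemon fork plus a debounced event via delay_last. It assumes the isabelle package is on the classpath and that Time.seconds is available as in Pure/General/time.scala.

  package isabelle

  object Standard_Thread_Sketch {
    def main(args: Array[String]): Unit = {
      val worker =
        Standard_Thread.fork(name = "sketch", daemon = true) {
          println("running on " + Thread.currentThread.getName)
        }
      worker.join()

      // delay_last: repeated invoke() within the delay window fires the event only once
      val delay = Standard_Thread.delay_last(Time.seconds(0.2)) { println("debounced") }
      delay.invoke(); delay.invoke()
      Thread.sleep(500)  // give Event_Timer a chance to fire before the JVM exits
    }
  }
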
--- a/src/Pure/Concurrent/time_limit.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Concurrent/time_limit.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -22,7 +22,7 @@
 
       val request =
         Event_Timer.request (Time.+ (Time.now (), timeout))
-          (fn () => Simple_Thread.interrupt_unsynchronized self);
+          (fn () => Standard_Thread.interrupt_unsynchronized self);
 
       val result =
         Exn.capture (fn () => Multithreading.with_attributes orig_atts (fn _ => f x)) ();
--- a/src/Pure/GUI/gui_thread.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/GUI/gui_thread.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -50,8 +50,8 @@
   /* delayed events */
 
   def delay_first(delay: => Time, cancel: () => Unit = () => ())(event: => Unit)
-    : Simple_Thread.Delay = Simple_Thread.delay_first(delay, cancel) { later { event } }
+    : Standard_Thread.Delay = Standard_Thread.delay_first(delay, cancel) { later { event } }
 
   def delay_last(delay: => Time, cancel: () => Unit = () => ())(event: => Unit)
-    : Simple_Thread.Delay = Simple_Thread.delay_last(delay, cancel) { later { event } }
+    : Standard_Thread.Delay = Standard_Thread.delay_last(delay, cancel) { later { event } }
 }
--- a/src/Pure/General/antiquote.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/antiquote.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -80,7 +80,6 @@
   Scan.repeats1
    (Scan.many1 (fn (s, _) =>
       not (Symbol.is_control s) andalso s <> "\\<open>" andalso s <> "@" andalso Symbol.not_eof s) ||
-    Scan.one (fn (s, _) => Symbol.is_control s) --| Scan.ahead (~$$ "\\<open>") >> single ||
     $$$ "@" --| Scan.ahead (~$$ "{"));
 
 val scan_antiq_body =
@@ -101,7 +100,10 @@
           (case opt_control of
             SOME (sym, pos) => ((control_name sym, pos), Symbol_Pos.range ((sym, pos) :: body))
           | NONE => (("cartouche", #2 (hd body)), Symbol_Pos.range body));
-      in {name = name, range = range, body = body} end);
+      in {name = name, range = range, body = body} end) ||
+  Scan.one (Symbol.is_control o Symbol_Pos.symbol) >>
+    (fn (sym, pos) =>
+      {name = (control_name sym, pos), range = Symbol_Pos.range [(sym, pos)], body = []});
 
 val scan_antiq =
   Symbol_Pos.scan_pos -- ($$ "@" |-- $$ "{" |-- Symbol_Pos.scan_pos --
--- a/src/Pure/General/antiquote.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/antiquote.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -26,11 +26,11 @@
   {
     private val txt: Parser[String] =
       rep1(many1(s => !Symbol.is_control(s) && !Symbol.is_open(s) && s != "@") |
-        one(Symbol.is_control) <~ guard(opt_term(one(s => !Symbol.is_open(s)))) |
         "@" <~ guard(opt_term(one(s => s != "{")))) ^^ (x => x.mkString)
 
     val control: Parser[String] =
-      opt(one(Symbol.is_control)) ~ cartouche ^^ { case Some(x) ~ y => x + y case None ~ x => x }
+      opt(one(Symbol.is_control)) ~ cartouche ^^ { case Some(x) ~ y => x + y case None ~ x => x } |
+      one(Symbol.is_control)
 
     val antiq_other: Parser[String] =
       many1(s => s != "\"" && s != "`" && s != "}" && !Symbol.is_open(s) && !Symbol.is_close(s))
--- a/src/Pure/General/completion.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/completion.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -246,12 +246,12 @@
   {
     override val whiteSpace = "".r
 
-    private val symbol_regex: Regex = """\\<\^?[A-Za-z0-9_']+>""".r
-    def is_symbol(s: CharSequence): Boolean = symbol_regex.pattern.matcher(s).matches
+    private val symboloid_regex: Regex = """\\([A-Za-z0-9_']+|<\^?[A-Za-z0-9_']+>)""".r
+    def is_symboloid(s: CharSequence): Boolean = symboloid_regex.pattern.matcher(s).matches
 
     private def reverse_symbol: Parser[String] = """>[A-Za-z0-9_']+\^?<\\""".r
     private def reverse_symb: Parser[String] = """[A-Za-z0-9_']{2,}\^?<\\""".r
-    private def escape: Parser[String] = """[a-zA-Z0-9_']+\\""".r
+    private def reverse_escape: Parser[String] = """[a-zA-Z0-9_']+\\""".r
 
     private val word_regex = "[a-zA-Z0-9_'.]+".r
     private def word: Parser[String] = word_regex
@@ -275,7 +275,7 @@
       val parse_word = if (explicit) word else word3
       val reverse_in = new Library.Reverse(in)
       val parser =
-        (reverse_symbol | reverse_symb | escape) ^^ (x => (x.reverse, "")) |
+        (reverse_symbol | reverse_symb | reverse_escape) ^^ (x => (x.reverse, "")) |
         underscores ~ parse_word ~ opt("?") ^^
         { case x ~ y ~ z => (z.getOrElse("") + y.reverse, x) }
       parse(parser, reverse_in) match {
@@ -354,17 +354,18 @@
   private def add_symbols(): Completion =
   {
     val words =
-      (for ((x, _) <- Symbol.names.toList) yield (x, x)) :::
-      (for ((x, y) <- Symbol.names.toList) yield ("\\" + y, x)) :::
-      (for ((x, y) <- Symbol.abbrevs.toList if Completion.Word_Parsers.is_word(y)) yield (y, x))
+      (for ((sym, _) <- Symbol.names.toList) yield (sym, sym)) :::
+      (for ((sym, name) <- Symbol.names.toList) yield ("\\" + name, sym)) :::
+      (for ((sym, abbr) <- Symbol.abbrevs.toList if Completion.Word_Parsers.is_word(abbr))
+        yield (abbr, sym))
 
     val symbol_abbrs =
-      (for ((x, y) <- Symbol.abbrevs.iterator if !Completion.Word_Parsers.is_word(y))
-        yield (y, x)).toList
+      (for ((sym, abbr) <- Symbol.abbrevs.iterator if !Completion.Word_Parsers.is_word(abbr))
+        yield (abbr, sym)).toList
 
     val abbrs =
-      for ((a, b) <- symbol_abbrs ::: Completion.default_abbrs)
-        yield (a.reverse, (a, b))
+      for ((abbr, sym) <- symbol_abbrs ::: Completion.default_abbrs)
+        yield (abbr.reverse, (abbr, sym))
 
     new Completion(
       keywords,
@@ -393,18 +394,15 @@
     {
       val reverse_in = new Library.Reverse(text.subSequence(0, caret))
       Scan.Parsers.parse(Scan.Parsers.literal(abbrevs_lex), reverse_in) match {
-        case Scan.Parsers.Success(reverse_a, _) =>
-          val abbrevs = abbrevs_map.get_list(reverse_a)
+        case Scan.Parsers.Success(reverse_abbr, _) =>
+          val abbrevs = abbrevs_map.get_list(reverse_abbr)
           abbrevs match {
             case Nil => None
-            case (a, _) :: _ =>
+            case (abbr, _) :: _ =>
               val ok =
-                if (a == Completion.antiquote) language_context.antiquotes
-                else
-                  language_context.symbols ||
-                  Completion.default_abbrs.exists(_._1 == a) ||
-                  Completion.Word_Parsers.is_symbol(a)
-              if (ok) Some((a, abbrevs))
+                if (abbr == Completion.antiquote) language_context.antiquotes
+                else language_context.symbols || Completion.default_abbrs.exists(_._1 == abbr)
+              if (ok) Some((abbr, abbrevs))
               else None
           }
         case _ => None
@@ -433,7 +431,7 @@
                     complete_word <- complete_words
                     ok =
                       if (is_keyword(complete_word)) !word_context && language_context.is_outer
-                      else language_context.symbols
+                      else language_context.symbols || Completion.Word_Parsers.is_symboloid(word)
                     if ok
                     completion <- words_map.get_list(complete_word)
                   } yield (complete_word, completion)
@@ -447,6 +445,7 @@
         val immediate =
           explicit ||
             (!Completion.Word_Parsers.is_word(original) &&
+             !Completion.Word_Parsers.is_symboloid(original) &&
               Character.codePointCount(original, 0, original.length) > 1)
         val unique = completions.length == 1
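
The symboloid_regex introduced above accepts a backslash-escaped symbol prefix in all three forms (\name, \<name>, \<^name>); is_symboloid(word) both enables word completion outside symbol-aware language contexts and suppresses implicit immediate completion. A self-contained illustration (not part of the changeset) of what the regex matches:

  object Symboloid_Sketch {
    private val symboloid_regex = """\\([A-Za-z0-9_']+|<\^?[A-Za-z0-9_']+>)""".r
    def is_symboloid(s: CharSequence): Boolean = symboloid_regex.pattern.matcher(s).matches

    def main(args: Array[String]): Unit =
      List("""\alpha""", """\<alpha>""", """\<^sub>""", "alpha").foreach(s =>
        println(s + " -> " + is_symboloid(s)))  // true, true, true, false
  }
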
 
--- a/src/Pure/General/exn.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/exn.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -58,7 +58,7 @@
     def apply(): Throwable = new InterruptedException
     def unapply(exn: Throwable): Boolean = is_interrupt(exn)
 
-    def expose() { if (Thread.interrupted()) throw apply() }
+    def expose() { if (Thread.interrupted) throw apply() }
     def impose() { Thread.currentThread.interrupt }
 
     def postpone[A](body: => A): Option[A] =
@@ -104,4 +104,3 @@
   def message(exn: Throwable): String =
     user_message(exn) getOrElse (if (is_interrupt(exn)) "Interrupt" else exn.toString)
 }
-
--- a/src/Pure/General/symbol.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/symbol.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -10,6 +10,7 @@
   val STX: symbol
   val DEL: symbol
   val space: symbol
+  val comment: symbol
   val is_char: symbol -> bool
   val is_utf8: symbol -> bool
   val is_symbolic: symbol -> bool
@@ -93,6 +94,8 @@
 
 val space = chr 32;
 
+val comment = "\\<comment>";
+
 fun is_char s = size s = 1;
 
 fun is_utf8 s = size s > 0 andalso forall_string (fn c => ord c >= 128) s;
--- a/src/Pure/General/symbol.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/symbol.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -433,6 +433,11 @@
     val symbolic = recode_set((for { (sym, _) <- symbols; if raw_symbolic(sym) } yield sym): _*)
 
 
+    /* comment */
+
+    val comment_decoded = decode(comment)
+
+
     /* cartouches */
 
     val open_decoded = decode(open)
@@ -496,10 +501,16 @@
   def is_blank(sym: Symbol): Boolean = symbols.blanks.contains(sym)
 
 
+  /* comment */
+
+  val comment: Symbol = "\\<comment>"
+  def comment_decoded: Symbol = symbols.comment_decoded
+
+
   /* cartouches */
 
-  val open = "\\<open>"
-  val close = "\\<close>"
+  val open: Symbol = "\\<open>"
+  val close: Symbol = "\\<close>"
 
   def open_decoded: Symbol = symbols.open_decoded
   def close_decoded: Symbol = symbols.close_decoded
--- a/src/Pure/General/time.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/General/time.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -31,6 +31,7 @@
   def + (t: Time): Time = new Time(ms + t.ms)
   def - (t: Time): Time = new Time(ms - t.ms)
 
+  def compare(t: Time): Int = ms compare t.ms
   def < (t: Time): Boolean = ms < t.ms
   def <= (t: Time): Boolean = ms <= t.ms
   def > (t: Time): Boolean = ms > t.ms
--- a/src/Pure/Isar/isar_syn.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Isar/isar_syn.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -384,7 +384,7 @@
 (* locales *)
 
 val locale_val =
-  Parse_Spec.locale_expression false --
+  Parse_Spec.locale_expression --
     Scan.optional (@{keyword "+"} |-- Parse.!!! (Scan.repeat1 Parse_Spec.context_element)) [] ||
   Scan.repeat1 Parse_Spec.context_element >> pair ([], []);
 
@@ -402,30 +402,30 @@
       >> (fn elems =>
           Toplevel.begin_local_theory true (Experiment.experiment_cmd elems #> snd)));
 
-fun interpretation_args mandatory =
-  Parse.!!! (Parse_Spec.locale_expression mandatory) --
+val interpretation_args =
+  Parse.!!! Parse_Spec.locale_expression --
     Scan.optional
-      (Parse.where_ |-- Parse.and_list1 (Parse_Spec.opt_thm_name ":" -- Parse.prop)) [];
+      (@{keyword "rewrites"} |-- Parse.and_list1 (Parse_Spec.opt_thm_name ":" -- Parse.prop)) [];
 
 val _ =
   Outer_Syntax.command @{command_keyword sublocale}
     "prove sublocale relation between a locale and a locale expression"
     ((Parse.position Parse.xname --| (@{keyword "\<subseteq>"} || @{keyword "<"}) --
-      interpretation_args false >> (fn (loc, (expr, equations)) =>
+      interpretation_args >> (fn (loc, (expr, equations)) =>
         Toplevel.theory_to_proof (Expression.sublocale_global_cmd loc expr equations)))
-    || interpretation_args false >> (fn (expr, equations) =>
+    || interpretation_args >> (fn (expr, equations) =>
         Toplevel.local_theory_to_proof NONE NONE (Expression.sublocale_cmd expr equations)));
 
 val _ =
   Outer_Syntax.command @{command_keyword interpretation}
     "prove interpretation of locale expression in local theory"
-    (interpretation_args true >> (fn (expr, equations) =>
+    (interpretation_args >> (fn (expr, equations) =>
       Toplevel.local_theory_to_proof NONE NONE (Expression.interpretation_cmd expr equations)));
 
 val _ =
   Outer_Syntax.command @{command_keyword interpret}
     "prove interpretation of locale expression in proof context"
-    (interpretation_args true >> (fn (expr, equations) =>
+    (interpretation_args >> (fn (expr, equations) =>
       Toplevel.proof' (Expression.interpret_cmd expr equations)));
 
 
@@ -817,7 +817,7 @@
 val _ =
   Outer_Syntax.command @{command_keyword print_dependencies}
     "print dependencies of locale expression"
-    (Parse.opt_bang -- Parse_Spec.locale_expression true >> (fn (b, expr) =>
+    (Parse.opt_bang -- Parse_Spec.locale_expression >> (fn (b, expr) =>
       Toplevel.keep (fn state => Expression.print_dependencies (Toplevel.context_of state) b expr)));
 
 val _ =
--- a/src/Pure/Isar/outer_syntax.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Isar/outer_syntax.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -197,7 +197,7 @@
             in msg ^ quote (Markup.markup Markup.keyword1 name) end))
     end);
 
-val parse_cmt = Parse.$$$ "--" -- Parse.!!! Parse.document_source;
+val parse_cmt = (Parse.$$$ "--" || Parse.$$$ Symbol.comment) -- Parse.!!! Parse.document_source;
 
 fun commands_source thy =
   Token.source_proper #>
@@ -261,7 +261,7 @@
 (* side-comments *)
 
 fun cmts (t1 :: t2 :: toks) =
-      if Token.keyword_with (fn s => s = "--") t1 then t2 :: cmts toks
+      if Token.keyword_with (fn s => s = "--" orelse s = Symbol.comment) t1 then t2 :: cmts toks
       else cmts (t2 :: toks)
   | cmts _ = [];
 
--- a/src/Pure/Isar/parse_spec.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Isar/parse_spec.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -19,9 +19,9 @@
   val locale_fixes: (binding * string option * mixfix) list parser
   val locale_insts: (string option list * (Attrib.binding * string) list) parser
   val class_expression: string list parser
-  val locale_prefix: bool -> (string * bool) parser
+  val locale_prefix: (string * bool) parser
   val locale_keyword: string parser
-  val locale_expression: bool -> Expression.expression parser
+  val locale_expression: Expression.expression parser
   val context_element: Element.context parser
   val statement: (Attrib.binding * (string * string list) list) list parser
   val if_statement: (Attrib.binding * (string * string list) list) list parser
@@ -105,11 +105,9 @@
 
 in
 
-fun locale_prefix mandatory =
+val locale_prefix =
   Scan.optional
-    (Parse.name --
-      (Parse.$$$ "!" >> K true || Parse.$$$ "?" >> K false || Scan.succeed mandatory) --|
-      Parse.$$$ ":")
+    (Parse.name -- (Scan.option (Parse.$$$ "?") >> is_none) --| Parse.$$$ ":")
     ("", false);
 
 val locale_keyword =
@@ -118,10 +116,11 @@
 
 val class_expression = plus1_unless locale_keyword Parse.class;
 
-fun locale_expression mandatory =
+val locale_expression =
   let
     val expr2 = Parse.position Parse.xname;
-    val expr1 = locale_prefix mandatory -- expr2 --
+    val expr1 =
+      locale_prefix -- expr2 --
       Scan.optional instance (Expression.Named []) >> (fn ((p, l), i) => (l, (p, i)));
     val expr0 = plus1_unless locale_keyword expr1;
   in expr0 -- Scan.optional (Parse.$$$ "for" |-- Parse.!!! locale_fixes) [] end;
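
The locale prefix syntax is simplified above: an optional "name:" or "name?:" prefix,
where a trailing "?" makes the prefix non-mandatory and the default is ("", false).
For orientation, a standalone regex-based approximation in Scala (not the Scan/Parse
combinators used in the ML code):

object LocalePrefixSketch {
  // "foo: ..."  -> (("foo", true), " ...")
  // "foo?: ..." -> (("foo", false), " ...")
  // no prefix   -> (("", false), input unchanged)
  private val Prefix = """(\w+)(\??):(.*)""".r

  def parse(s: String): ((String, Boolean), String) =
    s match {
      case Prefix(name, q, rest) => ((name, q.isEmpty), rest)
      case _ => (("", false), s)
    }
}
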
--- a/src/Pure/ML/ml_antiquotations.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ML/ml_antiquotations.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -10,7 +10,15 @@
 (* ML support *)
 
 val _ = Theory.setup
- (ML_Antiquotation.inline @{binding assert}
+ (ML_Antiquotation.value @{binding cartouche}
+    (Args.context -- Scan.lift (Parse.position Args.cartouche_input) >> (fn (ctxt, (source, pos)) =>
+      "Input.source true " ^ ML_Syntax.print_string (Input.text_of source) ^ " " ^
+        ML_Syntax.atomic (ML_Syntax.print_range (Input.range_of source)))) #>
+
+  ML_Antiquotation.inline @{binding undefined}
+    (Scan.succeed "(raise General.Match)") #>
+
+  ML_Antiquotation.inline @{binding assert}
     (Scan.succeed "(fn b => if b then () else raise General.Fail \"Assertion failed\")") #>
 
   ML_Antiquotation.inline @{binding make_string}
--- a/src/Pure/ML/ml_context.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ML/ml_context.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -132,10 +132,6 @@
 
 fun reset_env name = ML_Lex.tokenize ("structure " ^ name ^ " = struct end");
 
-fun expanding (Antiquote.Text tok) = ML_Lex.is_cartouche tok
-  | expanding (Antiquote.Control _) = true
-  | expanding (Antiquote.Antiq _) = true;
-
 fun eval_antiquotes (ants, pos) opt_context =
   let
     val visible =
@@ -145,7 +141,7 @@
     val opt_ctxt = Option.map Context.proof_of opt_context;
 
     val ((ml_env, ml_body), opt_ctxt') =
-      if forall (not o expanding) ants
+      if forall (fn Antiquote.Text _ => true | _ => false) ants
       then (([], map (fn Antiquote.Text tok => tok) ants), opt_ctxt)
       else
         let
@@ -155,22 +151,10 @@
             let val (decl, ctxt') = apply_antiquotation src ctxt
             in (decl #> tokenize range, ctxt') end;
 
-          fun expand (Antiquote.Text tok) ctxt =
-                if ML_Lex.is_cartouche tok then
-                  let
-                    val range = ML_Lex.range_of tok;
-                    val text =
-                      Symbol_Pos.explode (ML_Lex.content_of tok, #1 range)
-                      |> Symbol_Pos.cartouche_content
-                      |> Symbol_Pos.implode_range range |> #1;
-                    val (decl, ctxt') =
-                      value_decl "input"
-                        ("Input.source true " ^ ML_Syntax.print_string text  ^ " " ^
-                          ML_Syntax.atomic (ML_Syntax.print_range range)) ctxt;
-                  in (decl #> tokenize range, ctxt') end
-                else (K ([], [tok]), ctxt)
+          fun expand (Antiquote.Text tok) ctxt = (K ([], [tok]), ctxt)
             | expand (Antiquote.Control {name, range, body}) ctxt =
-                expand_src range (Token.src name [Token.read_cartouche body]) ctxt
+                expand_src range
+                  (Token.src name (if null body then [] else [Token.read_cartouche body])) ctxt
             | expand (Antiquote.Antiq {range, body, ...}) ctxt =
                 expand_src range
                   (Token.read_antiq (Thy_Header.get_keywords' ctxt) antiq (body, #1 range)) ctxt;
--- a/src/Pure/ML/ml_lex.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ML/ml_lex.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -9,10 +9,9 @@
   val keywords: string list
   datatype token_kind =
     Keyword | Ident | Long_Ident | Type_Var | Word | Int | Real | Char | String |
-    Space | Cartouche | Comment | Error of string | EOF
+    Space | Comment | Error of string | EOF
   eqtype token
   val stopper: token Scan.stopper
-  val is_cartouche: token -> bool
   val is_regular: token -> bool
   val is_improper: token -> bool
   val set_range: Position.range -> token -> token
@@ -64,7 +63,7 @@
 
 datatype token_kind =
   Keyword | Ident | Long_Ident | Type_Var | Word | Int | Real | Char | String |
-  Space | Cartouche | Comment | Error of string | EOF;
+  Space | Comment | Error of string | EOF;
 
 datatype token = Token of Position.range * (token_kind * string);
 
@@ -103,9 +102,6 @@
 fun is_delimiter (Token (_, (Keyword, x))) = not (Symbol.is_ascii_identifier x)
   | is_delimiter _ = false;
 
-fun is_cartouche (Token (_, (Cartouche, _))) = true
-  | is_cartouche _ = false;
-
 fun is_regular (Token (_, (Error _, _))) = false
   | is_regular (Token (_, (EOF, _))) = false
   | is_regular _ = true;
@@ -150,7 +146,6 @@
   | Real => (Markup.ML_numeral, "")
   | Char => (Markup.ML_char, "")
   | String => (if SML then Markup.SML_string else Markup.ML_string, "")
-  | Cartouche => (Markup.ML_cartouche, "")
   | Comment => (if SML then Markup.SML_comment else Markup.ML_comment, "")
   | Error msg => (Markup.bad, msg)
   | _ => (Markup.empty, "");
@@ -293,7 +288,6 @@
 val scan_sml = scan_ml >> Antiquote.Text;
 
 val scan_ml_antiq =
-  Symbol_Pos.scan_cartouche err_prefix >> (Antiquote.Text o token Cartouche) ||
   Antiquote.scan_control >> Antiquote.Control ||
   Antiquote.scan_antiq >> Antiquote.Antiq ||
   scan_ml >> Antiquote.Text;
@@ -363,4 +357,3 @@
 end;
 
 end;
-
--- a/src/Pure/ML/ml_lex.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ML/ml_lex.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -50,7 +50,6 @@
     val CHAR = Value("character")
     val STRING = Value("quoted string")
     val SPACE = Value("white space")
-    val CARTOUCHE = Value("text cartouche")
     val COMMENT = Value("comment text")
     val CONTROL = Value("control symbol antiquotation")
     val ANTIQ = Value("antiquotation")
@@ -135,15 +134,6 @@
     }
 
 
-    /* ML cartouche */
-
-    private val ml_cartouche: Parser[Token] =
-      cartouche ^^ (x => Token(Kind.CARTOUCHE, x))
-
-    private def ml_cartouche_line(ctxt: Scan.Line_Context): Parser[(Token, Scan.Line_Context)] =
-      cartouche_line(ctxt) ^^ { case (x, c) => (Token(Kind.CARTOUCHE, x), c) }
-
-
     /* ML comment */
 
     private val ml_comment: Parser[Token] =
@@ -156,7 +146,7 @@
     /* delimited token */
 
     private def delimited_token: Parser[Token] =
-      ml_char | (ml_string | (ml_cartouche | ml_comment))
+      ml_char | (ml_string | ml_comment)
 
     private val recover_delimited: Parser[Token] =
       (recover_ml_char | (recover_ml_string | (recover_cartouche | recover_comment))) ^^
@@ -217,7 +207,7 @@
 
       val bad = one(_ => true) ^^ (x => Token(Kind.ERROR, x))
 
-      space | (recover_delimited | (ml_control | (ml_antiq |
+      space | (ml_control | (recover_delimited | (ml_antiq |
         (((word | (real | (int | (long_ident | (ident | type_var))))) ||| keyword) | bad))))
     }
 
@@ -259,9 +249,8 @@
       if (SML) ml_string_line(ctxt) | (ml_comment_line(ctxt) | other)
       else
         ml_string_line(ctxt) |
-          (ml_cartouche_line(ctxt) |
-            (ml_comment_line(ctxt) |
-              (ml_antiq_start(ctxt) | (ml_antiq_stop(ctxt) | (ml_antiq_body(ctxt) | other)))))
+          (ml_comment_line(ctxt) |
+            (ml_antiq_start(ctxt) | (ml_antiq_stop(ctxt) | (ml_antiq_body(ctxt) | other))))
     }
   }
 
@@ -292,4 +281,3 @@
     (toks.toList, ctxt)
   }
 }
-
--- a/src/Pure/PIDE/command.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/PIDE/command.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -308,10 +308,11 @@
 
   private def clean_tokens(tokens: List[Token]): List[(Token, Int)] =
   {
+    val markers = Set("%", "--", Symbol.comment, Symbol.comment_decoded)
     def clean(toks: List[(Token, Int)]): List[(Token, Int)] =
       toks match {
         case (t1, i1) :: (t2, i2) :: rest =>
-          if (t1.is_keyword && (t1.source == "%" || t1.source == "--")) clean(rest)
+          if (t1.is_keyword && markers(t1.source)) clean(rest)
           else (t1, i1) :: clean((t2, i2) :: rest)
         case _ => toks
       }
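
A standalone sketch of the marker test that clean_tokens now performs: besides "%" and
the legacy "--", the new \<comment> symbol counts in both its literal and its decoded
form. The decoded form depends on the symbol table loaded at runtime, so the sketch
takes it as a parameter instead of hard-coding a character:

object CommentMarkersSketch {
  val comment: String = "\\<comment>"

  def markers(comment_decoded: String): Set[String] =
    Set("%", "--", comment, comment_decoded)

  // a keyword token whose source is one of these markers is dropped,
  // together with the token that follows it
  def is_marker(comment_decoded: String, source: String): Boolean =
    markers(comment_decoded)(source)
}
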
--- a/src/Pure/PIDE/markup.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/PIDE/markup.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -36,7 +36,7 @@
   val language_prop: bool -> T
   val language_ML: bool -> T
   val language_SML: bool -> T
-  val language_document: {symbols: bool, delimited: bool} -> T
+  val language_document: bool -> T
   val language_antiquotation: T
   val language_text: bool -> T
   val language_rail: T
@@ -101,7 +101,6 @@
   val ML_numeralN: string val ML_numeral: T
   val ML_charN: string val ML_char: T
   val ML_stringN: string val ML_string: T
-  val ML_cartoucheN: string val ML_cartouche: T
   val ML_commentN: string val ML_comment: T
   val SML_stringN: string val SML_string: T
   val SML_commentN: string val SML_comment: T
@@ -310,8 +309,7 @@
 val language_prop = language' {name = "prop", symbols = true, antiquotes = false};
 val language_ML = language' {name = "ML", symbols = false, antiquotes = true};
 val language_SML = language' {name = "SML", symbols = false, antiquotes = false};
-fun language_document {symbols, delimited} =
-  language' {name = "document", symbols = symbols, antiquotes = true} delimited;
+val language_document = language' {name = "document", symbols = false, antiquotes = true};
 val language_antiquotation =
   language {name = "antiquotation", symbols = true, antiquotes = false, delimited = true};
 val language_text = language' {name = "text", symbols = true, antiquotes = false};
@@ -440,7 +438,6 @@
 val (ML_numeralN, ML_numeral) = markup_elem "ML_numeral";
 val (ML_charN, ML_char) = markup_elem "ML_char";
 val (ML_stringN, ML_string) = markup_elem "ML_string";
-val (ML_cartoucheN, ML_cartouche) = markup_elem "ML_cartouche";
 val (ML_commentN, ML_comment) = markup_elem "ML_comment";
 val (SML_stringN, SML_string) = markup_elem "SML_string";
 val (SML_commentN, SML_comment) = markup_elem "SML_comment";
--- a/src/Pure/PIDE/markup.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/PIDE/markup.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -260,7 +260,6 @@
   val ML_NUMERAL = "ML_numeral"
   val ML_CHAR = "ML_char"
   val ML_STRING = "ML_string"
-  val ML_CARTOUCHE = "ML_cartouche"
   val ML_COMMENT = "ML_comment"
   val SML_STRING = "SML_string"
   val SML_COMMENT = "SML_comment"
--- a/src/Pure/PIDE/prover.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/PIDE/prover.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -121,8 +121,8 @@
 
   /** process manager **/
 
-  private val (_, process_result) =
-    Simple_Thread.future("process_result") { system_process.join }
+  private val process_result =
+    Future.thread("process_result") { system_process.join }
 
   private def terminate_process()
   {
@@ -132,7 +132,7 @@
     }
   }
 
-  private val process_manager = Simple_Thread.fork("process_manager")
+  private val process_manager = Standard_Thread.fork("process_manager")
   {
     val (startup_failed, startup_errors) =
     {
@@ -230,7 +230,7 @@
       if (err) ("standard_error", system_process.stderr, Markup.STDERR)
       else ("standard_output", system_process.stdout, Markup.STDOUT)
 
-    Simple_Thread.fork(name) {
+    Standard_Thread.fork(name) {
       try {
         var result = new StringBuilder(100)
         var finished = false
@@ -268,7 +268,7 @@
     class Protocol_Error(msg: String) extends Exception(msg)
 
     val name = "message_output"
-    Simple_Thread.fork(name) {
+    Standard_Thread.fork(name) {
       val default_buffer = new Array[Byte](65536)
       var c = -1
 
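
Throughout this changeset Simple_Thread is replaced by Standard_Thread (and
Simple_Thread.future by Future.thread); the new module itself lives in
Pure/Concurrent/standard_thread.scala and is not part of the hunks shown here. A rough
standalone approximation of the named-fork pattern, assuming nothing about the real
implementation beyond "start a named thread and return it" (the daemon flag is an
assumption of the sketch):

object ForkSketch {
  def fork(name: String)(body: => Unit): Thread = {
    val thread = new Thread(new Runnable { def run(): Unit = body }, name)
    thread.setDaemon(true)  // assumption, not taken from the diff
    thread.start()
    thread
  }
}

object ForkExample {
  def main(args: Array[String]): Unit = {
    val t = ForkSketch.fork("process_manager") { println("managing ...") }
    t.join()
  }
}
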
--- a/src/Pure/PIDE/session.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/PIDE/session.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -24,7 +24,7 @@
 
   class Outlet[A](dispatcher: Consumer_Thread[() => Unit])
   {
-    private val consumers = Synchronized(List.empty[Consumer[A]])
+    private val consumers = Synchronized[List[Consumer[A]]](Nil)
 
     def += (c: Consumer[A]) { consumers.change(Library.update(c)) }
     def -= (c: Consumer[A]) { consumers.change(Library.remove(c)) }
@@ -291,7 +291,7 @@
       nodes = Set.empty
       commands = Set.empty
     }
-    private val delay_flush = Simple_Thread.delay_first(output_delay) { flush() }
+    private val delay_flush = Standard_Thread.delay_first(output_delay) { flush() }
 
     def invoke(assign: Boolean, cmds: List[Command]): Unit = synchronized {
       assignment |= assign
@@ -330,7 +330,7 @@
 
   private object prover
   {
-    private val variable = Synchronized(None: Option[Prover])
+    private val variable = Synchronized[Option[Prover]](None)
 
     def defined: Boolean = variable.value.isDefined
     def get: Prover = variable.value.get
@@ -353,7 +353,7 @@
 
   /* manager thread */
 
-  private val delay_prune = Simple_Thread.delay_first(prune_delay) { manager.send(Prune_History) }
+  private val delay_prune = Standard_Thread.delay_first(prune_delay) { manager.send(Prune_History) }
 
   private val manager: Consumer_Thread[Any] =
   {
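
These hunks (and several below) change Synchronized(List.empty[A]) or
Synchronized(None: Option[A]) into Synchronized[List[A]](Nil) or
Synchronized[Option[A]](None): the state type is given explicitly and the initial value
is written plainly. A minimal standalone sketch of such a synchronized variable with
just the value/change operations used in the diff (not the actual isabelle.Synchronized
class):

final class SynchronizedSketch[A](init: A) {
  private var state: A = init
  def value: A = synchronized { state }
  def change(f: A => A): Unit = synchronized { state = f(state) }
}

object SynchronizedSketch {
  def apply[A](init: A): SynchronizedSketch[A] = new SynchronizedSketch(init)
}

object SynchronizedSketchExample {
  def main(args: Array[String]): Unit = {
    // explicit type argument, as in the hunks above
    val consumers = SynchronizedSketch[List[String]](Nil)
    consumers.change("a" :: _)
    consumers.change("b" :: _)
    println(consumers.value)  // List(b, a)
  }
}
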
--- a/src/Pure/Pure.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Pure.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -6,13 +6,13 @@
 
 theory Pure
   keywords
-    "!!" "!" "+" "--" ":" ";" "<" "<=" "=" "=>" "?" "[" "\<equiv>"
+    "!!" "!" "+" "--" ":" ";" "<" "<=" "=" "=>" "?" "[" "\<comment>" "\<equiv>"
     "\<leftharpoondown>" "\<rightharpoonup>" "\<rightleftharpoons>"
     "\<subseteq>" "]" "assumes" "attach" "binder" "constrains"
     "defines" "fixes" "for" "identifier" "if" "in" "includes" "infix"
     "infixl" "infixr" "is" "notes" "obtains" "open" "output"
-    "overloaded" "pervasive" "premises" "private" "qualified" "shows"
-    "structure" "unchecked" "where" "when" "|"
+    "overloaded" "pervasive" "premises" "private" "qualified" "rewrites"
+    "shows" "structure" "unchecked" "where" "when" "|"
   and "text" "txt" :: document_body
   and "text_raw" :: document_raw
   and "default_sort" :: thy_decl == ""
--- a/src/Pure/ROOT	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ROOT	Tue Nov 10 14:43:29 2015 +0000
@@ -86,9 +86,9 @@
     "Concurrent/par_list.ML"
     "Concurrent/par_list_sequential.ML"
     "Concurrent/random.ML"
-    "Concurrent/simple_thread.ML"
     "Concurrent/single_assignment.ML"
     "Concurrent/single_assignment_sequential.ML"
+    "Concurrent/standard_thread.ML"
     "Concurrent/synchronized.ML"
     "Concurrent/synchronized_sequential.ML"
     "Concurrent/task_queue.ML"
--- a/src/Pure/ROOT.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/ROOT.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -109,7 +109,7 @@
 then use "ML/ml_statistics_polyml-5.5.0.ML"
 else use "ML/ml_statistics_dummy.ML";
 
-use "Concurrent/simple_thread.ML";
+use "Concurrent/standard_thread.ML";
 
 use "Concurrent/single_assignment.ML";
 if Multithreading.available then ()
--- a/src/Pure/System/invoke_scala.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/System/invoke_scala.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -8,7 +8,6 @@
 
 
 import java.lang.reflect.{Method, Modifier, InvocationTargetException}
-import java.util.concurrent.{Future => JFuture}
 
 import scala.util.matching.Regex
 
@@ -72,7 +71,7 @@
 
 class Invoke_Scala extends Session.Protocol_Handler
 {
-  private var futures = Map.empty[String, JFuture[Unit]]
+  private var futures = Map.empty[String, Future[Unit]]
 
   private def fulfill(prover: Prover, id: String, tag: Invoke_Scala.Tag.Value, res: String): Unit =
     synchronized
@@ -83,9 +82,9 @@
       }
     }
 
-  private def cancel(prover: Prover, id: String, future: JFuture[Unit])
+  private def cancel(prover: Prover, id: String, future: Future[Unit])
   {
-    future.cancel(true)
+    future.cancel
     fulfill(prover, id, Invoke_Scala.Tag.INTERRUPT, "")
   }
 
@@ -94,7 +93,7 @@
     msg.properties match {
       case Markup.Invoke_Scala(name, id) =>
         futures += (id ->
-          Simple_Thread.submit_task {
+          Future.fork {
             val (tag, result) = Invoke_Scala.method(name, msg.text)
             fulfill(prover, id, tag, result)
           })
--- a/src/Pure/System/isabelle_system.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/System/isabelle_system.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -328,14 +328,10 @@
       proc.stdin.close
 
       val limited = new Limited_Progress(proc, progress_limit)
-      val (_, stdout) =
-        Simple_Thread.future("bash_stdout") {
-          File.read_lines(proc.stdout, limited(progress_stdout))
-        }
-      val (_, stderr) =
-        Simple_Thread.future("bash_stderr") {
-          File.read_lines(proc.stderr, limited(progress_stderr))
-        }
+      val stdout =
+        Future.thread("bash_stdout") { File.read_lines(proc.stdout, limited(progress_stdout)) }
+      val stderr =
+        Future.thread("bash_stderr") { File.read_lines(proc.stderr, limited(progress_stderr)) }
 
       val rc =
         try { proc.join }
--- a/src/Pure/System/message_channel.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/System/message_channel.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -60,11 +60,11 @@
     let
       val mbox = Mailbox.create ();
       val thread =
-        Simple_Thread.fork {name = "channel", stack_limit = NONE, interrupts = false}
+        Standard_Thread.fork {name = "channel", stack_limit = NONE, interrupts = false}
           (message_output mbox channel);
       fun send msg = Mailbox.send mbox (SOME msg);
       fun shutdown () =
-        (Mailbox.send mbox NONE; Mailbox.await_empty mbox; Simple_Thread.join thread);
+        (Mailbox.send mbox NONE; Mailbox.await_empty mbox; Standard_Thread.join thread);
     in Message_Channel {send = send, shutdown = shutdown} end
   else
     let
--- a/src/Pure/System/options.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/System/options.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -75,7 +75,7 @@
   private val PREFS = PREFS_DIR + Path.basic("preferences")
 
   lazy val options_syntax =
-    Outer_Syntax.init() + ":" + "=" + "--" +
+    Outer_Syntax.init() + ":" + "=" + "--" + Symbol.comment + Symbol.comment_decoded +
       (SECTION, Keyword.DOCUMENT_HEADING) + PUBLIC + (OPTION, Keyword.THY_DECL)
 
   lazy val prefs_syntax = Outer_Syntax.init() + "="
@@ -89,12 +89,15 @@
         { case s ~ n => if (s.isDefined) "-" + n else n } |
       atom("option value", tok => tok.is_name || tok.is_float)
 
+    def comment_marker: Parser[String] =
+      $$$("--") | $$$(Symbol.comment) | $$$(Symbol.comment_decoded)
+
     val option_entry: Parser[Options => Options] =
     {
       command(SECTION) ~! text ^^
         { case _ ~ a => (options: Options) => options.set_section(a) } |
       opt($$$(PUBLIC)) ~ command(OPTION) ~! (position(option_name) ~ $$$(":") ~ option_type ~
-      $$$("=") ~ option_value ~ ($$$("--") ~! text ^^ { case _ ~ x => x } | success(""))) ^^
+      $$$("=") ~ option_value ~ (comment_marker ~! text ^^ { case _ ~ x => x } | success(""))) ^^
         { case a ~ _ ~ ((b, pos) ~ _ ~ c ~ _ ~ d ~ e) =>
             (options: Options) => options.declare(a.isDefined, pos, b, c, d, e) }
     }
--- a/src/Pure/Thy/latex.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Thy/latex.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -10,7 +10,6 @@
   val output_known_symbols: (string -> bool) * (string -> bool) ->
     Symbol.symbol list -> string
   val output_symbols: Symbol.symbol list -> string
-  val output_ctrl_symbols: Symbol.symbol list -> string
   val output_token: Token.T -> string
   val begin_delim: string -> string
   val end_delim: string -> string
@@ -36,7 +35,7 @@
       | "\t" => "\\ "
       | "\n" => "\\isanewline\n"
       | s =>
-          if exists_string (fn s' => s = s') "\"#$%&'<>\\^_{}~"
+          if exists_string (fn s' => s = s') "\"#$%&',-<>\\^_`{}~"
           then enclose "{\\char`\\" "}" s else s);
 
 
@@ -99,11 +98,6 @@
   | Symbol.Malformed s => error (Symbol.malformed_msg s)
   | Symbol.EOF => error "Bad EOF symbol");
 
-fun output_ctrl_sym sym =
-  (case Symbol.decode sym of
-    Symbol.Control s => enclose "\\isactrl" " " s
-  | _ => sym);
-
 in
 
 val output_known_symbols = implode oo (map o output_known_sym);
@@ -119,8 +113,6 @@
         enclose "%\n\\isaantiq\n" "{}%\n\\endisaantiq\n"
           (output_symbols (map Symbol_Pos.symbol body)));
 
-val output_ctrl_symbols = implode o map output_ctrl_sym;
-
 end;
 
 
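
The escape set above is extended with ',', '-' and '`', so these characters are now
emitted as {\char`\X} as well, presumably to keep LaTeX from re-interpreting sequences
such as "--" in document text. A standalone Scala sketch of the per-character escaping
for the cases visible in this hunk:

object LatexEscapeSketch {
  // the same character set as in the ML code above ('\\' is a single backslash)
  private val special: Set[Char] = "\"#$%&',-<>\\^_`{}~".toSet

  def output_char(c: Char): String =
    c match {
      case '\t' => "\\ "
      case '\n' => "\\isanewline\n"
      case _ if special(c) => "{\\char`\\" + c + "}"
      case _ => c.toString
    }

  def output(s: String): String = s.map(output_char).mkString

  def main(args: Array[String]): Unit =
    println(output("a--b"))  // a{\char`\-}{\char`\-}b
}
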
--- a/src/Pure/Thy/markdown.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Thy/markdown.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -19,9 +19,9 @@
 
 signature MARKDOWN =
 sig
-  val is_control: Symbol.symbol -> bool
   datatype kind = Itemize | Enumerate | Description
   val print_kind: kind -> string
+  val is_control: Symbol.symbol -> bool
   type line
   val line_source: line -> Antiquote.text_antiquote list
   val line_is_item: line -> bool
@@ -39,9 +39,7 @@
 structure Markdown: MARKDOWN =
 struct
 
-(* document lines *)
-
-val is_control = member (op =) ["\\<^item>", "\\<^enum>", "\\<^descr>"];
+(* item kinds *)
 
 datatype kind = Itemize | Enumerate | Description;
 
@@ -49,6 +47,13 @@
   | print_kind Enumerate = "enumerate"
   | print_kind Description = "description";
 
+val kinds = [("item", Itemize), ("enum", Enumerate), ("descr", Description)];
+
+val is_control = member (op =) ["\\<^item>", "\\<^enum>", "\\<^descr>"];
+
+
+(* document lines *)
+
 datatype line =
   Line of
    {source: Antiquote.text_antiquote list,
@@ -84,19 +89,22 @@
 fun is_space ((s, _): Symbol_Pos.T) = s = Symbol.space;
 val is_empty = forall (fn Antiquote.Text ss => forall is_space ss | _ => false);
 
-val scan_marker =
-  Scan.many is_space -- Symbol_Pos.scan_pos --
-  Scan.option
-   (Symbol_Pos.$$ "\\<^item>" >> K Itemize ||
-    Symbol_Pos.$$ "\\<^enum>" >> K Enumerate ||
-    Symbol_Pos.$$ "\\<^descr>" >> K Description) --| Scan.many is_space
-  >> (fn ((sp, pos), item) => (length sp, item, if is_some item then pos else Position.none));
+fun strip_spaces (Antiquote.Text ss :: rest) =
+      let val (sp, ss') = take_prefix is_space ss
+      in (length sp, if null ss' then rest else Antiquote.Text ss' :: rest) end
+  | strip_spaces source = (0, source);
 
-fun read_marker (Antiquote.Text ss :: rest) =
-      (case Scan.finite Symbol_Pos.stopper scan_marker ss of
-        (marker, []) => (marker, rest)
-      | (marker, ss') => (marker, Antiquote.Text ss' :: rest))
-  | read_marker source = ((0, NONE, Position.none), source);
+fun read_marker source =
+  let val (indent, source') = strip_spaces source in
+    (case source' of
+      (control as Antiquote.Control {name = (name, pos), body = [], ...}) :: rest =>
+        let
+          val item = AList.lookup (op =) kinds name;
+          val item_pos = if is_some item then pos else Position.none;
+          val (_, rest') = strip_spaces (if is_some item then rest else control :: rest);
+        in ((indent, item, item_pos), rest') end
+    | _ => ((indent, NONE, Position.none), source'))
+  end;
 
 in
 
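
Item markers are now taken from already-lexed control antiquotations with names "item",
"enum" and "descr" (and empty body) instead of scanning the raw \<^item>, \<^enum> and
\<^descr> symbols. A standalone sketch of that lookup; the Kind type below is a plain
stand-in for the ML datatype:

object MarkdownKindsSketch {
  sealed trait Kind
  case object Itemize extends Kind
  case object Enumerate extends Kind
  case object Description extends Kind

  // same table as "kinds" in the ML code above
  val kinds: Map[String, Kind] =
    Map("item" -> Itemize, "enum" -> Enumerate, "descr" -> Description)

  def item_kind(control_name: String): Option[Kind] = kinds.get(control_name)
}
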
--- a/src/Pure/Thy/thy_output.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Thy/thy_output.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -177,7 +177,7 @@
 
 fun eval_antiquote _ (Antiquote.Text ss) = Symbol_Pos.content ss
   | eval_antiquote state (Antiquote.Control {name, body, ...}) =
-      eval_antiq state ([], Token.src name [Token.read_cartouche body])
+      eval_antiq state ([], Token.src name (if null body then [] else [Token.read_cartouche body]))
   | eval_antiquote state (Antiquote.Antiq {range = (pos, _), body, ...}) =
       let
         val keywords =
@@ -196,14 +196,10 @@
 fun output_text state {markdown} source =
   let
     val pos = Input.pos_of source;
-    val _ =
-      Position.report pos
-        (Markup.language_document
-          {symbols = Options.default_bool "document_symbols", delimited = Input.is_delimited source});
+    val _ = Position.report pos (Markup.language_document (Input.is_delimited source));
     val syms = Input.source_explode source;
 
-    val output_antiquote = eval_antiquote state #> Symbol.explode #> Latex.output_ctrl_symbols;
-    val output_antiquotes = map output_antiquote #> implode;
+    val output_antiquotes = map (eval_antiquote state) #> implode;
 
     fun output_line line =
       (if Markdown.line_is_item line then "\\item " else "") ^
@@ -430,7 +426,8 @@
             (Basic_Token cmd, (markup_false, d)))]));
 
     val cmt = Scan.peek (fn d =>
-      Parse.$$$ "--" |-- Parse.!!!! (improper |-- Parse.document_source) >>
+      (Parse.$$$ "--" || Parse.$$$ Symbol.comment) |--
+        Parse.!!!! (improper |-- Parse.document_source) >>
         (fn source => (NONE, (Markup_Token ("cmt", source), ("", d)))));
 
     val other = Scan.peek (fn d =>
@@ -603,6 +600,16 @@
 
 (** concrete antiquotations **)
 
+(* control spacing *)
+
+val _ =
+  Theory.setup
+   (antiquotation @{binding noindent} (Scan.succeed ()) (fn _ => fn () => "\\noindent") #>
+    antiquotation @{binding smallskip} (Scan.succeed ()) (fn _ => fn () => "\\smallskip") #>
+    antiquotation @{binding medskip} (Scan.succeed ()) (fn _ => fn () => "\\medskip") #>
+    antiquotation @{binding bigskip} (Scan.succeed ()) (fn _ => fn () => "\\bigskip"));
+
+
 (* control style *)
 
 local
@@ -615,7 +622,8 @@
 
 val _ =
   Theory.setup
-   (control_antiquotation @{binding emph} "\\emph{" "}" #>
+   (control_antiquotation @{binding footnote} "\\footnote{" "}" #>
+    control_antiquotation @{binding emph} "\\emph{" "}" #>
     control_antiquotation @{binding bold} "\\textbf{" "}");
 
 end;
--- a/src/Pure/Tools/build.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Tools/build.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -50,6 +50,9 @@
     files: List[Path],
     document_files: List[(Path, Path)],
     entry_digest: SHA1.Digest)
+  {
+    def timeout: Time = Time.seconds(options.real("timeout") * options.real("timeout_scale"))
+  }
 
   def is_pure(name: String): Boolean = name == "RAW" || name == "Pure"
 
@@ -342,7 +345,6 @@
         Map(timings.map({ case (name, (_, t)) => (name, t) }): _*).withDefaultValue(0.0)
 
       def outdegree(name: String): Int = graph.imm_succs(name).size
-      def timeout(name: String): Double = tree(name).options.real("timeout")
 
       object Ordering extends scala.math.Ordering[String]
       {
@@ -359,7 +361,7 @@
             case 0 =>
               compare_timing(name2, name1) match {
                 case 0 =>
-                  timeout(name2) compare timeout(name1) match {
+                  tree(name2).timeout compare tree(name1).timeout match {
                     case 0 => name1 compare name2
                     case ord => ord
                   }
@@ -598,8 +600,8 @@
         """
       }
 
-    private val (thread, result) =
-      Simple_Thread.future("build") {
+    private val result =
+      Future.thread("build") {
         Isabelle_System.bash_env(info.dir.file, env, script,
           progress_stdout = (line: String) =>
             Library.try_unprefix("\floading_theory = ", line) match {
@@ -614,13 +616,13 @@
           strict = false)
       }
 
-    def terminate: Unit = thread.interrupt
+    def terminate: Unit = result.cancel
     def is_finished: Boolean = result.is_finished
 
     @volatile private var was_timeout = false
     private val timeout_request: Option[Event_Timer.Request] =
     {
-      val timeout = info.options.seconds("timeout")
+      val timeout = info.timeout
       if (timeout > Time.zero)
         Some(Event_Timer.request(Time.now() + timeout) { terminate; was_timeout = true })
       else None
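
Session_Info now computes its timeout itself by scaling the "timeout" option with
"timeout_scale" (both option names are taken from the hunk above). A minimal sketch of
that computation with plain numbers instead of Isabelle's Options and Time:

object TimeoutSketch {
  def timeout_ms(timeout: Double, timeout_scale: Double): Long =
    math.round(timeout * timeout_scale * 1000)

  def main(args: Array[String]): Unit =
    println(timeout_ms(300.0, 2.0))  // 600000 ms: a 300s timeout doubled by the scale factor
}
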
--- a/src/Pure/Tools/debugger.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Tools/debugger.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -24,7 +24,7 @@
   if msg = "" then ()
   else
     Output.protocol_message
-      (Markup.debugger_output (Simple_Thread.the_name ()))
+      (Markup.debugger_output (Standard_Thread.the_name ()))
       [Markup.markup (kind, Markup.serial_properties (serial ())) msg];
 
 val writeln_message = output_message Markup.writelnN;
@@ -255,7 +255,7 @@
         (SOME (fn (_, break) =>
           if not (is_debugging ()) andalso (! break orelse is_break () orelse is_stepping ())
           then
-            (case Simple_Thread.get_name () of
+            (case Standard_Thread.get_name () of
               SOME thread_name => debugger_loop thread_name
             | NONE => ())
           else ()))));
--- a/src/Pure/Tools/debugger.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Tools/debugger.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -114,7 +114,7 @@
   case object Update
 
   private val delay_update =
-    Simple_Thread.delay_first(global_state.value.session.output_delay) {
+    Standard_Thread.delay_first(global_state.value.session.output_delay) {
       global_state.value.session.debugger_updates.post(Update)
     }
 
--- a/src/Pure/Tools/print_operation.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Tools/print_operation.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -20,7 +20,7 @@
 
   class Handler extends Session.Protocol_Handler
   {
-    private val print_operations = Synchronized(Nil: List[(String, String)])
+    private val print_operations = Synchronized[List[(String, String)]](Nil)
 
     def get: List[(String, String)] = print_operations.value
 
--- a/src/Pure/Tools/update_cartouches.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/Tools/update_cartouches.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -1,7 +1,7 @@
 /*  Title:      Pure/Tools/update_cartouches.scala
     Author:     Makarius
 
-Update theory syntax to use cartouches.
+Update theory syntax to use cartouches etc.
 */
 
 package isabelle
@@ -37,11 +37,11 @@
     }
   }
 
-  def update_cartouches(replace_text: Boolean, path: Path)
+  def update_cartouches(replace_comment: Boolean, replace_text: Boolean, path: Path)
   {
     val text0 = File.read(path)
 
-    // outer syntax cartouches
+    // outer syntax cartouches and comment markers
     val text1 =
       (for (tok <- Token.explode(Keyword.Keywords.empty, text0).iterator)
         yield {
@@ -52,6 +52,7 @@
               case s => tok.source
             }
           }
+          else if (replace_comment && tok.source == "--") Symbol.comment
           else tok.source
         }
       ).mkString
@@ -87,8 +88,10 @@
   {
     Command_Line.tool0 {
       args.toList match {
-        case Properties.Value.Boolean(replace_text) :: files =>
-          files.foreach(file => update_cartouches(replace_text, Path.explode(file)))
+        case Properties.Value.Boolean(replace_comment) ::
+            Properties.Value.Boolean(replace_text) :: files =>
+          files.foreach(file =>
+            update_cartouches(replace_comment, replace_text, Path.explode(file)))
         case _ => error("Bad arguments:\n" + cat_lines(args))
       }
     }
--- a/src/Pure/build-jars	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Pure/build-jars	Tue Nov 10 14:43:29 2015 +0000
@@ -16,7 +16,7 @@
   Concurrent/future.scala
   Concurrent/mailbox.scala
   Concurrent/par_list.scala
-  Concurrent/simple_thread.scala
+  Concurrent/standard_thread.scala
   Concurrent/synchronized.scala
   GUI/color_value.scala
   GUI/gui.scala
--- a/src/Tools/Graphview/graphview.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/Graphview/graphview.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -156,7 +156,7 @@
 
   object Selection
   {
-    private val state = Synchronized(List.empty[Graph_Display.Node])
+    private val state = Synchronized[List[Graph_Display.Node]](Nil)
 
     def get(): List[Graph_Display.Node] = state.value
     def contains(node: Graph_Display.Node): Boolean = get().contains(node)
--- a/src/Tools/Graphview/mutator_event.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/Graphview/mutator_event.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -21,7 +21,7 @@
 
   class Bus
   {
-    private val receivers = Synchronized(List.empty[Receiver])
+    private val receivers = Synchronized[List[Receiver]](Nil)
 
     def += (r: Receiver) { receivers.change(Library.insert(r)) }
     def -= (r: Receiver) { receivers.change(Library.remove(r)) }
--- a/src/Tools/jEdit/src/active.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/active.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -30,7 +30,7 @@
             // FIXME avoid hard-wired stuff
             elem match {
               case XML.Elem(Markup(Markup.BROWSER, _), body) =>
-                Future.fork {
+                Standard_Thread.fork("browser") {
                   val graph_file = Isabelle_System.tmp_file("graph")
                   File.write(graph_file, XML.content(body))
                   Isabelle_System.bash_env(null,
@@ -39,7 +39,7 @@
                 }
 
               case XML.Elem(Markup(Markup.GRAPHVIEW, _), body) =>
-                Future.fork {
+                Standard_Thread.fork("graphview") {
                   val graph =
                     Exn.capture { Graph_Display.decode_graph(body).transitive_reduction_acyclic }
                   GUI_Thread.later { Graphview_Dockable(view, snapshot, graph) }
--- a/src/Tools/jEdit/src/completion_popup.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/completion_popup.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -128,16 +128,17 @@
           if (line_range.contains(text_area.getCaretPosition)) {
             JEdit_Lib.before_caret_range(text_area, rendering).try_restrict(line_range) match {
               case Some(range) if !range.is_singularity =>
-                rendering.semantic_completion(range) match {
+                val range0 =
+                  Completion.Result.merge(Completion.History.empty,
+                    syntax_completion(Completion.History.empty, false, Some(rendering)),
+                    Completion.Result.merge(Completion.History.empty,
+                      path_completion(rendering),
+                      Bibtex_JEdit.completion(Completion.History.empty, text_area, rendering)))
+                  .map(_.range)
+                rendering.semantic_completion(range0, range) match {
+                  case None => range0
                   case Some(Text.Info(_, Completion.No_Completion)) => None
                   case Some(Text.Info(range1, _: Completion.Names)) => Some(range1)
-                  case None =>
-                    Completion.Result.merge(Completion.History.empty,
-                      syntax_completion(Completion.History.empty, false, Some(rendering)),
-                      Completion.Result.merge(Completion.History.empty,
-                        path_completion(rendering),
-                        Bibtex_JEdit.completion(Completion.History.empty, text_area, rendering)))
-                    .map(_.range)
                 }
               case _ => None
             }
@@ -344,43 +345,36 @@
       }
 
       if (buffer.isEditable) {
-        val (no_completion, semantic_completion, opt_rendering) =
+        val opt_rendering = PIDE.document_view(text_area).map(_.get_rendering())
+        val result0 = syntax_completion(history, explicit, opt_rendering)
+        val (no_completion, semantic_completion) =
         {
-          PIDE.document_view(text_area) match {
-            case Some(doc_view) =>
-              val rendering = doc_view.get_rendering()
-              val (no_completion, result) =
-              {
-                val caret_range = JEdit_Lib.before_caret_range(text_area, rendering)
-                rendering.semantic_completion(caret_range) match {
-                  case Some(Text.Info(_, Completion.No_Completion)) => (true, None)
-                  case Some(Text.Info(range, names: Completion.Names)) =>
-                    val result =
-                      JEdit_Lib.try_get_text(buffer, range) match {
-                        case Some(original) => names.complete(range, history, decode, original)
-                        case None => None
-                      }
-                    (false, result)
-                  case None => (false, None)
-                }
+          opt_rendering match {
+            case Some(rendering) =>
+              val caret_range = JEdit_Lib.before_caret_range(text_area, rendering)
+              rendering.semantic_completion(result0.map(_.range), caret_range) match {
+                case Some(Text.Info(_, Completion.No_Completion)) => (true, None)
+                case Some(Text.Info(range, names: Completion.Names)) =>
+                  JEdit_Lib.try_get_text(buffer, range) match {
+                    case Some(original) => (false, names.complete(range, history, decode, original))
+                    case None => (false, None)
+                  }
+                case None => (false, None)
               }
-              (no_completion, result, Some(rendering))
-            case None => (false, None, None)
+            case None => (false, None)
           }
         }
         if (no_completion) false
         else {
           val result =
           {
-            val result0 =
+            val result1 =
               if (word_only) None
-              else
-                Completion.Result.merge(history, semantic_completion,
-                  syntax_completion(history, explicit, opt_rendering))
+              else Completion.Result.merge(history, semantic_completion, result0)
             opt_rendering match {
-              case None => result0
+              case None => result1
               case Some(rendering) =>
-                Completion.Result.merge(history, result0,
+                Completion.Result.merge(history, result1,
                   Completion.Result.merge(history,
                     Spell_Checker.completion(text_area, explicit, rendering),
                     Completion.Result.merge(history,
@@ -771,4 +765,3 @@
     popup.hide
   }
 }
-
--- a/src/Tools/jEdit/src/documentation_dockable.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/documentation_dockable.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -59,7 +59,7 @@
         if (path.is_file)
           PIDE.editor.goto_file(true, view, File.platform_path(path))
         else {
-          Future.fork {
+          Standard_Thread.fork("documentation") {
             try { Doc.view(path) }
             catch {
               case exn: Throwable =>
--- a/src/Tools/jEdit/src/jedit_editor.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/jedit_editor.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -194,7 +194,7 @@
     new Hyperlink {
       val external = true
       def follow(view: View): Unit =
-        Future.fork {
+        Standard_Thread.fork("hyperlink_url") {
           try { Isabelle_System.open(name) }
           catch {
             case exn: Throwable =>
--- a/src/Tools/jEdit/src/jedit_options.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/jedit_options.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -9,7 +9,7 @@
 
 import isabelle._
 
-import java.awt.Color
+import java.awt.{Font, Color}
 import javax.swing.{InputVerifier, JComponent, UIManager}
 import javax.swing.text.JTextComponent
 
@@ -91,7 +91,8 @@
         val default_font = UIManager.getFont("TextField.font")
         val text_area =
           new TextArea with Option_Component {
-            if (default_font != null) font = default_font
+            if (default_font != null) font =
+              new Font(default_font.getFamily, default_font.getStyle, default_font.getSize)
             name = opt_name
             val title = opt_title
             def load = text = value.check_name(opt_name).value
--- a/src/Tools/jEdit/src/monitor_dockable.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/monitor_dockable.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -22,7 +22,7 @@
 
 class Monitor_Dockable(view: View, position: String) extends Dockable(view, position)
 {
-  private val rev_stats = Synchronized(Nil: List[Properties.T])
+  private val rev_stats = Synchronized[List[Properties.T]](Nil)
 
 
   /* chart data -- owned by GUI thread */
--- a/src/Tools/jEdit/src/pretty_text_area.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/pretty_text_area.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -10,7 +10,6 @@
 
 import isabelle._
 
-import java.util.concurrent.{Future => JFuture}
 import java.awt.{Color, Font, Toolkit, Window}
 import java.awt.event.KeyEvent
 import javax.swing.JTextField
@@ -19,9 +18,11 @@
 import scala.swing.{Label, Component}
 import scala.util.matching.Regex
 
-import org.gjt.sp.jedit.{jEdit, View, Registers}
+import org.gjt.sp.jedit.{jEdit, View, Registers, JEditBeanShellAction}
+import org.gjt.sp.jedit.input.{DefaultInputHandlerProvider, TextAreaInputHandler}
 import org.gjt.sp.jedit.textarea.{AntiAlias, JEditEmbeddedTextArea}
 import org.gjt.sp.jedit.syntax.SyntaxStyle
+import org.gjt.sp.jedit.gui.KeyEventTranslator
 import org.gjt.sp.util.{SyntaxUtilities, Log}
 
 
@@ -75,7 +76,7 @@
   private var current_base_results = Command.Results.empty
   private var current_rendering: Rendering =
     Pretty_Text_Area.text_rendering(current_base_snapshot, current_base_results, Nil)._2
-  private var future_refresh: Option[JFuture[Unit]] = None
+  private var future_refresh: Option[Future[Unit]] = None
 
   private val rich_text_area =
     new Rich_Text_Area(view, text_area, () => current_rendering, close_action,
@@ -128,9 +129,9 @@
       val base_results = current_base_results
       val formatted_body = Pretty.formatted(current_body, margin, metric)
 
-      future_refresh.map(_.cancel(true))
+      future_refresh.map(_.cancel)
       future_refresh =
-        Some(Simple_Thread.submit_task {
+        Some(Future.fork {
           val (text, rendering) =
             try { Pretty_Text_Area.text_rendering(base_snapshot, base_results, formatted_body) }
             catch { case exn: Throwable => Log.log(Log.ERROR, this, exn); throw exn }
@@ -140,11 +141,9 @@
             current_rendering = rendering
             JEdit_Lib.buffer_edit(getBuffer) {
               rich_text_area.active_reset()
-              getBuffer.setReadOnly(false)
               getBuffer.setFoldHandler(new Fold_Handling.Document_Fold_Handler(rendering))
               setText(text)
               setCaretPosition(0)
-              getBuffer.setReadOnly(true)
             }
           }
         })
@@ -225,6 +224,14 @@
 
   /* key handling */
 
+  inputHandlerProvider =
+    new DefaultInputHandlerProvider(new TextAreaInputHandler(text_area) {
+      override def getAction(action: String): JEditBeanShellAction =
+        text_area.getActionContext.getAction(action)
+      override def processKeyEvent(evt: KeyEvent, from: Int, global: Boolean) {}
+      override def handleKey(key: KeyEventTranslator.Key, dry_run: Boolean): Boolean = false
+    })
+
   addKeyListener(JEdit_Lib.key_listener(
     key_pressed = (evt: KeyEvent) =>
       {
@@ -261,9 +268,7 @@
   getPainter.setLineHighlightEnabled(false)
 
   getBuffer.setTokenMarker(Isabelle.mode_token_marker("isabelle-output").get)
-  getBuffer.setReadOnly(true)
   getBuffer.setStringProperty("noWordSep", "_'.?")
 
   rich_text_area.activate()
 }
-
--- a/src/Tools/jEdit/src/rendering.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/rendering.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -94,6 +94,7 @@
 
   def token_markup(syntax: Outer_Syntax, token: Token): Byte =
     if (token.is_command) command_style(syntax.keywords.command_kind(token.content).getOrElse(""))
+    else if (token.is_keyword && token.source == Symbol.comment_decoded) JEditToken.NULL
     else if (token.is_delimiter) JEditToken.OPERATOR
     else token_style(token.kind)
 
@@ -114,7 +115,6 @@
       ML_Lex.Kind.CHAR -> LITERAL2,
       ML_Lex.Kind.STRING -> LITERAL1,
       ML_Lex.Kind.SPACE -> NULL,
-      ML_Lex.Kind.CARTOUCHE -> COMMENT4,
       ML_Lex.Kind.COMMENT -> COMMENT1,
       ML_Lex.Kind.ANTIQ -> NULL,
       ML_Lex.Kind.ANTIQ_START -> LITERAL4,
@@ -143,7 +143,7 @@
   private val language_context_elements =
     Markup.Elements(Markup.STRING, Markup.ALT_STRING, Markup.VERBATIM,
       Markup.CARTOUCHE, Markup.COMMENT, Markup.LANGUAGE,
-      Markup.ML_STRING, Markup.ML_CARTOUCHE, Markup.ML_COMMENT)
+      Markup.ML_STRING, Markup.ML_COMMENT)
 
   private val language_elements = Markup.Elements(Markup.LANGUAGE)
 
@@ -292,12 +292,17 @@
 
   /* completion */
 
-  def semantic_completion(range: Text.Range): Option[Text.Info[Completion.Semantic]] =
+  def semantic_completion(completed_range: Option[Text.Range], range: Text.Range)
+      : Option[Text.Info[Completion.Semantic]] =
     if (snapshot.is_outdated) None
     else {
       snapshot.select(range, Rendering.semantic_completion_elements, _ =>
         {
-          case Completion.Semantic.Info(info) => Some(info)
+          case Completion.Semantic.Info(info) =>
+            completed_range match {
+              case Some(range0) if range0.contains(info.range) && range0 != info.range => None
+              case _ => Some(info)
+            }
           case _ => None
         }).headOption.map(_.info)
     }
@@ -309,9 +314,7 @@
           if (delimited) Some(Completion.Language_Context(language, symbols, antiquotes))
           else None
         case Text.Info(_, elem)
-        if elem.name == Markup.ML_STRING ||
-          elem.name == Markup.ML_CARTOUCHE ||
-          elem.name == Markup.ML_COMMENT =>
+        if elem.name == Markup.ML_STRING || elem.name == Markup.ML_COMMENT =>
           Some(Completion.Language_Context.ML_inner)
         case Text.Info(_, _) =>
           Some(Completion.Language_Context.inner)
@@ -675,7 +678,7 @@
     val (states, other) =
       results.iterator.map(_._2).filterNot(Protocol.is_result(_)).toList
         .partition(Protocol.is_state(_))
-    states ::: other
+    if (options.bool("editor_output_state")) states ::: other else other
   }
 
 
@@ -778,7 +781,6 @@
       Markup.ML_NUMERAL -> inner_numeral_color,
       Markup.ML_CHAR -> inner_quoted_color,
       Markup.ML_STRING -> inner_quoted_color,
-      Markup.ML_CARTOUCHE -> inner_cartouche_color,
       Markup.ML_COMMENT -> inner_comment_color,
       Markup.SML_STRING -> inner_quoted_color,
       Markup.SML_COMMENT -> inner_comment_color)
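
semantic_completion now also receives the range of a completion already produced by
other means and suppresses semantic completions that this range strictly contains. A
standalone sketch of just that filter, with a simplified Range type in place of the
real Text.Range and Completion.Semantic:

object CompletionFilterSketch {
  final case class Range(start: Int, stop: Int) {
    // simplified containment check, not the real Text.Range.contains
    def contains(other: Range): Boolean = start <= other.start && other.stop <= stop
  }

  // keep a semantic completion unless the already-completed range strictly contains it
  def keep(completed_range: Option[Range], info_range: Range): Boolean =
    completed_range match {
      case Some(range0) => !(range0.contains(info_range) && range0 != info_range)
      case None => true
    }
}
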
--- a/src/Tools/jEdit/src/scala_console.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/scala_console.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -137,7 +137,7 @@
 
   private class Interpreter
   {
-    private val running = Synchronized(None: Option[Thread])
+    private val running = Synchronized[Option[Thread]](None)
     def interrupt { running.change(opt => { opt.foreach(_.interrupt); opt }) }
 
     private val settings = new GenericRunnerSettings(report_error)
@@ -165,7 +165,7 @@
           }
           finally {
             running.change(_ => None)
-            Thread.interrupted()
+            Thread.interrupted
           }
           GUI_Thread.later {
             if (err != null) err.commandDone()
--- a/src/Tools/jEdit/src/session_build.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/session_build.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -161,7 +161,7 @@
     setLocationRelativeTo(view)
     setVisible(true)
 
-    Simple_Thread.fork("session_build") {
+    Standard_Thread.fork("session_build") {
       progress.echo("Build started for Isabelle/" + Isabelle_Logic.session_name() + " ...")
 
       val (out, rc) =
--- a/src/Tools/jEdit/src/text_overview.scala	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/jEdit/src/text_overview.scala	Tue Nov 10 14:43:29 2015 +0000
@@ -11,7 +11,6 @@
 
 import scala.annotation.tailrec
 
-import java.util.concurrent.{Future => JFuture}
 import java.awt.{Graphics, Graphics2D, BorderLayout, Dimension, Color}
 import java.awt.event.{MouseAdapter, MouseEvent}
 import javax.swing.{JPanel, ToolTipManager}
@@ -102,8 +101,8 @@
 
   /* asynchronous refresh */
 
-  private var future_refresh: Option[JFuture[Unit]] = None
-  private def cancel(): Unit = future_refresh.map(_.cancel(true))
+  private var future_refresh: Option[Future[Unit]] = None
+  private def cancel(): Unit = future_refresh.map(_.cancel)
 
   def invoke(): Unit = delay_refresh.invoke()
   def revoke(): Unit = delay_refresh.revoke()
@@ -128,7 +127,7 @@
             }
 
             future_refresh =
-              Some(Simple_Thread.submit_task {
+              Some(Future.fork {
                 val line_count = overview.line_count
                 val char_count = overview.char_count
                 val L = overview.L
--- a/src/Tools/permanent_interpretation.ML	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/Tools/permanent_interpretation.ML	Tue Nov 10 14:43:29 2015 +0000
@@ -99,7 +99,7 @@
 val _ =
   Outer_Syntax.local_theory_to_proof @{command_keyword permanent_interpretation}
     "prove interpretation of locale expression into named theory"
-    (Parse.!!! (Parse_Spec.locale_expression true) --
+    (Parse.!!! Parse_Spec.locale_expression --
       Scan.optional (@{keyword "defining"} |-- Parse.and_list1 (Parse_Spec.opt_thm_name ":"
         -- ((Parse.binding -- Parse.opt_mixfix') --| @{keyword "="} -- Parse.term))) [] --
       Scan.optional (Parse.where_ |-- Parse.and_list1 (Parse_Spec.opt_thm_name ":" -- Parse.prop)) []
--- a/src/ZF/ex/Group.thy	Tue Nov 10 14:18:41 2015 +0000
+++ b/src/ZF/ex/Group.thy	Tue Nov 10 14:43:29 2015 +0000
@@ -429,7 +429,7 @@
 proof -
   have "h ` \<one> \<cdot>\<^bsub>H\<^esub> \<one>\<^bsub>H\<^esub> = (h ` \<one>) \<cdot>\<^bsub>H\<^esub> (h ` \<one>)"
     by (simp add: hom_mult [symmetric] del: hom_mult)
-  then show ?thesis by (simp del: r_one)
+  then show ?thesis by (simp del: H.r_one)
 qed
 
 lemma (in group_hom) inv_closed [simp]:
@@ -445,7 +445,7 @@
   also from x have "... = h ` x \<cdot>\<^bsub>H\<^esub> inv\<^bsub>H\<^esub> (h ` x)"
     by (simp add: hom_mult [symmetric] H.r_inv del: hom_mult)
   finally have "h ` x \<cdot>\<^bsub>H\<^esub> h ` (inv x) = h ` x \<cdot>\<^bsub>H\<^esub> inv\<^bsub>H\<^esub> (h ` x)" .
-  with x show ?thesis by (simp del: inv)
+  with x show ?thesis by (simp del: H.inv)
 qed
 
 subsection \<open>Commutative Structures\<close>
@@ -596,7 +596,7 @@
   "set_inv\<^bsub>G\<^esub> H == \<Union>h\<in>H. {inv\<^bsub>G\<^esub> h}"
 
 
-locale normal = subgroup: subgroup + group +
+locale normal = subgroup + group +
   assumes coset_eq: "(\<forall>x \<in> carrier(G). H #> x = x <# H)"
 
 notation