merged.
author huffman
Mon, 05 Jan 2009 07:54:16 -0800
changeset 29354 6ef5ddf22d3a
parent 29353 3d2e35c23c66 (current diff)
parent 29350 c7735554d291 (diff)
child 29355 642cac18e155
child 29371 bab4e907d881
src/FOL/ex/LocaleTest.thy
src/HOL/Complex/Fundamental_Theorem_Algebra.thy
src/HOL/Complex/README.html
src/HOL/Complex/document/root.tex
src/HOL/Hyperreal/SEQ.thy
src/HOL/Library/Dense_Linear_Order.thy
src/HOL/Real/HahnBanach/Bounds.thy
src/HOL/Real/HahnBanach/FunctionNorm.thy
src/HOL/Real/HahnBanach/FunctionOrder.thy
src/HOL/Real/HahnBanach/HahnBanach.thy
src/HOL/Real/HahnBanach/HahnBanachExtLemmas.thy
src/HOL/Real/HahnBanach/HahnBanachLemmas.thy
src/HOL/Real/HahnBanach/HahnBanachSupLemmas.thy
src/HOL/Real/HahnBanach/Linearform.thy
src/HOL/Real/HahnBanach/NormedSpace.thy
src/HOL/Real/HahnBanach/README.html
src/HOL/Real/HahnBanach/ROOT.ML
src/HOL/Real/HahnBanach/Subspace.thy
src/HOL/Real/HahnBanach/VectorSpace.thy
src/HOL/Real/HahnBanach/ZornLemma.thy
src/HOL/Real/HahnBanach/document/root.bib
src/HOL/Real/HahnBanach/document/root.tex
src/HOL/Real/RealVector.thy
src/Pure/Thy/thy_edit.ML
--- a/Admin/isatest/isatest-stats	Mon Dec 29 11:04:27 2008 -0800
+++ b/Admin/isatest/isatest-stats	Mon Jan 05 07:54:16 2009 -0800
@@ -31,28 +31,33 @@
   HOL-Word \
   HOL-ex \
   ZF \
-  ZF-Constructible\
+  ZF-Constructible \
   ZF-UNITY"
 
 AFP_SESSIONS="\
-  CoreC++\
-  LinearQuantifierElim\
-  HOL-DiskPaxos\
-  HOL-Fermat3_4\
-  HOL-Flyspeck-Tame\
-  HOL-Group-Ring-Module\
-  HOL-JinjaThreads\
-  HOL-Jinja\
-  HOL-JiveDataStoreModel\
-  HOL-POPLmark-deBruijn\
-  HOL-Program-Conflict-Analysis\
-  HOL-RSAPSS\
-  HOL-Recursion-Theory-I\
-  HOL-SumSquares\
-  HOL-Topology\
-  HOL-Valuation\
-  Simpl-BDD\
-  Simpl"
+  CoreC++ \
+  HOL-BytecodeLogicJmlTypes \
+  HOL-DiskPaxos \
+  HOL-Fermat3_4 \
+  HOL-Flyspeck-Tame \
+  HOL-Group-Ring-Module \
+  HOL-Jinja \
+  HOL-JinjaThreads \
+  HOL-JiveDataStoreModel \
+  HOL-POPLmark-deBruijn \
+  HOL-Program-Conflict-Analysis \
+  HOL-RSAPSS \
+  HOL-Recursion-Theory-I \
+  HOL-SATSolverVerification \
+  HOL-SIFPL \
+  HOL-SenSocialChoice \
+  HOL-Slicing \
+  HOL-SumSquares \
+  HOL-Topology \
+  HOL-Valuation \
+  LinearQuantifierElim \
+  Simpl \
+  Simpl-BDD"
 
 for PLATFORM in $PLATFORMS
 do
--- a/Admin/isatest/settings/at-mac-poly-5.1-para	Mon Dec 29 11:04:27 2008 -0800
+++ b/Admin/isatest/settings/at-mac-poly-5.1-para	Mon Jan 05 07:54:16 2009 -0800
@@ -4,7 +4,7 @@
   ML_SYSTEM="polyml-5.2.1"
   ML_PLATFORM="x86-darwin"
   ML_HOME="$POLYML_HOME/$ML_PLATFORM"
-  ML_OPTIONS="--immutable 800 --mutable 1200"
+  ML_OPTIONS="--mutable 500 --immutable 1500"
 
 
 ISABELLE_HOME_USER=~/isabelle-at-mac-poly-e
--- a/NEWS	Mon Dec 29 11:04:27 2008 -0800
+++ b/NEWS	Mon Jan 05 07:54:16 2009 -0800
@@ -130,7 +130,7 @@
 consider declaring a new locale with additional type constraints on the
 parameters (context element "constrains").
 
-* Dropped "locale (open)".  INCOMPATBILITY.
+* Dropped "locale (open)".  INCOMPATIBILITY.
 
 * Interpretation commands no longer attempt to simplify goal.
 INCOMPATIBILITY: in rare situations the generated goal differs.  Use
@@ -139,6 +139,36 @@
 * Interpretation commands no longer accept interpretation attributes.
 INCOMPATBILITY.
 
+* Complete re-implementation of locales.  INCOMPATIBILITY.
+The most important changes are listed below.  See documentation
+(forthcoming) and tutorial (also forthcoming) for details.
+
+- In locale expressions, instantiation replaces renaming.  Parameters
+must be declared in a for clause.  To aid compatibility with previous
+parameter inheritance, in locale declarations, parameters that are not
+'touched' (instantiation position "_" or omitted) are implicitly added
+with their syntax at the beginning of the for clause.
+
+- Syntax from abbreviations and definitions in locales is available in
+locale expressions and context elements.  The latter is particularly
+useful in locale declarations.
+
+- More flexible mechanisms to qualify names generated by locale
+expressions.  Qualifiers (prefixes) may be specified in locale
+expressions.  Available are normal qualifiers (syntax "name:") and strict
+qualifiers (syntax "name!:").  The latter must occur in name references
+and are useful to avoid accidental hiding of names, the former are
+optional.  Qualifiers derived from the parameter names of a locale are no
+longer generated.
+
+- "sublocale l < e" replaces "interpretation l < e".  The instantiation
+clause in "interpretation" and "interpret" (square brackets) is no
+longer available.  Use locale expressions.
+
+- When converting proof scripts, be sure to replace qualifiers in
+"interpretation" and "interpret" by strict qualifiers.  Qualifiers in
+locale expressions range over a single locale instance only.
+
 * Command 'instance': attached definitions no longer accepted.
 INCOMPATIBILITY, use proper 'instantiation' target.
 
@@ -157,11 +187,12 @@
 
 *** HOL ***
 
-* Made repository layout more coherent with logical
-distribution structure:
+* Made source layout more coherent with logical distribution
+structure:
 
     src/HOL/Library/RType.thy ~> src/HOL/Typerep.thy
     src/HOL/Library/Code_Message.thy ~> src/HOL/
+    src/HOL/Library/Dense_Linear_Order.thy ~> src/HOL/
     src/HOL/Library/GCD.thy ~> src/HOL/
     src/HOL/Library/Order_Relation.thy ~> src/HOL/
     src/HOL/Library/Parity.thy ~> src/HOL/
@@ -177,6 +208,7 @@
     src/HOL/Complex/Complex_Main.thy ~> src/HOL/
     src/HOL/Complex/Complex.thy ~> src/HOL/
     src/HOL/Complex/FrechetDeriv.thy ~> src/HOL/
+    src/HOL/Complex/Fundamental_Theorem_Algebra.thy ~> src/HOL/
     src/HOL/Hyperreal/Deriv.thy ~> src/HOL/
     src/HOL/Hyperreal/Fact.thy ~> src/HOL/
     src/HOL/Hyperreal/Integration.thy ~> src/HOL/
@@ -186,9 +218,12 @@
     src/HOL/Hyperreal/MacLaurin.thy ~> src/HOL/
     src/HOL/Hyperreal/NthRoot.thy ~> src/HOL/
     src/HOL/Hyperreal/Series.thy ~> src/HOL/
+    src/HOL/Hyperreal/SEQ.thy ~> src/HOL/
     src/HOL/Hyperreal/Taylor.thy ~> src/HOL/
     src/HOL/Hyperreal/Transcendental.thy ~> src/HOL/
     src/HOL/Real/Float ~> src/HOL/Library/
+    src/HOL/Real/HahnBanach ~> src/HOL/HahnBanach
+    src/HOL/Real/RealVector.thy ~> src/HOL/
 
     src/HOL/arith_data.ML ~> src/HOL/Tools
     src/HOL/hologic.ML ~> src/HOL/Tools
--- a/doc-src/IsarAdvanced/Classes/Thy/Classes.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Classes/Thy/Classes.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -375,8 +375,8 @@
 
 text {* \noindent together with a corresponding interpretation: *}
 
-interpretation %quote idem_class:
-  idem ["f \<Colon> (\<alpha>\<Colon>idem) \<Rightarrow> \<alpha>"]
+interpretation %quote idem_class':    (* FIXME proper prefix? *)
+  idem "f \<Colon> (\<alpha>\<Colon>idem) \<Rightarrow> \<alpha>"
 proof qed (rule idem)
 
 text {*
@@ -459,7 +459,7 @@
   of monoids for lists:
 *}
 
-interpretation %quote list_monoid: monoid [append "[]"]
+class_interpretation %quote list_monoid: monoid [append "[]"]
   proof qed auto
 
 text {*
@@ -474,10 +474,10 @@
   "replicate 0 _ = []"
   | "replicate (Suc n) xs = xs @ replicate n xs"
 
-interpretation %quote list_monoid: monoid [append "[]"] where
+class_interpretation %quote list_monoid: monoid [append "[]"] where
   "monoid.pow_nat append [] = replicate"
 proof -
-  interpret monoid [append "[]"] ..
+  class_interpret monoid [append "[]"] ..
   show "monoid.pow_nat append [] = replicate"
   proof
     fix n
--- a/doc-src/IsarAdvanced/Classes/Thy/document/Classes.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Classes/Thy/document/Classes.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -681,8 +681,8 @@
 %
 \isatagquote
 \isacommand{interpretation}\isamarkupfalse%
-\ idem{\isacharunderscore}class{\isacharcolon}\isanewline
-\ \ idem\ {\isacharbrackleft}{\isachardoublequoteopen}f\ {\isasymColon}\ {\isacharparenleft}{\isasymalpha}{\isasymColon}idem{\isacharparenright}\ {\isasymRightarrow}\ {\isasymalpha}{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ idem{\isacharunderscore}class{\isacharprime}{\isacharcolon}\ \ \ \ \isanewline
+\ \ idem\ {\isachardoublequoteopen}f\ {\isasymColon}\ {\isacharparenleft}{\isasymalpha}{\isasymColon}idem{\isacharparenright}\ {\isasymRightarrow}\ {\isasymalpha}{\isachardoublequoteclose}\isanewline
 \isacommand{proof}\isamarkupfalse%
 \ \isacommand{qed}\isamarkupfalse%
 \ {\isacharparenleft}rule\ idem{\isacharparenright}%
@@ -843,7 +843,7 @@
 \endisadelimquote
 %
 \isatagquote
-\isacommand{interpretation}\isamarkupfalse%
+\isacommand{class{\isacharunderscore}interpretation}\isamarkupfalse%
 \ list{\isacharunderscore}monoid{\isacharcolon}\ monoid\ {\isacharbrackleft}append\ {\isachardoublequoteopen}{\isacharbrackleft}{\isacharbrackright}{\isachardoublequoteclose}{\isacharbrackright}\isanewline
 \ \ \isacommand{proof}\isamarkupfalse%
 \ \isacommand{qed}\isamarkupfalse%
@@ -874,12 +874,12 @@
 \ \ {\isachardoublequoteopen}replicate\ {\isadigit{0}}\ {\isacharunderscore}\ {\isacharequal}\ {\isacharbrackleft}{\isacharbrackright}{\isachardoublequoteclose}\isanewline
 \ \ {\isacharbar}\ {\isachardoublequoteopen}replicate\ {\isacharparenleft}Suc\ n{\isacharparenright}\ xs\ {\isacharequal}\ xs\ {\isacharat}\ replicate\ n\ xs{\isachardoublequoteclose}\isanewline
 \isanewline
-\isacommand{interpretation}\isamarkupfalse%
+\isacommand{class{\isacharunderscore}interpretation}\isamarkupfalse%
 \ list{\isacharunderscore}monoid{\isacharcolon}\ monoid\ {\isacharbrackleft}append\ {\isachardoublequoteopen}{\isacharbrackleft}{\isacharbrackright}{\isachardoublequoteclose}{\isacharbrackright}\ \isakeyword{where}\isanewline
 \ \ {\isachardoublequoteopen}monoid{\isachardot}pow{\isacharunderscore}nat\ append\ {\isacharbrackleft}{\isacharbrackright}\ {\isacharequal}\ replicate{\isachardoublequoteclose}\isanewline
 \isacommand{proof}\isamarkupfalse%
 \ {\isacharminus}\isanewline
-\ \ \isacommand{interpret}\isamarkupfalse%
+\ \ \isacommand{class{\isacharunderscore}interpret}\isamarkupfalse%
 \ monoid\ {\isacharbrackleft}append\ {\isachardoublequoteopen}{\isacharbrackleft}{\isacharbrackright}{\isachardoublequoteclose}{\isacharbrackright}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{show}\isamarkupfalse%
@@ -1231,7 +1231,7 @@
 \hspace*{0pt}\\
 \hspace*{0pt}fun inverse{\char95}int i = IntInf.{\char126}~i;\\
 \hspace*{0pt}\\
-\hspace*{0pt}val neutral{\char95}int :~IntInf.int = (0 :~IntInf.int);\\
+\hspace*{0pt}val neutral{\char95}int :~IntInf.int = (0 :~IntInf.int)\\
 \hspace*{0pt}\\
 \hspace*{0pt}fun mult{\char95}int i j = IntInf.+ (i,~j);\\
 \hspace*{0pt}\\
@@ -1258,7 +1258,7 @@
 \hspace*{0pt} ~~~else inverse A{\char95}~(pow{\char95}nat (monoid{\char95}group A{\char95}) (nat (IntInf.{\char126}~k)) x));\\
 \hspace*{0pt}\\
 \hspace*{0pt}val example :~IntInf.int =\\
-\hspace*{0pt} ~pow{\char95}int group{\char95}int (10 :~IntInf.int) ({\char126}2 :~IntInf.int);\\
+\hspace*{0pt} ~pow{\char95}int group{\char95}int (10 :~IntInf.int) ({\char126}2 :~IntInf.int)\\
 \hspace*{0pt}\\
 \hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
--- a/doc-src/IsarAdvanced/Codegen/Thy/ML.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/ML.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -24,9 +24,9 @@
   \begin{mldecls}
   @{index_ML Code.add_eqn: "thm -> theory -> theory"} \\
   @{index_ML Code.del_eqn: "thm -> theory -> theory"} \\
-  @{index_ML Code.add_eqnl: "string * (thm * bool) list Lazy.T -> theory -> theory"} \\
-  @{index_ML Code.map_pre: "(MetaSimplifier.simpset -> MetaSimplifier.simpset) -> theory -> theory"} \\
-  @{index_ML Code.map_post: "(MetaSimplifier.simpset -> MetaSimplifier.simpset) -> theory -> theory"} \\
+  @{index_ML Code.add_eqnl: "string * (thm * bool) list lazy -> theory -> theory"} \\
+  @{index_ML Code.map_pre: "(simpset -> simpset) -> theory -> theory"} \\
+  @{index_ML Code.map_post: "(simpset -> simpset) -> theory -> theory"} \\
   @{index_ML Code.add_functrans: "string * (theory -> (thm * bool) list -> (thm * bool) list option)
     -> theory -> theory"} \\
   @{index_ML Code.del_functrans: "string -> theory -> theory"} \\
@@ -80,7 +80,7 @@
   \begin{mldecls}
   @{index_ML Code_Unit.read_const: "theory -> string -> string"} \\
   @{index_ML Code_Unit.head_eqn: "theory -> thm -> string * ((string * sort) list * typ)"} \\
-  @{index_ML Code_Unit.rewrite_eqn: "MetaSimplifier.simpset -> thm -> thm"} \\
+  @{index_ML Code_Unit.rewrite_eqn: "simpset -> thm -> thm"} \\
   \end{mldecls}
 
   \begin{description}
--- a/doc-src/IsarAdvanced/Codegen/Thy/Setup.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/Setup.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -5,7 +5,7 @@
 
 ML {* no_document use_thys
   ["Efficient_Nat", "Code_Char_chr", "Product_ord", "Imperative_HOL",
-   "~~/src/HOL/Complex/ex/ReflectedFerrack"] *}
+   "~~/src/HOL/ex/ReflectedFerrack"] *}
 
 ML_val {* Code_Target.code_width := 74 *}
 
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/Adaption.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/Adaption.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -281,9 +281,9 @@
 \hspace*{0pt}and less{\char95}eq{\char95}nat (Suc m) n = less{\char95}nat m n\\
 \hspace*{0pt} ~| less{\char95}eq{\char95}nat Zero{\char95}nat n = True;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun in{\char95}interval (k, l) n = anda (less{\char95}eq{\char95}nat k n) (less{\char95}eq{\char95}nat n l);\\
+\hspace*{0pt}fun in{\char95}interval (k,~l) n = anda (less{\char95}eq{\char95}nat k n) (less{\char95}eq{\char95}nat n l);\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -357,9 +357,9 @@
 \hspace*{0pt}and less{\char95}eq{\char95}nat (Suc m) n = less{\char95}nat m n\\
 \hspace*{0pt} ~| less{\char95}eq{\char95}nat Zero{\char95}nat n = true;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun in{\char95}interval (k, l) n = (less{\char95}eq{\char95}nat k n) andalso (less{\char95}eq{\char95}nat n l);\\
+\hspace*{0pt}fun in{\char95}interval (k,~l) n = (less{\char95}eq{\char95}nat k n) andalso (less{\char95}eq{\char95}nat n l);\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -414,9 +414,9 @@
 \hspace*{0pt}and less{\char95}eq{\char95}nat (Suc m) n = less{\char95}nat m n\\
 \hspace*{0pt} ~| less{\char95}eq{\char95}nat Zero{\char95}nat n = true;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun in{\char95}interval (k, l) n = less{\char95}eq{\char95}nat k n andalso less{\char95}eq{\char95}nat n l;\\
+\hspace*{0pt}fun in{\char95}interval (k,~l) n = less{\char95}eq{\char95}nat k n andalso less{\char95}eq{\char95}nat n l;\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/Introduction.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/Introduction.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -164,20 +164,20 @@
 \hspace*{0pt}\\
 \hspace*{0pt}datatype 'a queue = Queue of 'a list * 'a list;\\
 \hspace*{0pt}\\
-\hspace*{0pt}val empty :~'a queue = Queue ([], []);\\
+\hspace*{0pt}val empty :~'a queue = Queue ([],~[])\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun dequeue (Queue ([], [])) = (NONE, Queue ([], []))\\
-\hspace*{0pt} ~| dequeue (Queue (xs, y ::~ys)) = (SOME y, Queue (xs, ys))\\
-\hspace*{0pt} ~| dequeue (Queue (v ::~va, [])) =\\
+\hspace*{0pt}fun dequeue (Queue ([],~[])) = (NONE,~Queue ([],~[]))\\
+\hspace*{0pt} ~| dequeue (Queue (xs,~y ::~ys)) = (SOME y,~Queue (xs,~ys))\\
+\hspace*{0pt} ~| dequeue (Queue (v ::~va,~[])) =\\
 \hspace*{0pt} ~~~let\\
 \hspace*{0pt} ~~~~~val y ::~ys = rev (v ::~va);\\
 \hspace*{0pt} ~~~in\\
-\hspace*{0pt} ~~~~~(SOME y, Queue ([], ys))\\
+\hspace*{0pt} ~~~~~(SOME y,~Queue ([],~ys))\\
 \hspace*{0pt} ~~~end;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun enqueue x (Queue (xs, ys)) = Queue (x ::~xs, ys);\\
+\hspace*{0pt}fun enqueue x (Queue (xs,~ys)) = Queue (x ::~xs,~ys);\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -233,31 +233,31 @@
 \hspace*{0pt}module Example where {\char123}\\
 \hspace*{0pt}\\
 \hspace*{0pt}\\
-\hspace*{0pt}foldla ::~forall a b. (a -> b -> a) -> a -> [b] -> a;\\
+\hspace*{0pt}foldla ::~forall a b.~(a -> b -> a) -> a -> [b] -> a;\\
 \hspace*{0pt}foldla f a [] = a;\\
 \hspace*{0pt}foldla f a (x :~xs) = foldla f (f a x) xs;\\
 \hspace*{0pt}\\
-\hspace*{0pt}rev ::~forall a. [a] -> [a];\\
+\hspace*{0pt}rev ::~forall a.~[a] -> [a];\\
 \hspace*{0pt}rev xs = foldla ({\char92}~xsa x -> x :~xsa) [] xs;\\
 \hspace*{0pt}\\
-\hspace*{0pt}list{\char95}case ::~forall t a. t -> (a -> [a] -> t) -> [a] -> t;\\
+\hspace*{0pt}list{\char95}case ::~forall t a.~t -> (a -> [a] -> t) -> [a] -> t;\\
 \hspace*{0pt}list{\char95}case f1 f2 (a :~list) = f2 a list;\\
 \hspace*{0pt}list{\char95}case f1 f2 [] = f1;\\
 \hspace*{0pt}\\
 \hspace*{0pt}data Queue a = Queue [a] [a];\\
 \hspace*{0pt}\\
-\hspace*{0pt}empty ::~forall a. Queue a;\\
+\hspace*{0pt}empty ::~forall a.~Queue a;\\
 \hspace*{0pt}empty = Queue [] [];\\
 \hspace*{0pt}\\
-\hspace*{0pt}dequeue ::~forall a. Queue a -> (Maybe a, Queue a);\\
-\hspace*{0pt}dequeue (Queue [] []) = (Nothing, Queue [] []);\\
-\hspace*{0pt}dequeue (Queue xs (y :~ys)) = (Just y, Queue xs ys);\\
+\hspace*{0pt}dequeue ::~forall a.~Queue a -> (Maybe a,~Queue a);\\
+\hspace*{0pt}dequeue (Queue [] []) = (Nothing,~Queue [] []);\\
+\hspace*{0pt}dequeue (Queue xs (y :~ys)) = (Just y,~Queue xs ys);\\
 \hspace*{0pt}dequeue (Queue (v :~va) []) =\\
 \hspace*{0pt} ~let {\char123}\\
 \hspace*{0pt} ~~~(y :~ys) = rev (v :~va);\\
-\hspace*{0pt} ~{\char125}~in (Just y, Queue [] ys);\\
+\hspace*{0pt} ~{\char125}~in (Just y,~Queue [] ys);\\
 \hspace*{0pt}\\
-\hspace*{0pt}enqueue ::~forall a. a -> Queue a -> Queue a;\\
+\hspace*{0pt}enqueue ::~forall a.~a -> Queue a -> Queue a;\\
 \hspace*{0pt}enqueue x (Queue xs ys) = Queue (x :~xs) ys;\\
 \hspace*{0pt}\\
 \hspace*{0pt}{\char125}%
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/ML.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/ML.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -54,9 +54,9 @@
 \begin{mldecls}
   \indexml{Code.add\_eqn}\verb|Code.add_eqn: thm -> theory -> theory| \\
   \indexml{Code.del\_eqn}\verb|Code.del_eqn: thm -> theory -> theory| \\
-  \indexml{Code.add\_eqnl}\verb|Code.add_eqnl: string * (thm * bool) list Lazy.T -> theory -> theory| \\
-  \indexml{Code.map\_pre}\verb|Code.map_pre: (MetaSimplifier.simpset -> MetaSimplifier.simpset) -> theory -> theory| \\
-  \indexml{Code.map\_post}\verb|Code.map_post: (MetaSimplifier.simpset -> MetaSimplifier.simpset) -> theory -> theory| \\
+  \indexml{Code.add\_eqnl}\verb|Code.add_eqnl: string * (thm * bool) list lazy -> theory -> theory| \\
+  \indexml{Code.map\_pre}\verb|Code.map_pre: (simpset -> simpset) -> theory -> theory| \\
+  \indexml{Code.map\_post}\verb|Code.map_post: (simpset -> simpset) -> theory -> theory| \\
   \indexml{Code.add\_functrans}\verb|Code.add_functrans: string * (theory -> (thm * bool) list -> (thm * bool) list option)|\isasep\isanewline%
 \verb|    -> theory -> theory| \\
   \indexml{Code.del\_functrans}\verb|Code.del_functrans: string -> theory -> theory| \\
@@ -126,7 +126,7 @@
 \begin{mldecls}
   \indexml{Code\_Unit.read\_const}\verb|Code_Unit.read_const: theory -> string -> string| \\
   \indexml{Code\_Unit.head\_eqn}\verb|Code_Unit.head_eqn: theory -> thm -> string * ((string * sort) list * typ)| \\
-  \indexml{Code\_Unit.rewrite\_eqn}\verb|Code_Unit.rewrite_eqn: MetaSimplifier.simpset -> thm -> thm| \\
+  \indexml{Code\_Unit.rewrite\_eqn}\verb|Code_Unit.rewrite_eqn: simpset -> thm -> thm| \\
   \end{mldecls}
 
   \begin{description}
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/Program.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/Program.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -87,10 +87,10 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}dequeue ::~forall a. Queue a -> (Maybe a, Queue a);\\
-\hspace*{0pt}dequeue (Queue xs (y :~ys)) = (Just y, Queue xs ys);\\
+\hspace*{0pt}dequeue ::~forall a.~Queue a -> (Maybe a,~Queue a);\\
+\hspace*{0pt}dequeue (Queue xs (y :~ys)) = (Just y,~Queue xs ys);\\
 \hspace*{0pt}dequeue (Queue xs []) =\\
-\hspace*{0pt} ~(if nulla xs then (Nothing, Queue [] [])\\
+\hspace*{0pt} ~(if nulla xs then (Nothing,~Queue [] [])\\
 \hspace*{0pt} ~~~else dequeue (Queue [] (rev xs)));%
 \end{isamarkuptext}%
 \isamarkuptrue%
@@ -286,7 +286,7 @@
 \hspace*{0pt} ~neutral ::~a;\\
 \hspace*{0pt}{\char125};\\
 \hspace*{0pt}\\
-\hspace*{0pt}pow ::~forall a. (Monoid a) => Nat -> a -> a;\\
+\hspace*{0pt}pow ::~forall a.~(Monoid a) => Nat -> a -> a;\\
 \hspace*{0pt}pow Zero{\char95}nat a = neutral;\\
 \hspace*{0pt}pow (Suc n) a = mult a (pow n a);\\
 \hspace*{0pt}\\
@@ -346,7 +346,7 @@
 \hspace*{0pt}type 'a semigroup = {\char123}mult :~'a -> 'a -> 'a{\char125};\\
 \hspace*{0pt}fun mult (A{\char95}:'a semigroup) = {\char35}mult A{\char95};\\
 \hspace*{0pt}\\
-\hspace*{0pt}type 'a monoid = {\char123}Program{\char95}{\char95}semigroup{\char95}monoid :~'a semigroup, neutral :~'a{\char125};\\
+\hspace*{0pt}type 'a monoid = {\char123}Program{\char95}{\char95}semigroup{\char95}monoid :~'a semigroup,~neutral :~'a{\char125};\\
 \hspace*{0pt}fun semigroup{\char95}monoid (A{\char95}:'a monoid) = {\char35}Program{\char95}{\char95}semigroup{\char95}monoid A{\char95};\\
 \hspace*{0pt}fun neutral (A{\char95}:'a monoid) = {\char35}neutral A{\char95};\\
 \hspace*{0pt}\\
@@ -356,7 +356,7 @@
 \hspace*{0pt}fun plus{\char95}nat (Suc m) n = plus{\char95}nat m (Suc n)\\
 \hspace*{0pt} ~| plus{\char95}nat Zero{\char95}nat n = n;\\
 \hspace*{0pt}\\
-\hspace*{0pt}val neutral{\char95}nat :~nat = Suc Zero{\char95}nat;\\
+\hspace*{0pt}val neutral{\char95}nat :~nat = Suc Zero{\char95}nat\\
 \hspace*{0pt}\\
 \hspace*{0pt}fun mult{\char95}nat Zero{\char95}nat n = Zero{\char95}nat\\
 \hspace*{0pt} ~| mult{\char95}nat (Suc m) n = plus{\char95}nat n (mult{\char95}nat m n);\\
@@ -364,12 +364,12 @@
 \hspace*{0pt}val semigroup{\char95}nat = {\char123}mult = mult{\char95}nat{\char125}~:~nat semigroup;\\
 \hspace*{0pt}\\
 \hspace*{0pt}val monoid{\char95}nat =\\
-\hspace*{0pt} ~{\char123}Program{\char95}{\char95}semigroup{\char95}monoid = semigroup{\char95}nat, neutral = neutral{\char95}nat{\char125}~:\\
+\hspace*{0pt} ~{\char123}Program{\char95}{\char95}semigroup{\char95}monoid = semigroup{\char95}nat,~neutral = neutral{\char95}nat{\char125}~:\\
 \hspace*{0pt} ~nat monoid;\\
 \hspace*{0pt}\\
 \hspace*{0pt}fun bexp n = pow monoid{\char95}nat n (Suc (Suc Zero{\char95}nat));\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -675,7 +675,7 @@
 \hspace*{0pt} ~| plus{\char95}nat m Zero{\char95}nat = m\\
 \hspace*{0pt} ~| plus{\char95}nat Zero{\char95}nat n = n;\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -794,7 +794,7 @@
 \hspace*{0pt} ~~~~~~~~~~~~else collect{\char95}duplicates A{\char95}~xs (z ::~ys) zs)\\
 \hspace*{0pt} ~~~~~else collect{\char95}duplicates A{\char95}~(z ::~xs) (z ::~ys) zs);\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -918,7 +918,7 @@
 \hspace*{0pt}type 'a eq = {\char123}eq :~'a -> 'a -> bool{\char125};\\
 \hspace*{0pt}fun eq (A{\char95}:'a eq) = {\char35}eq A{\char95};\\
 \hspace*{0pt}\\
-\hspace*{0pt}type 'a ord = {\char123}less{\char95}eq :~'a -> 'a -> bool, less :~'a -> 'a -> bool{\char125};\\
+\hspace*{0pt}type 'a ord = {\char123}less{\char95}eq :~'a -> 'a -> bool,~less :~'a -> 'a -> bool{\char125};\\
 \hspace*{0pt}fun less{\char95}eq (A{\char95}:'a ord) = {\char35}less{\char95}eq A{\char95};\\
 \hspace*{0pt}fun less (A{\char95}:'a ord) = {\char35}less A{\char95};\\
 \hspace*{0pt}\\
@@ -930,16 +930,16 @@
 \hspace*{0pt}type 'a order = {\char123}Orderings{\char95}{\char95}preorder{\char95}order :~'a preorder{\char125};\\
 \hspace*{0pt}fun preorder{\char95}order (A{\char95}:'a order) = {\char35}Orderings{\char95}{\char95}preorder{\char95}order A{\char95};\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun less{\char95}eqa (A1{\char95}, A2{\char95}) B{\char95}~(x1, y1) (x2, y2) =\\
+\hspace*{0pt}fun less{\char95}eqa (A1{\char95},~A2{\char95}) B{\char95}~(x1,~y1) (x2,~y2) =\\
 \hspace*{0pt} ~less ((ord{\char95}preorder o preorder{\char95}order) A2{\char95}) x1 x2 orelse\\
 \hspace*{0pt} ~~~eqop A1{\char95}~x1 x2 andalso\\
 \hspace*{0pt} ~~~~~less{\char95}eq ((ord{\char95}preorder o preorder{\char95}order) B{\char95}) y1 y2\\
-\hspace*{0pt} ~| less{\char95}eqa (A1{\char95}, A2{\char95}) B{\char95}~(x1, y1) (x2, y2) =\\
+\hspace*{0pt} ~| less{\char95}eqa (A1{\char95},~A2{\char95}) B{\char95}~(x1,~y1) (x2,~y2) =\\
 \hspace*{0pt} ~~~less ((ord{\char95}preorder o preorder{\char95}order) A2{\char95}) x1 x2 orelse\\
 \hspace*{0pt} ~~~~~eqop A1{\char95}~x1 x2 andalso\\
 \hspace*{0pt} ~~~~~~~less{\char95}eq ((ord{\char95}preorder o preorder{\char95}order) B{\char95}) y1 y2;\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -1052,10 +1052,10 @@
 \hspace*{0pt} ~| list{\char95}all2 p xs [] = null xs\\
 \hspace*{0pt} ~| list{\char95}all2 p [] ys = null ys;\\
 \hspace*{0pt}\\
-\hspace*{0pt}fun eq{\char95}monotype (Mono (tyco1, typargs1)) (Mono (tyco2, typargs2)) =\\
+\hspace*{0pt}fun eq{\char95}monotype (Mono (tyco1,~typargs1)) (Mono (tyco2,~typargs2)) =\\
 \hspace*{0pt} ~eq{\char95}nat tyco1 tyco2 andalso list{\char95}all2 eq{\char95}monotype typargs1 typargs2;\\
 \hspace*{0pt}\\
-\hspace*{0pt}end; (*struct Example*)%
+\hspace*{0pt}end;~(*struct Example*)%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -1108,12 +1108,12 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}strict{\char95}dequeue ::~forall a. Queue a -> (a, Queue a);\\
-\hspace*{0pt}strict{\char95}dequeue (Queue xs (y :~ys)) = (y, Queue xs ys);\\
+\hspace*{0pt}strict{\char95}dequeue ::~forall a.~Queue a -> (a,~Queue a);\\
+\hspace*{0pt}strict{\char95}dequeue (Queue xs (y :~ys)) = (y,~Queue xs ys);\\
 \hspace*{0pt}strict{\char95}dequeue (Queue xs []) =\\
 \hspace*{0pt} ~let {\char123}\\
 \hspace*{0pt} ~~~(y :~ys) = rev xs;\\
-\hspace*{0pt} ~{\char125}~in (y, Queue [] ys);%
+\hspace*{0pt} ~{\char125}~in (y,~Queue [] ys);%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -1204,13 +1204,13 @@
 \begin{isamarkuptext}%
 \isatypewriter%
 \noindent%
-\hspace*{0pt}empty{\char95}queue ::~forall a. a;\\
+\hspace*{0pt}empty{\char95}queue ::~forall a.~a;\\
 \hspace*{0pt}empty{\char95}queue = error {\char34}empty{\char95}queue{\char34};\\
 \hspace*{0pt}\\
-\hspace*{0pt}strict{\char95}dequeue' ::~forall a. Queue a -> (a, Queue a);\\
+\hspace*{0pt}strict{\char95}dequeue' ::~forall a.~Queue a -> (a,~Queue a);\\
 \hspace*{0pt}strict{\char95}dequeue' (Queue xs []) =\\
 \hspace*{0pt} ~(if nulla xs then empty{\char95}queue else strict{\char95}dequeue' (Queue [] (rev xs)));\\
-\hspace*{0pt}strict{\char95}dequeue' (Queue xs (y :~ys)) = (y, Queue xs ys);%
+\hspace*{0pt}strict{\char95}dequeue' (Queue xs (y :~ys)) = (y,~Queue xs ys);%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
--- a/doc-src/IsarAdvanced/Codegen/Thy/examples/Example.hs	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/examples/Example.hs	Mon Jan 05 07:54:16 2009 -0800
@@ -1,3 +1,5 @@
+{-# OPTIONS_GHC -fglasgow-exts #-}
+
 module Example where {
 
 
--- a/doc-src/IsarAdvanced/Codegen/Thy/examples/example.ML	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Codegen/Thy/examples/example.ML	Mon Jan 05 07:54:16 2009 -0800
@@ -11,7 +11,7 @@
 
 datatype 'a queue = Queue of 'a list * 'a list;
 
-val empty : 'a queue = Queue ([], []);
+val empty : 'a queue = Queue ([], [])
 
 fun dequeue (Queue ([], [])) = (NONE, Queue ([], []))
   | dequeue (Queue (xs, y :: ys)) = (SOME y, Queue (xs, ys))
--- a/doc-src/IsarAdvanced/Functions/Thy/document/Functions.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarAdvanced/Functions/Thy/document/Functions.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -440,7 +440,7 @@
 \isamarkuptrue%
 %
 \begin{isamarkuptext}%
-The the key to this error message is the matrix at the bottom. The rows
+The key to this error message is the matrix at the bottom. The rows
   of that matrix correspond to the different recursive calls (In our
   case, there is just one). The columns are the function's arguments 
   (expressed through different measure functions, which map the
@@ -674,7 +674,7 @@
 {\isacharbar}\ {\isachardoublequoteopen}And\ X\ X\ {\isacharequal}\ X{\isachardoublequoteclose}%
 \begin{isamarkuptext}%
 This definition is useful, because the equations can directly be used
-  as simplification rules rules. But the patterns overlap: For example,
+  as simplification rules. But the patterns overlap: For example,
   the expression \isa{And\ T\ T} is matched by both the first and
   the second equation. By default, Isabelle makes the patterns disjoint by
   splitting them up, producing instances:%
@@ -829,13 +829,21 @@
   either \isa{{\isadigit{0}}}, \isa{{\isadigit{1}}} or \isa{n\ {\isacharplus}\ {\isadigit{2}}}:
 
   \begin{isabelle}%
-\ {\isadigit{1}}{\isachardot}\ {\isasymAnd}P\ x{\isachardot}\ {\isasymlbrakk}x\ {\isacharequal}\ {\isadigit{0}}\ {\isasymLongrightarrow}\ P{\isacharsemicolon}\ x\ {\isacharequal}\ {\isadigit{1}}\ {\isasymLongrightarrow}\ P{\isacharsemicolon}\ {\isasymAnd}n{\isachardot}\ x\ {\isacharequal}\ n\ {\isacharplus}\ {\isadigit{2}}\ {\isasymLongrightarrow}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ P%
+\ {\isadigit{1}}{\isachardot}\ {\isasymAnd}P\ x{\isachardot}\ {\isasymlbrakk}x\ {\isacharequal}\ {\isadigit{0}}\ {\isasymLongrightarrow}\ P{\isacharsemicolon}\ x\ {\isacharequal}\ {\isadigit{1}}\ {\isasymLongrightarrow}\ P{\isacharsemicolon}\ {\isasymAnd}n{\isachardot}\ x\ {\isacharequal}\ n\ {\isacharplus}\ {\isadigit{2}}\ {\isasymLongrightarrow}\ P{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\isanewline
+\ {\isadigit{2}}{\isachardot}\ {\isadigit{0}}\ {\isacharequal}\ {\isadigit{0}}\ {\isasymLongrightarrow}\ {\isadigit{1}}\ {\isacharequal}\ {\isadigit{1}}\isanewline
+\ {\isadigit{3}}{\isachardot}\ {\isadigit{0}}\ {\isacharequal}\ {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isadigit{1}}\ {\isacharequal}\ {\isadigit{1}}\isanewline
+\ {\isadigit{4}}{\isachardot}\ {\isasymAnd}n{\isachardot}\ {\isadigit{0}}\ {\isacharequal}\ n\ {\isacharplus}\ {\isadigit{2}}\ {\isasymLongrightarrow}\ {\isadigit{1}}\ {\isacharequal}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ n\ {\isacharplus}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ {\isacharparenleft}Suc\ n{\isacharparenright}\isanewline
+\ {\isadigit{5}}{\isachardot}\ {\isadigit{1}}\ {\isacharequal}\ {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isadigit{1}}\ {\isacharequal}\ {\isadigit{1}}\isanewline
+\ {\isadigit{6}}{\isachardot}\ {\isasymAnd}n{\isachardot}\ {\isadigit{1}}\ {\isacharequal}\ n\ {\isacharplus}\ {\isadigit{2}}\ {\isasymLongrightarrow}\ {\isadigit{1}}\ {\isacharequal}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ n\ {\isacharplus}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ {\isacharparenleft}Suc\ n{\isacharparenright}\isanewline
+\ {\isadigit{7}}{\isachardot}\ {\isasymAnd}n\ na{\isachardot}\isanewline
+\isaindent{\ {\isadigit{7}}{\isachardot}\ \ \ \ }n\ {\isacharplus}\ {\isadigit{2}}\ {\isacharequal}\ na\ {\isacharplus}\ {\isadigit{2}}\ {\isasymLongrightarrow}\isanewline
+\isaindent{\ {\isadigit{7}}{\isachardot}\ \ \ \ }fib{\isadigit{2}}{\isacharunderscore}sumC\ n\ {\isacharplus}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ {\isacharparenleft}Suc\ n{\isacharparenright}\ {\isacharequal}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ na\ {\isacharplus}\ fib{\isadigit{2}}{\isacharunderscore}sumC\ {\isacharparenleft}Suc\ na{\isacharparenright}%
 \end{isabelle}
 
   This is an arithmetic triviality, but unfortunately the
   \isa{arith} method cannot handle this specific form of an
   elimination rule. However, we can use the method \isa{atomize{\isacharunderscore}elim} to do an ad-hoc conversion to a disjunction of
-  existentials, which can then be soved by the arithmetic decision procedure.
+  existentials, which can then be solved by the arithmetic decision procedure.
   Pattern compatibility and termination are automatic as usual.%
 \end{isamarkuptxt}%
 \isamarkuptrue%
--- a/doc-src/IsarOverview/Isar/Logic.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarOverview/Isar/Logic.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -34,54 +34,51 @@
 be enclosed in double quotes. However, we will continue to do so for
 uniformity.
 
-Trivial proofs, in particular those by assumption, should be trivial
-to perform. Proof ``.'' does just that (and a bit more). Thus
-naming of assumptions is often superfluous: *}
+Instead of applying fact @{text a} via the @{text rule} method, we can
+also push it directly onto our goal.  The proof is then immediate,
+which is formally written as ``.'' in Isar: *}
 lemma "A \<longrightarrow> A"
 proof
-  assume "A"
-  show "A" .
+  assume a: "A"
+  from a show "A" .
 qed
 
-text{* To hide proofs by assumption further, \isakeyword{by}@{text"(method)"}
-first applies @{text method} and then tries to solve all remaining subgoals
-by assumption: *}
+text{* We can also push several facts towards a goal, and put another
+rule in between to establish some result that is one step further
+removed.  We illustrate this by introducing a trivial conjunction: *}
 lemma "A \<longrightarrow> A \<and> A"
 proof
-  assume "A"
-  show "A \<and> A" by(rule conjI)
+  assume a: "A"
+  from a and a show "A \<and> A" by(rule conjI)
 qed
 text{*\noindent Rule @{thm[source]conjI} is of course @{thm conjI}.
-A drawback of implicit proofs by assumption is that it
-is no longer obvious where an assumption is used.
 
 Proofs of the form \isakeyword{by}@{text"(rule"}~\emph{name}@{text")"}
-can be abbreviated to ``..''  if \emph{name} refers to one of the
+can be abbreviated to ``..'' if \emph{name} refers to one of the
 predefined introduction rules (or elimination rules, see below): *}
 
 lemma "A \<longrightarrow> A \<and> A"
 proof
-  assume "A"
-  show "A \<and> A" ..
+  assume a: "A"
+  from a and a show "A \<and> A" ..
 qed
 text{*\noindent
 This is what happens: first the matching introduction rule @{thm[source]conjI}
-is applied (first ``.''), then the two subgoals are solved by assumption
-(second ``.''). *}
+is applied (first ``.''), the remaining problem is solved immediately (second ``.''). *}
 
 subsubsection{*Elimination rules*}
 
 text{*A typical elimination rule is @{thm[source]conjE}, $\land$-elimination:
 @{thm[display,indent=5]conjE}  In the following proof it is applied
 by hand, after its first (\emph{major}) premise has been eliminated via
-@{text"[OF AB]"}: *}
+@{text"[OF ab]"}: *}
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
-  assume AB: "A \<and> B"
+  assume ab: "A \<and> B"
   show "B \<and> A"
-  proof (rule conjE[OF AB])  -- {*@{text"conjE[OF AB]"}: @{thm conjE[OF AB]} *}
-    assume "A" "B"
-    show ?thesis ..
+  proof (rule conjE[OF ab])  -- {*@{text"conjE[OF ab]"}: @{thm conjE[OF ab]} *}
+    assume a: "A" and b: "B"
+    from b and a show ?thesis ..
   qed
 qed
 text{*\noindent Note that the term @{text"?thesis"} always stands for the
@@ -106,11 +103,11 @@
 
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
-  assume AB: "A \<and> B"
-  from AB show "B \<and> A"
+  assume ab: "A \<and> B"
+  from ab show "B \<and> A"
   proof
-    assume "A" "B"
-    show ?thesis ..
+    assume a: "A" and b: "B"
+    from b and a show ?thesis ..
   qed
 qed
 
@@ -120,15 +117,16 @@
 such that the proof of each proposition builds on the previous proposition.
 \end{quote}
 The previous proposition can be referred to via the fact @{text this}.
-This greatly reduces the need for explicit naming of propositions:
+This greatly reduces the need for explicit naming of propositions.  We also
+rearrange the additional inner assumptions into proper order for immediate use:
 *}
 lemma "A \<and> B \<longrightarrow> B \<and> A"
 proof
   assume "A \<and> B"
   from this show "B \<and> A"
   proof
-    assume "A" "B"
-    show ?thesis ..
+    assume "B" "A"
+    from this show ?thesis ..
   qed
 qed
 
@@ -199,11 +197,11 @@
     assume nn: "\<not> (\<not> A \<or> \<not> B)"
     have "\<not> A"
     proof
-      assume "A"
+      assume a: "A"
       have "\<not> B"
       proof
-        assume "B"
-        have "A \<and> B" ..
+        assume b: "B"
+        from a and b have "A \<and> B" ..
         with n show False ..
       qed
       hence "\<not> A \<or> \<not> B" ..
@@ -282,28 +280,28 @@
 \isakeyword{assumes} and \isakeyword{shows} elements which allow direct
 naming of assumptions: *}
 
-lemma assumes AB: "large_A \<and> large_B"
+lemma assumes ab: "large_A \<and> large_B"
   shows "large_B \<and> large_A" (is "?B \<and> ?A")
 proof
-  from AB show "?B" ..
+  from ab show "?B" ..
 next
-  from AB show "?A" ..
+  from ab show "?A" ..
 qed
 text{*\noindent Note the difference between @{text ?AB}, a term, and
-@{text AB}, a fact.
+@{text ab}, a fact.
 
 Finally we want to start the proof with $\land$-elimination so we
 don't have to perform it twice, as above. Here is a slick way to
 achieve this: *}
 
-lemma assumes AB: "large_A \<and> large_B"
+lemma assumes ab: "large_A \<and> large_B"
   shows "large_B \<and> large_A" (is "?B \<and> ?A")
-using AB
+using ab
 proof
-  assume "?A" "?B" show ?thesis ..
+  assume "?B" "?A" thus ?thesis ..
 qed
 text{*\noindent Command \isakeyword{using} can appear before a proof
-and adds further facts to those piped into the proof. Here @{text AB}
+and adds further facts to those piped into the proof. Here @{text ab}
 is the only such fact and it triggers $\land$-elimination. Another
 frequent idiom is as follows:
 \begin{center}
@@ -319,23 +317,23 @@
 not be what we had in mind.
 A simple ``@{text"-"}'' prevents this \emph{faux pas}: *}
 
-lemma assumes AB: "A \<or> B" shows "B \<or> A"
+lemma assumes ab: "A \<or> B" shows "B \<or> A"
 proof -
-  from AB show ?thesis
+  from ab show ?thesis
   proof
-    assume A show ?thesis ..
+    assume A thus ?thesis ..
   next
-    assume B show ?thesis ..
+    assume B thus ?thesis ..
   qed
 qed
 text{*\noindent Alternatively one can feed @{prop"A \<or> B"} directly
 into the proof, thus triggering the elimination rule: *}
-lemma assumes AB: "A \<or> B" shows "B \<or> A"
-using AB
+lemma assumes ab: "A \<or> B" shows "B \<or> A"
+using ab
 proof
-  assume A show ?thesis ..
+  assume A thus ?thesis ..
 next
-  assume B show ?thesis ..
+  assume B thus ?thesis ..
 qed
 text{* \noindent Remember that eliminations have priority over
 introductions.
@@ -416,7 +414,7 @@
   proof              -- "@{thm[source]exE}: @{thm exE}"
     fix x
     assume "P(f x)"
-    show ?thesis ..  -- "@{thm[source]exI}: @{thm exI}"
+    thus ?thesis ..  -- "@{thm[source]exI}: @{thm exI}"
   qed
 qed
 text{*\noindent Explicit $\exists$-elimination as seen above can become
@@ -499,12 +497,12 @@
       assume "y \<in> ?S"
       hence "y \<notin> f y"   by simp
       hence "y \<notin> ?S"    by(simp add: `?S = f y`)
-      thus False         by contradiction
+      with `y \<in> ?S` show False by contradiction
     next
       assume "y \<notin> ?S"
       hence "y \<in> f y"   by simp
       hence "y \<in> ?S"    by(simp add: `?S = f y`)
-      thus False         by contradiction
+      with `y \<notin> ?S` show False by contradiction
     qed
   qed
 qed
--- a/doc-src/IsarOverview/Isar/document/Logic.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/IsarOverview/Isar/document/Logic.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -97,9 +97,9 @@
 be enclosed in double quotes. However, we will continue to do so for
 uniformity.
 
-Trivial proofs, in particular those by assumption, should be trivial
-to perform. Proof ``.'' does just that (and a bit more). Thus
-naming of assumptions is often superfluous:%
+Instead of applying fact \isa{a} via the \isa{rule} method, we can
+also push it directly onto our goal.  The proof is then immediate,
+which is formally written as ``.'' in Isar:%
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
@@ -113,8 +113,9 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ a\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -127,9 +128,9 @@
 \endisadelimproof
 %
 \begin{isamarkuptext}%
-To hide proofs by assumption further, \isakeyword{by}\isa{{\isacharparenleft}method{\isacharparenright}}
-first applies \isa{method} and then tries to solve all remaining subgoals
-by assumption:%
+We can also push several facts towards a goal, and put another
+rule in between to establish some result that is one step further
+removed.  We illustrate this by introducing a trivial conjunction:%
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
@@ -143,8 +144,9 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ a\ \isakeyword{and}\ a\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}A\ {\isasymand}\ A{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
 {\isacharparenleft}rule\ conjI{\isacharparenright}\isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -158,11 +160,9 @@
 %
 \begin{isamarkuptext}%
 \noindent Rule \isa{conjI} is of course \isa{{\isasymlbrakk}{\isacharquery}P{\isacharsemicolon}\ {\isacharquery}Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}P\ {\isasymand}\ {\isacharquery}Q}.
-A drawback of implicit proofs by assumption is that it
-is no longer obvious where an assumption is used.
 
 Proofs of the form \isakeyword{by}\isa{{\isacharparenleft}rule}~\emph{name}\isa{{\isacharparenright}}
-can be abbreviated to ``..''  if \emph{name} refers to one of the
+can be abbreviated to ``..'' if \emph{name} refers to one of the
 predefined introduction rules (or elimination rules, see below):%
 \end{isamarkuptext}%
 \isamarkuptrue%
@@ -177,8 +177,9 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ a\ \isakeyword{and}\ a\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}A\ {\isasymand}\ A{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -193,8 +194,7 @@
 \begin{isamarkuptext}%
 \noindent
 This is what happens: first the matching introduction rule \isa{conjI}
-is applied (first ``.''), then the two subgoals are solved by assumption
-(second ``.'').%
+is applied (first ``.''), the remaining problem is solved immediately (second ``.'').%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
@@ -208,7 +208,7 @@
 \ \ \ \ \ {\isasymlbrakk}{\isacharquery}P\ {\isasymand}\ {\isacharquery}Q{\isacharsemicolon}\ {\isasymlbrakk}{\isacharquery}P{\isacharsemicolon}\ {\isacharquery}Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}R{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}R%
 \end{isabelle}  In the following proof it is applied
 by hand, after its first (\emph{major}) premise has been eliminated via
-\isa{{\isacharbrackleft}OF\ AB{\isacharbrackright}}:%
+\isa{{\isacharbrackleft}OF\ ab{\isacharbrackright}}:%
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
@@ -222,17 +222,18 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ AB{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
+\ ab{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}B\ {\isasymand}\ A{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{proof}\isamarkupfalse%
-\ {\isacharparenleft}rule\ conjE{\isacharbrackleft}OF\ AB{\isacharbrackright}{\isacharparenright}\ \ %
-\isamarkupcmt{\isa{conjE{\isacharbrackleft}OF\ AB{\isacharbrackright}}: \isa{{\isacharparenleft}{\isasymlbrakk}A{\isacharsemicolon}\ B{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}R{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharquery}R}%
+\ {\isacharparenleft}rule\ conjE{\isacharbrackleft}OF\ ab{\isacharbrackright}{\isacharparenright}\ \ %
+\isamarkupcmt{\isa{conjE{\isacharbrackleft}OF\ ab{\isacharbrackright}}: \isa{{\isacharparenleft}{\isasymlbrakk}A{\isacharsemicolon}\ B{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}R{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharquery}R}%
 }
 \isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ \isakeyword{and}\ b{\isacharcolon}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{from}\isamarkupfalse%
+\ b\ \isakeyword{and}\ a\ \isacommand{show}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{qed}\isamarkupfalse%
@@ -279,15 +280,16 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ AB{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
+\ ab{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{from}\isamarkupfalse%
-\ AB\ \isacommand{show}\isamarkupfalse%
+\ ab\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}B\ {\isasymand}\ A{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ \isakeyword{and}\ b{\isacharcolon}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{from}\isamarkupfalse%
+\ b\ \isakeyword{and}\ a\ \isacommand{show}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{qed}\isamarkupfalse%
@@ -308,7 +310,8 @@
 such that the proof of each proposition builds on the previous proposition.
 \end{quote}
 The previous proposition can be referred to via the fact \isa{this}.
-This greatly reduces the need for explicit naming of propositions:%
+This greatly reduces the need for explicit naming of propositions.  We also
+rearrange the additional inner assumptions into proper order for immediate use:%
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
@@ -329,8 +332,9 @@
 \ \ \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{from}\isamarkupfalse%
+\ this\ \isacommand{show}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{qed}\isamarkupfalse%
@@ -455,14 +459,15 @@
 \ \ \ \ \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
+\ a{\isacharcolon}\ {\isachardoublequoteopen}A{\isachardoublequoteclose}\isanewline
 \ \ \ \ \ \ \isacommand{have}\isamarkupfalse%
 \ {\isachardoublequoteopen}{\isasymnot}\ B{\isachardoublequoteclose}\isanewline
 \ \ \ \ \ \ \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \ \ \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
-\ \ \ \ \ \ \ \ \isacommand{have}\isamarkupfalse%
+\ b{\isacharcolon}\ {\isachardoublequoteopen}B{\isachardoublequoteclose}\isanewline
+\ \ \ \ \ \ \ \ \isacommand{from}\isamarkupfalse%
+\ a\ \isakeyword{and}\ b\ \isacommand{have}\isamarkupfalse%
 \ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \ \ \ \ \ \ \isacommand{with}\isamarkupfalse%
@@ -622,7 +627,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
-\ \isakeyword{assumes}\ AB{\isacharcolon}\ {\isachardoublequoteopen}large{\isacharunderscore}A\ {\isasymand}\ large{\isacharunderscore}B{\isachardoublequoteclose}\isanewline
+\ \isakeyword{assumes}\ ab{\isacharcolon}\ {\isachardoublequoteopen}large{\isacharunderscore}A\ {\isasymand}\ large{\isacharunderscore}B{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{shows}\ {\isachardoublequoteopen}large{\isacharunderscore}B\ {\isasymand}\ large{\isacharunderscore}A{\isachardoublequoteclose}\ {\isacharparenleft}\isakeyword{is}\ {\isachardoublequoteopen}{\isacharquery}B\ {\isasymand}\ {\isacharquery}A{\isachardoublequoteclose}{\isacharparenright}\isanewline
 %
 \isadelimproof
@@ -633,13 +638,13 @@
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{from}\isamarkupfalse%
-\ AB\ \isacommand{show}\isamarkupfalse%
+\ ab\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}{\isacharquery}B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{next}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{from}\isamarkupfalse%
-\ AB\ \isacommand{show}\isamarkupfalse%
+\ ab\ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}{\isacharquery}A{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -653,7 +658,7 @@
 %
 \begin{isamarkuptext}%
 \noindent Note the difference between \isa{{\isacharquery}AB}, a term, and
-\isa{AB}, a fact.
+\isa{ab}, a fact.
 
 Finally we want to start the proof with $\land$-elimination so we
 don't have to perform it twice, as above. Here is a slick way to
@@ -661,7 +666,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
-\ \isakeyword{assumes}\ AB{\isacharcolon}\ {\isachardoublequoteopen}large{\isacharunderscore}A\ {\isasymand}\ large{\isacharunderscore}B{\isachardoublequoteclose}\isanewline
+\ \isakeyword{assumes}\ ab{\isacharcolon}\ {\isachardoublequoteopen}large{\isacharunderscore}A\ {\isasymand}\ large{\isacharunderscore}B{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{shows}\ {\isachardoublequoteopen}large{\isacharunderscore}B\ {\isasymand}\ large{\isacharunderscore}A{\isachardoublequoteclose}\ {\isacharparenleft}\isakeyword{is}\ {\isachardoublequoteopen}{\isacharquery}B\ {\isasymand}\ {\isacharquery}A{\isachardoublequoteclose}{\isacharparenright}\isanewline
 %
 \isadelimproof
@@ -670,11 +675,11 @@
 %
 \isatagproof
 \isacommand{using}\isamarkupfalse%
-\ AB\isanewline
+\ ab\isanewline
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isacharquery}A{\isachardoublequoteclose}\ {\isachardoublequoteopen}{\isacharquery}B{\isachardoublequoteclose}\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isacharquery}B{\isachardoublequoteclose}\ {\isachardoublequoteopen}{\isacharquery}A{\isachardoublequoteclose}\ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -688,7 +693,7 @@
 %
 \begin{isamarkuptext}%
 \noindent Command \isakeyword{using} can appear before a proof
-and adds further facts to those piped into the proof. Here \isa{AB}
+and adds further facts to those piped into the proof. Here \isa{ab}
 is the only such fact and it triggers $\land$-elimination. Another
 frequent idiom is as follows:
 \begin{center}
@@ -706,7 +711,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
-\ \isakeyword{assumes}\ AB{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{shows}\ {\isachardoublequoteopen}B\ {\isasymor}\ A{\isachardoublequoteclose}\isanewline
+\ \isakeyword{assumes}\ ab{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{shows}\ {\isachardoublequoteopen}B\ {\isasymor}\ A{\isachardoublequoteclose}\isanewline
 %
 \isadelimproof
 %
@@ -716,18 +721,18 @@
 \isacommand{proof}\isamarkupfalse%
 \ {\isacharminus}\isanewline
 \ \ \isacommand{from}\isamarkupfalse%
-\ AB\ \isacommand{show}\isamarkupfalse%
+\ ab\ \isacommand{show}\isamarkupfalse%
 \ {\isacharquery}thesis\isanewline
 \ \ \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ A\ \isacommand{show}\isamarkupfalse%
+\ A\ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{next}\isamarkupfalse%
 \isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
-\ B\ \isacommand{show}\isamarkupfalse%
+\ B\ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{qed}\isamarkupfalse%
@@ -747,7 +752,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{lemma}\isamarkupfalse%
-\ \isakeyword{assumes}\ AB{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{shows}\ {\isachardoublequoteopen}B\ {\isasymor}\ A{\isachardoublequoteclose}\isanewline
+\ \isakeyword{assumes}\ ab{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{shows}\ {\isachardoublequoteopen}B\ {\isasymor}\ A{\isachardoublequoteclose}\isanewline
 %
 \isadelimproof
 %
@@ -755,17 +760,17 @@
 %
 \isatagproof
 \isacommand{using}\isamarkupfalse%
-\ AB\isanewline
+\ ab\isanewline
 \isacommand{proof}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ A\ \isacommand{show}\isamarkupfalse%
+\ A\ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{next}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{assume}\isamarkupfalse%
-\ B\ \isacommand{show}\isamarkupfalse%
+\ B\ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \isanewline
 \isacommand{qed}\isamarkupfalse%
@@ -945,7 +950,7 @@
 \ x\isanewline
 \ \ \ \ \isacommand{assume}\isamarkupfalse%
 \ {\isachardoublequoteopen}P{\isacharparenleft}f\ x{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ \ \ \ \isacommand{thus}\isamarkupfalse%
 \ {\isacharquery}thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
 \ \ %
 \isamarkupcmt{\isa{exI}: \isa{{\isacharquery}P\ {\isacharquery}x\ {\isasymLongrightarrow}\ {\isasymexists}x{\isachardot}\ {\isacharquery}P\ x}%
@@ -1155,8 +1160,9 @@
 \ \ \ \ \ \ \isacommand{hence}\isamarkupfalse%
 \ {\isachardoublequoteopen}y\ {\isasymnotin}\ {\isacharquery}S{\isachardoublequoteclose}\ \ \ \ \isacommand{by}\isamarkupfalse%
 {\isacharparenleft}simp\ add{\isacharcolon}\ {\isacharbackquoteopen}{\isacharquery}S\ {\isacharequal}\ f\ y{\isacharbackquoteclose}{\isacharparenright}\isanewline
-\ \ \ \ \ \ \isacommand{thus}\isamarkupfalse%
-\ False\ \ \ \ \ \ \ \ \ \isacommand{by}\isamarkupfalse%
+\ \ \ \ \ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}y\ {\isasymin}\ {\isacharquery}S{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ False\ \isacommand{by}\isamarkupfalse%
 \ contradiction\isanewline
 \ \ \ \ \isacommand{next}\isamarkupfalse%
 \isanewline
@@ -1168,8 +1174,9 @@
 \ \ \ \ \ \ \isacommand{hence}\isamarkupfalse%
 \ {\isachardoublequoteopen}y\ {\isasymin}\ {\isacharquery}S{\isachardoublequoteclose}\ \ \ \ \isacommand{by}\isamarkupfalse%
 {\isacharparenleft}simp\ add{\isacharcolon}\ {\isacharbackquoteopen}{\isacharquery}S\ {\isacharequal}\ f\ y{\isacharbackquoteclose}{\isacharparenright}\isanewline
-\ \ \ \ \ \ \isacommand{thus}\isamarkupfalse%
-\ False\ \ \ \ \ \ \ \ \ \isacommand{by}\isamarkupfalse%
+\ \ \ \ \ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}y\ {\isasymnotin}\ {\isacharquery}S{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ False\ \isacommand{by}\isamarkupfalse%
 \ contradiction\isanewline
 \ \ \ \ \isacommand{qed}\isamarkupfalse%
 \isanewline
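
(For reference, the generated-LaTeX hunks above encode the following plain Isar proof; this is only a decoding of what the patch already contains, with the assumption label renamed to ab and the combined thus steps, not an additional change. A and B are boolean frees.)

  lemma assumes ab: "A \<or> B" shows "B \<or> A"
  using ab
  proof
    assume A thus ?thesis ..
  next
    assume B thus ?thesis ..
  qed
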
--- a/doc-src/LaTeXsugar/Sugar/document/Sugar.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/LaTeXsugar/Sugar/document/Sugar.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -148,7 +148,7 @@
 \begin{isamarkuptext}%
 If you print anything, especially theorems, containing
 schematic variables they are prefixed with a question mark:
-\verb!@!\verb!{thm conjI}! results in \isa{{\isasymlbrakk}{\isacharquery}P{\isacharsemicolon}\ {\isacharquery}Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}P\ {\isasymand}\ {\isacharquery}Q}. Most of the time
+\verb!@!\verb!{thm conjI}! results in \isa{{\isasymlbrakk}P{\isacharsemicolon}\ Q{\isasymrbrakk}\ {\isasymLongrightarrow}\ P\ {\isasymand}\ Q}. Most of the time
 you would rather not see the question marks. There is an attribute
 \verb!no_vars! that you can attach to the theorem that turns its
 schematic into ordinary free variables: \verb!@!\verb!{thm conjI[no_vars]}!
--- a/doc-src/Locales/Locales/Examples.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/Examples.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -610,7 +610,7 @@
   Changes to the locale hierarchy may be declared
   with the \isakeyword{interpretation} command. *}
 
-  interpretation %visible total_order \<subseteq> lattice
+  sublocale %visible total_order \<subseteq> lattice
 
 txt {* This enters the context of locale @{text total_order}, in
   which the goal @{subgoals [display]} must be shown.  First, the
@@ -652,7 +652,7 @@
 
 text {* Similarly, total orders are distributive lattices. *}
 
-  interpretation total_order \<subseteq> distrib_lattice
+  sublocale total_order \<subseteq> distrib_lattice
   proof unfold_locales
     fix %"proof" x y z
     show "x \<sqinter> (y \<squnion> z) = x \<sqinter> y \<squnion> x \<sqinter> z" (is "?l = ?r")
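
(The two hunks above switch locale subsumption from the interpretation form to the new sublocale command; the proof obligations and proofs are unchanged. A syntax comparison, assuming the total_order and lattice locales of Examples.thy:)

  (* before *) interpretation total_order \<subseteq> lattice
  (* after  *) sublocale      total_order \<subseteq> lattice
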
--- a/doc-src/Locales/Locales/Examples1.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/Examples1.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -45,7 +45,7 @@
   @{text partial_order} in the global context of the theory.  The
   parameter @{term le} is replaced by @{term "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"}. *} 
 
-  interpretation %visible nat: partial_order ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+  interpretation %visible nat: partial_order "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"
 txt {* The locale name is succeeded by a \emph{parameter
   instantiation}.  In general, this is a list of terms, which refer to
   the parameters in the order of declaration in the locale.  The
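
(The same change recurs in every interpretation and interpret command in the files below: the bracketed instantiation list becomes a plain sequence of terms. A comparison sketch, assuming the partial_order locale of this example series:)

  (* before *) interpretation nat: partial_order ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"]
  (* after  *) interpretation nat: partial_order "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"
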
--- a/doc-src/Locales/Locales/Examples2.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/Examples2.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -9,7 +9,7 @@
   \isakeyword{where} and require proofs.  The revised command,
   replacing @{text "\<sqsubset>"} by @{text "<"}, is: *}
 
-interpretation %visible nat: partial_order ["op \<le> :: [nat, nat] \<Rightarrow> bool"]
+interpretation %visible nat: partial_order "op \<le> :: [nat, nat] \<Rightarrow> bool"
   where "partial_order.less op \<le> (x::nat) y = (x < y)"
 proof -
   txt {* The goals are @{subgoals [display]}
--- a/doc-src/Locales/Locales/Examples3.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/Examples3.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -16,12 +16,12 @@
   \isakeyword{interpret}).  This interpretation is inside the proof of the global
   interpretation.  The third revision of the example illustrates this.  *}
 
-interpretation %visible nat: partial_order ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+interpretation %visible nat: partial_order "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"
   where "partial_order.less (op \<le>) (x::nat) y = (x < y)"
 proof -
   show "partial_order (op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool)"
     by unfold_locales auto
-  then interpret nat: partial_order ["op \<le> :: [nat, nat] \<Rightarrow> bool"] .
+  then interpret nat: partial_order "op \<le> :: [nat, nat] \<Rightarrow> bool" .
   show "partial_order.less (op \<le>) (x::nat) y = (x < y)"
     unfolding nat.less_def by auto
 qed
@@ -48,7 +48,7 @@
   interpretation is reproduced in order to give an example of a more
   elaborate interpretation proof.  *}
 
-interpretation %visible nat: lattice ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+interpretation %visible nat: lattice "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"
   where "lattice.meet op \<le> (x::nat) y = min x y"
     and "lattice.join op \<le> (x::nat) y = max x y"
 proof -
@@ -63,7 +63,7 @@
     by arith+
   txt {* In order to show the equations, we put ourselves in a
     situation where the lattice theorems can be used in a convenient way. *}
-  then interpret nat: lattice ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"] .
+  then interpret nat: lattice "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool" .
   show "lattice.meet op \<le> (x::nat) y = min x y"
     by (bestsimp simp: nat.meet_def nat.is_inf_def)
   show "lattice.join op \<le> (x::nat) y = max x y"
@@ -73,7 +73,7 @@
 text {* That the relation @{text \<le>} is a total order completes this
   sequence of interpretations. *}
 
-interpretation %visible nat: total_order ["op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+interpretation %visible nat: total_order "op \<le> :: nat \<Rightarrow> nat \<Rightarrow> bool"
   by unfold_locales arith
 
 text {* Theorems that are available in the theory at this point are shown in
@@ -130,12 +130,12 @@
   but not a total order.  Interpretation again proceeds
   incrementally. *}
 
-interpretation nat_dvd: partial_order ["op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+interpretation nat_dvd: partial_order "op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"
   where "partial_order.less op dvd (x::nat) y = (x dvd y \<and> x \<noteq> y)"
 proof -
   show "partial_order (op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool)"
     by unfold_locales (auto simp: dvd_def)
-  then interpret nat_dvd: partial_order ["op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"] .
+  then interpret nat_dvd: partial_order "op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool" .
   show "partial_order.less op dvd (x::nat) y = (x dvd y \<and> x \<noteq> y)"
     apply (unfold nat_dvd.less_def)
     apply auto
@@ -145,7 +145,7 @@
 text {* Note that there is no symbol for strict divisibility.  Instead,
   interpretation substitutes @{term "x dvd y \<and> x \<noteq> y"}.   *}
 
-interpretation nat_dvd: lattice ["op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+interpretation nat_dvd: lattice "op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"
   where nat_dvd_meet_eq:
       "lattice.meet op dvd = gcd"
     and nat_dvd_join_eq:
@@ -159,7 +159,7 @@
     apply (rule_tac x = "lcm x y" in exI)
     apply (auto intro: lcm_dvd1 lcm_dvd2 lcm_least)
     done
-  then interpret nat_dvd: lattice ["op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"] .
+  then interpret nat_dvd: lattice "op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool" .
   show "lattice.meet op dvd = gcd"
     apply (auto simp add: expand_fun_eq)
     apply (unfold nat_dvd.meet_def)
@@ -203,7 +203,7 @@
 ML %invisible {* reset quick_and_dirty *}
   
 interpretation %visible nat_dvd:
-  distrib_lattice ["op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"]
+  distrib_lattice "op dvd :: nat \<Rightarrow> nat \<Rightarrow> bool"
   apply unfold_locales
   txt {* @{subgoals [display]} *}
   apply (unfold nat_dvd_meet_eq nat_dvd_join_eq)
@@ -262,7 +262,7 @@
   preserving maps can be declared in the following way.  *}
 
   locale order_preserving =
-    partial_order + partial_order le' (infixl "\<preceq>" 50) +
+    partial_order + po': partial_order le' for le' (infixl "\<preceq>" 50) +
     fixes \<phi> :: "'a \<Rightarrow> 'b"
     assumes hom_le: "x \<sqsubseteq> y \<Longrightarrow> \<phi> x \<preceq> \<phi> y"
 
@@ -288,7 +288,8 @@
   obtained by appending the conclusions of the left locale and of the
   right locale.  *}
 
-text {* The locale @{text order_preserving} contains theorems for both
+text {* % FIXME needs update
+  The locale @{text order_preserving} contains theorems for both
   orders @{text \<sqsubseteq>} and @{text \<preceq>}.  How can one refer to a theorem for
   a particular order, @{text \<sqsubseteq>} or @{text \<preceq>}?  Names in locales are
   qualified by the locale parameters.  More precisely, a name is
@@ -297,20 +298,21 @@
 
 context %invisible order_preserving begin
 
-text {*
-  @{thm [source] le.less_le_trans}: @{thm le.less_le_trans}
+text {* % FIXME needs update?
+  @{thm [source] less_le_trans}: @{thm less_le_trans}
 
-  @{thm [source] le_le'_\<phi>.hom_le}: @{thm le_le'_\<phi>.hom_le}
+  @{thm [source] hom_le}: @{thm hom_le}
   *}
 
 text {* When renaming a locale, the morphism is also applied
   to the qualifiers.  Hence theorems for the partial order @{text \<preceq>}
   are qualified by @{text le'}.  For example, @{thm [source]
-  le'.less_le_trans}: @{thm [display, indent=2] le'.less_le_trans} *}
+  po'.less_le_trans}: @{thm [display, indent=2] po'.less_le_trans} *}
 
 end %invisible
 
-text {* This example reveals that there is no infix syntax for the strict
+text {* % FIXME needs update?
+  This example reveals that there is no infix syntax for the strict
   version of @{text \<preceq>}!  This can, of course, not be introduced
   automatically, but it can be declared manually through an abbreviation.
   *}
@@ -319,7 +321,7 @@
     less' (infixl "\<prec>" 50) where "less' \<equiv> partial_order.less le'"
 
 text {* Now the theorem is displayed nicely as
-  @{thm [locale=order_preserving] le'.less_le_trans}.  *}
+  @{thm [locale=order_preserving] po'.less_le_trans}.  *}
 
 text {* Not only names of theorems are qualified.  In fact, all names
   are qualified, in particular names introduced by definitions and
@@ -331,7 +333,7 @@
 text {* Two more locales illustrate working with locale expressions.
   A map @{text \<phi>} is a lattice homomorphism if it preserves meet and join. *}
 
-  locale lattice_hom = lattice + lattice le' (infixl "\<preceq>" 50) +
+  locale lattice_hom = lattice + lat'!: lattice le' for le' (infixl "\<preceq>" 50) +
     fixes \<phi>
     assumes hom_meet:
 	"\<phi> (lattice.meet le x y) = lattice.meet le' (\<phi> x) (\<phi> y)"
@@ -339,14 +341,14 @@
 	"\<phi> (lattice.join le x y) = lattice.join le' (\<phi> x) (\<phi> y)"
 
   abbreviation (in lattice_hom)
-    meet' (infixl "\<sqinter>''" 50) where "meet' \<equiv> le'.meet"
+    meet' (infixl "\<sqinter>''" 50) where "meet' \<equiv> lat'.meet"
   abbreviation (in lattice_hom)
-    join' (infixl "\<squnion>''" 50) where "join' \<equiv> le'.join"
+    join' (infixl "\<squnion>''" 50) where "join' \<equiv> lat'.join"
 
 text {* A homomorphism is an endomorphism if both orders coincide. *}
 
   locale lattice_end =
-    lattice_hom le (infixl "\<sqsubseteq>" 50) le (infixl "\<sqsubseteq>" 50)
+    lattice_hom le le for le (infixl "\<sqsubseteq>" 50)
 
 text {* The inheritance diagram of the situation we have now is shown
   in Figure~\ref{fig:hom}, where the dashed line depicts an
@@ -395,20 +397,20 @@
   preserving.  As the final example of this section, a locale
   interpretation is used to assert this. *}
 
-  interpretation lattice_hom \<subseteq> order_preserving proof unfold_locales
+  sublocale lattice_hom \<subseteq> order_preserving proof unfold_locales
     fix x y
     assume "x \<sqsubseteq> y"
-    then have "y = (x \<squnion> y)" by (simp add: le.join_connection)
+    then have "y = (x \<squnion> y)" by (simp add: join_connection)
     then have "\<phi> y = (\<phi> x \<squnion>' \<phi> y)" by (simp add: hom_join [symmetric])
-    then show "\<phi> x \<preceq> \<phi> y" by (simp add: le'.join_connection)
+    then show "\<phi> x \<preceq> \<phi> y" by (simp add: lat'.join_connection)
   qed
 
 text {* Theorems and other declarations --- syntax, in particular ---
   from the locale @{text order_preserving} are now active in @{text
   lattice_hom}, for example
 
-  @{thm [locale=lattice_hom, source] le'.less_le_trans}:
-  @{thm [locale=lattice_hom] le'.less_le_trans}
+  @{thm [locale=lattice_hom, source] lat'.less_le_trans}:
+  @{thm [locale=lattice_hom] lat'.less_le_trans}
   *}
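
(Summary of the new locale-expression syntax used throughout the Examples3.thy hunks above: a second instance of a locale now carries an explicit qualifier, its parameters are declared in a for clause, and its theorems are accessed through that qualifier. The sketch below only reassembles names taken from the hunks and assumes partial_order from Examples.thy; it is an illustration, not a further change.)

  locale order_preserving =
    partial_order + po': partial_order le' for le' (infixl "\<preceq>" 50) +
    fixes \<phi> :: "'a \<Rightarrow> 'b"
    assumes hom_le: "x \<sqsubseteq> y \<Longrightarrow> \<phi> x \<preceq> \<phi> y"

  context order_preserving begin
  thm po'.less_le_trans  (* a theorem of the second partial_order instance, qualified by po' *)
  end
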
 
 
--- a/doc-src/Locales/Locales/document/Examples.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/document/Examples.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -1222,7 +1222,7 @@
 \endisadelimvisible
 %
 \isatagvisible
-\isacommand{interpretation}\isamarkupfalse%
+\isacommand{sublocale}\isamarkupfalse%
 \ total{\isacharunderscore}order\ {\isasymsubseteq}\ lattice%
 \begin{isamarkuptxt}%
 This enters the context of locale \isa{total{\isacharunderscore}order}, in
@@ -1325,7 +1325,7 @@
 Similarly, total orders are distributive lattices.%
 \end{isamarkuptext}%
 \isamarkuptrue%
-\ \ \isacommand{interpretation}\isamarkupfalse%
+\ \ \isacommand{sublocale}\isamarkupfalse%
 \ total{\isacharunderscore}order\ {\isasymsubseteq}\ distrib{\isacharunderscore}lattice\isanewline
 %
 \isadelimproof
--- a/doc-src/Locales/Locales/document/Examples1.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/document/Examples1.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -74,7 +74,7 @@
 %
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}%
+\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}%
 \begin{isamarkuptxt}%
 The locale name is succeeded by a \emph{parameter
   instantiation}.  In general, this is a list of terms, which refer to
--- a/doc-src/Locales/Locales/document/Examples2.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/document/Examples2.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -34,7 +34,7 @@
 %
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ {\isacharbrackleft}nat{\isacharcomma}\ nat{\isacharbrackright}\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ {\isacharbrackleft}nat{\isacharcomma}\ nat{\isacharbrackright}\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{where}\ {\isachardoublequoteopen}partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasymle}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ {\isacharparenleft}x\ {\isacharless}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
 \isacommand{proof}\isamarkupfalse%
 \ {\isacharminus}%
--- a/doc-src/Locales/Locales/document/Examples3.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/Locales/Locales/document/Examples3.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -43,7 +43,7 @@
 %
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{where}\ {\isachardoublequoteopen}partial{\isacharunderscore}order{\isachardot}less\ {\isacharparenleft}op\ {\isasymle}{\isacharparenright}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ {\isacharparenleft}x\ {\isacharless}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
 \isacommand{proof}\isamarkupfalse%
 \ {\isacharminus}\isanewline
@@ -53,7 +53,7 @@
 \ unfold{\isacharunderscore}locales\ auto\isanewline
 \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{interpret}\isamarkupfalse%
-\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ {\isacharbrackleft}nat{\isacharcomma}\ nat{\isacharbrackright}\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\ nat{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ {\isacharbrackleft}nat{\isacharcomma}\ nat{\isacharbrackright}\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}partial{\isacharunderscore}order{\isachardot}less\ {\isacharparenleft}op\ {\isasymle}{\isacharparenright}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ {\isacharparenleft}x\ {\isacharless}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
@@ -91,8 +91,8 @@
 \begin{isamarkuptext}%
 Further interpretations are necessary to reuse theorems from
   the other locales.  In \isa{lattice} the operations \isa{{\isasymsqinter}} and
-  \isa{{\isasymsqunion}} are substituted by \isa{ord{\isacharunderscore}class{\isachardot}min} and
-  \isa{ord{\isacharunderscore}class{\isachardot}max}.  The entire proof for the
+  \isa{{\isasymsqunion}} are substituted by \isa{min} and
+  \isa{max}.  The entire proof for the
   interpretation is reproduced in order to give an example of a more
   elaborate interpretation proof.%
 \end{isamarkuptext}%
@@ -104,7 +104,7 @@
 %
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharcolon}\ lattice\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharcolon}\ lattice\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{where}\ {\isachardoublequoteopen}lattice{\isachardot}meet\ op\ {\isasymle}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ min\ x\ y{\isachardoublequoteclose}\isanewline
 \ \ \ \ \isakeyword{and}\ {\isachardoublequoteopen}lattice{\isachardot}join\ op\ {\isasymle}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ max\ x\ y{\isachardoublequoteclose}\isanewline
 \isacommand{proof}\isamarkupfalse%
@@ -143,7 +143,7 @@
 \isamarkuptrue%
 \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{interpret}\isamarkupfalse%
-\ nat{\isacharcolon}\ lattice\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\ nat{\isacharcolon}\ lattice\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}lattice{\isachardot}meet\ op\ {\isasymle}\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ min\ x\ y{\isachardoublequoteclose}\isanewline
@@ -174,7 +174,7 @@
 %
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharcolon}\ total{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharcolon}\ total{\isacharunderscore}order\ {\isachardoublequoteopen}op\ {\isasymle}\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{by}\isamarkupfalse%
 \ unfold{\isacharunderscore}locales\ arith%
 \endisatagvisible
@@ -196,11 +196,11 @@
   \isa{nat{\isachardot}less{\isacharunderscore}def} from locale \isa{partial{\isacharunderscore}order}: \\
   \quad \isa{{\isacharparenleft}{\isacharquery}x\ {\isacharless}\ {\isacharquery}y{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}{\isacharquery}x\ {\isasymle}\ {\isacharquery}y\ {\isasymand}\ {\isacharquery}x\ {\isasymnoteq}\ {\isacharquery}y{\isacharparenright}} \\
   \isa{nat{\isachardot}meet{\isacharunderscore}left} from locale \isa{lattice}: \\
-  \quad \isa{ord{\isacharunderscore}class{\isachardot}min\ {\isacharquery}x\ {\isacharquery}y\ {\isasymle}\ {\isacharquery}x} \\
+  \quad \isa{min\ {\isacharquery}x\ {\isacharquery}y\ {\isasymle}\ {\isacharquery}x} \\
   \isa{nat{\isachardot}join{\isacharunderscore}distr} from locale \isa{distrib{\isacharunderscore}lattice}: \\
-  \quad \isa{ord{\isacharunderscore}class{\isachardot}max\ {\isacharquery}x\ {\isacharparenleft}ord{\isacharunderscore}class{\isachardot}min\ {\isacharquery}y\ {\isacharquery}z{\isacharparenright}\ {\isacharequal}\ ord{\isacharunderscore}class{\isachardot}min\ {\isacharparenleft}ord{\isacharunderscore}class{\isachardot}max\ {\isacharquery}x\ {\isacharquery}y{\isacharparenright}\ {\isacharparenleft}ord{\isacharunderscore}class{\isachardot}max\ {\isacharquery}x\ {\isacharquery}z{\isacharparenright}} \\
+  \quad \isa{lattice{\isachardot}join\ op\ {\isasymle}\ {\isacharquery}x\ {\isacharparenleft}lattice{\isachardot}meet\ op\ {\isasymle}\ {\isacharquery}y\ {\isacharquery}z{\isacharparenright}\ {\isacharequal}\ lattice{\isachardot}meet\ op\ {\isasymle}\ {\isacharparenleft}lattice{\isachardot}join\ op\ {\isasymle}\ {\isacharquery}x\ {\isacharquery}y{\isacharparenright}\ {\isacharparenleft}lattice{\isachardot}join\ op\ {\isasymle}\ {\isacharquery}x\ {\isacharquery}z{\isacharparenright}} \\
   \isa{nat{\isachardot}less{\isacharunderscore}total} from locale \isa{total{\isacharunderscore}order}: \\
-  \quad \isa{{\isacharquery}x\ {\isacharless}\ {\isacharquery}y\ {\isasymor}\ {\isacharquery}x\ {\isacharequal}\ {\isacharquery}y\ {\isasymor}\ {\isacharquery}y\ {\isacharless}\ {\isacharquery}x}
+  \quad \isa{partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasymle}\ {\isacharquery}x\ {\isacharquery}y\ {\isasymor}\ {\isacharquery}x\ {\isacharequal}\ {\isacharquery}y\ {\isasymor}\ partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasymle}\ {\isacharquery}y\ {\isacharquery}x}
 \end{tabular}
 \end{center}
 \hrule
@@ -244,7 +244,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharunderscore}dvd{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharunderscore}dvd{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{where}\ {\isachardoublequoteopen}partial{\isacharunderscore}order{\isachardot}less\ op\ dvd\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ {\isacharparenleft}x\ dvd\ y\ {\isasymand}\ x\ {\isasymnoteq}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
 %
 \isadelimproof
@@ -260,7 +260,7 @@
 \ unfold{\isacharunderscore}locales\ {\isacharparenleft}auto\ simp{\isacharcolon}\ dvd{\isacharunderscore}def{\isacharparenright}\isanewline
 \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{interpret}\isamarkupfalse%
-\ nat{\isacharunderscore}dvd{\isacharcolon}\ partial{\isacharunderscore}order\ {\isacharbrackleft}{\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\ nat{\isacharunderscore}dvd{\isacharcolon}\ partial{\isacharunderscore}order\ {\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}partial{\isacharunderscore}order{\isachardot}less\ op\ dvd\ {\isacharparenleft}x{\isacharcolon}{\isacharcolon}nat{\isacharparenright}\ y\ {\isacharequal}\ {\isacharparenleft}x\ dvd\ y\ {\isasymand}\ x\ {\isasymnoteq}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
@@ -285,7 +285,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \isacommand{interpretation}\isamarkupfalse%
-\ nat{\isacharunderscore}dvd{\isacharcolon}\ lattice\ {\isacharbrackleft}{\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ nat{\isacharunderscore}dvd{\isacharcolon}\ lattice\ {\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isakeyword{where}\ nat{\isacharunderscore}dvd{\isacharunderscore}meet{\isacharunderscore}eq{\isacharcolon}\isanewline
 \ \ \ \ \ \ {\isachardoublequoteopen}lattice{\isachardot}meet\ op\ dvd\ {\isacharequal}\ gcd{\isachardoublequoteclose}\isanewline
 \ \ \ \ \isakeyword{and}\ nat{\isacharunderscore}dvd{\isacharunderscore}join{\isacharunderscore}eq{\isacharcolon}\isanewline
@@ -316,7 +316,7 @@
 \isanewline
 \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{interpret}\isamarkupfalse%
-\ nat{\isacharunderscore}dvd{\isacharcolon}\ lattice\ {\isacharbrackleft}{\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\ nat{\isacharunderscore}dvd{\isacharcolon}\ lattice\ {\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
 \isanewline
 \ \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}lattice{\isachardot}meet\ op\ dvd\ {\isacharequal}\ gcd{\isachardoublequoteclose}\isanewline
@@ -390,7 +390,7 @@
 \isatagvisible
 \isacommand{interpretation}\isamarkupfalse%
 \ nat{\isacharunderscore}dvd{\isacharcolon}\isanewline
-\ \ distrib{\isacharunderscore}lattice\ {\isacharbrackleft}{\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}{\isacharbrackright}\isanewline
+\ \ distrib{\isacharunderscore}lattice\ {\isachardoublequoteopen}op\ dvd\ {\isacharcolon}{\isacharcolon}\ nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ bool{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{apply}\isamarkupfalse%
 \ unfold{\isacharunderscore}locales%
 \begin{isamarkuptxt}%
@@ -434,7 +434,7 @@
   \isa{nat{\isacharunderscore}dvd{\isachardot}meet{\isacharunderscore}left} from locale \isa{lattice}: \\
   \quad \isa{gcd\ {\isacharquery}x\ {\isacharquery}y\ dvd\ {\isacharquery}x} \\
   \isa{nat{\isacharunderscore}dvd{\isachardot}join{\isacharunderscore}distr} from locale \isa{distrib{\isacharunderscore}lattice}: \\
-  \quad \isa{lcm\ {\isacharquery}x\ {\isacharparenleft}gcd\ {\isacharquery}y\ {\isacharquery}z{\isacharparenright}\ {\isacharequal}\ gcd\ {\isacharparenleft}lcm\ {\isacharquery}x\ {\isacharquery}y{\isacharparenright}\ {\isacharparenleft}lcm\ {\isacharquery}x\ {\isacharquery}z{\isacharparenright}} \\
+  \quad \isa{lattice{\isachardot}join\ op\ dvd\ {\isacharquery}x\ {\isacharparenleft}lattice{\isachardot}meet\ op\ dvd\ {\isacharquery}y\ {\isacharquery}z{\isacharparenright}\ {\isacharequal}\ lattice{\isachardot}meet\ op\ dvd\ {\isacharparenleft}lattice{\isachardot}join\ op\ dvd\ {\isacharquery}x\ {\isacharquery}y{\isacharparenright}\ {\isacharparenleft}lattice{\isachardot}join\ op\ dvd\ {\isacharquery}x\ {\isacharquery}z{\isacharparenright}} \\
 \end{tabular}
 \end{center}
 \hrule
@@ -476,7 +476,7 @@
 \isamarkuptrue%
 \ \ \isacommand{locale}\isamarkupfalse%
 \ order{\isacharunderscore}preserving\ {\isacharequal}\isanewline
-\ \ \ \ partial{\isacharunderscore}order\ {\isacharplus}\ partial{\isacharunderscore}order\ le{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasympreceq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ {\isacharplus}\isanewline
+\ \ \ \ partial{\isacharunderscore}order\ {\isacharplus}\ po{\isacharprime}{\isacharcolon}\ partial{\isacharunderscore}order\ le{\isacharprime}\ \isakeyword{for}\ le{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasympreceq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ {\isacharplus}\isanewline
 \ \ \ \ \isakeyword{fixes}\ {\isasymphi}\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}b{\isachardoublequoteclose}\isanewline
 \ \ \ \ \isakeyword{assumes}\ hom{\isacharunderscore}le{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymsqsubseteq}\ y\ {\isasymLongrightarrow}\ {\isasymphi}\ x\ {\isasympreceq}\ {\isasymphi}\ y{\isachardoublequoteclose}%
 \begin{isamarkuptext}%
@@ -505,7 +505,8 @@
 \isamarkuptrue%
 %
 \begin{isamarkuptext}%
-The locale \isa{order{\isacharunderscore}preserving} contains theorems for both
+% FIXME needs update
+  The locale \isa{order{\isacharunderscore}preserving} contains theorems for both
   orders \isa{{\isasymsqsubseteq}} and \isa{{\isasympreceq}}.  How can one refer to a theorem for
   a particular order, \isa{{\isasymsqsubseteq}} or \isa{{\isasympreceq}}?  Names in locales are
   qualified by the locale parameters.  More precisely, a name is
@@ -529,16 +530,17 @@
 \endisadeliminvisible
 %
 \begin{isamarkuptext}%
-\isa{le{\isachardot}less{\isacharunderscore}le{\isacharunderscore}trans}: \isa{{\isasymlbrakk}{\isacharquery}x\ {\isasymsqsubset}\ {\isacharquery}y{\isacharsemicolon}\ {\isacharquery}y\ {\isasymsqsubseteq}\ {\isacharquery}z{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}x\ {\isasymsqsubset}\ {\isacharquery}z}
+% FIXME needs update?
+  \isa{less{\isacharunderscore}le{\isacharunderscore}trans}: \isa{{\isasymlbrakk}partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasympreceq}\ {\isacharquery}x\ {\isacharquery}y{\isacharsemicolon}\ {\isacharquery}y\ {\isasympreceq}\ {\isacharquery}z{\isasymrbrakk}\ {\isasymLongrightarrow}\ partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasympreceq}\ {\isacharquery}x\ {\isacharquery}z}
 
-  \isa{le{\isacharunderscore}le{\isacharprime}{\isacharunderscore}{\isasymphi}{\isachardot}hom{\isacharunderscore}le}: \isa{{\isacharquery}x\ {\isasymsqsubseteq}\ {\isacharquery}y\ {\isasymLongrightarrow}\ {\isasymphi}\ {\isacharquery}x\ {\isasympreceq}\ {\isasymphi}\ {\isacharquery}y}%
+  \isa{hom{\isacharunderscore}le}: \isa{{\isacharquery}x\ {\isasymsqsubseteq}\ {\isacharquery}y\ {\isasymLongrightarrow}\ {\isasymphi}\ {\isacharquery}x\ {\isasympreceq}\ {\isasymphi}\ {\isacharquery}y}%
 \end{isamarkuptext}%
 \isamarkuptrue%
 %
 \begin{isamarkuptext}%
 When renaming a locale, the morphism is also applied
   to the qualifiers.  Hence theorems for the partial order \isa{{\isasympreceq}}
-  are qualified by \isa{le{\isacharprime}}.  For example, \isa{le{\isacharprime}{\isachardot}less{\isacharunderscore}le{\isacharunderscore}trans}: \begin{isabelle}%
+  are qualified by \isa{le{\isacharprime}}.  For example, \isa{po{\isacharprime}{\isachardot}less{\isacharunderscore}le{\isacharunderscore}trans}: \begin{isabelle}%
 \ \ {\isasymlbrakk}partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasympreceq}\ {\isacharquery}x\ {\isacharquery}y{\isacharsemicolon}\ {\isacharquery}y\ {\isasympreceq}\ {\isacharquery}z{\isasymrbrakk}\isanewline
 \isaindent{\ \ }{\isasymLongrightarrow}\ partial{\isacharunderscore}order{\isachardot}less\ op\ {\isasympreceq}\ {\isacharquery}x\ {\isacharquery}z%
 \end{isabelle}%
@@ -560,7 +562,8 @@
 \endisadeliminvisible
 %
 \begin{isamarkuptext}%
-This example reveals that there is no infix syntax for the strict
+% FIXME needs update?
+  This example reveals that there is no infix syntax for the strict
   version of \isa{{\isasympreceq}}!  This can, of course, not be introduced
   automatically, but it can be declared manually through an abbreviation.%
 \end{isamarkuptext}%
@@ -589,7 +592,7 @@
 \end{isamarkuptext}%
 \isamarkuptrue%
 \ \ \isacommand{locale}\isamarkupfalse%
-\ lattice{\isacharunderscore}hom\ {\isacharequal}\ lattice\ {\isacharplus}\ lattice\ le{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasympreceq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ {\isacharplus}\isanewline
+\ lattice{\isacharunderscore}hom\ {\isacharequal}\ lattice\ {\isacharplus}\ lat{\isacharprime}{\isacharbang}{\isacharcolon}\ lattice\ le{\isacharprime}\ \isakeyword{for}\ le{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasympreceq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ {\isacharplus}\isanewline
 \ \ \ \ \isakeyword{fixes}\ {\isasymphi}\isanewline
 \ \ \ \ \isakeyword{assumes}\ hom{\isacharunderscore}meet{\isacharcolon}\isanewline
 \ \ \ \ \ \ \ \ {\isachardoublequoteopen}{\isasymphi}\ {\isacharparenleft}lattice{\isachardot}meet\ le\ x\ y{\isacharparenright}\ {\isacharequal}\ lattice{\isachardot}meet\ le{\isacharprime}\ {\isacharparenleft}{\isasymphi}\ x{\isacharparenright}\ {\isacharparenleft}{\isasymphi}\ y{\isacharparenright}{\isachardoublequoteclose}\isanewline
@@ -598,17 +601,17 @@
 \isanewline
 \ \ \isacommand{abbreviation}\isamarkupfalse%
 \ {\isacharparenleft}\isakeyword{in}\ lattice{\isacharunderscore}hom{\isacharparenright}\isanewline
-\ \ \ \ meet{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqinter}{\isacharprime}{\isacharprime}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\ {\isachardoublequoteopen}meet{\isacharprime}\ {\isasymequiv}\ le{\isacharprime}{\isachardot}meet{\isachardoublequoteclose}\isanewline
+\ \ \ \ meet{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqinter}{\isacharprime}{\isacharprime}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\ {\isachardoublequoteopen}meet{\isacharprime}\ {\isasymequiv}\ lat{\isacharprime}{\isachardot}meet{\isachardoublequoteclose}\isanewline
 \ \ \isacommand{abbreviation}\isamarkupfalse%
 \ {\isacharparenleft}\isakeyword{in}\ lattice{\isacharunderscore}hom{\isacharparenright}\isanewline
-\ \ \ \ join{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqunion}{\isacharprime}{\isacharprime}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\ {\isachardoublequoteopen}join{\isacharprime}\ {\isasymequiv}\ le{\isacharprime}{\isachardot}join{\isachardoublequoteclose}%
+\ \ \ \ join{\isacharprime}\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqunion}{\isacharprime}{\isacharprime}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\ {\isachardoublequoteopen}join{\isacharprime}\ {\isasymequiv}\ lat{\isacharprime}{\isachardot}join{\isachardoublequoteclose}%
 \begin{isamarkuptext}%
 A homomorphism is an endomorphism if both orders coincide.%
 \end{isamarkuptext}%
 \isamarkuptrue%
 \ \ \isacommand{locale}\isamarkupfalse%
 \ lattice{\isacharunderscore}end\ {\isacharequal}\isanewline
-\ \ \ \ lattice{\isacharunderscore}hom\ le\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqsubseteq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\ le\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqsubseteq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}%
+\ \ \ \ lattice{\isacharunderscore}hom\ le\ le\ \isakeyword{for}\ le\ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymsqsubseteq}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}%
 \begin{isamarkuptext}%
 The inheritance diagram of the situation we have now is shown
   in Figure~\ref{fig:hom}, where the dashed line depicts an
@@ -659,7 +662,7 @@
   interpretation is used to assert this.%
 \end{isamarkuptext}%
 \isamarkuptrue%
-\ \ \isacommand{interpretation}\isamarkupfalse%
+\ \ \isacommand{sublocale}\isamarkupfalse%
 \ lattice{\isacharunderscore}hom\ {\isasymsubseteq}\ order{\isacharunderscore}preserving%
 \isadelimproof
 \ %
@@ -675,7 +678,7 @@
 \ \ \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{have}\isamarkupfalse%
 \ {\isachardoublequoteopen}y\ {\isacharequal}\ {\isacharparenleft}x\ {\isasymsqunion}\ y{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ add{\isacharcolon}\ le{\isachardot}join{\isacharunderscore}connection{\isacharparenright}\isanewline
+\ {\isacharparenleft}simp\ add{\isacharcolon}\ join{\isacharunderscore}connection{\isacharparenright}\isanewline
 \ \ \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{have}\isamarkupfalse%
 \ {\isachardoublequoteopen}{\isasymphi}\ y\ {\isacharequal}\ {\isacharparenleft}{\isasymphi}\ x\ {\isasymsqunion}{\isacharprime}\ {\isasymphi}\ y{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
@@ -683,7 +686,7 @@
 \ \ \ \ \isacommand{then}\isamarkupfalse%
 \ \isacommand{show}\isamarkupfalse%
 \ {\isachardoublequoteopen}{\isasymphi}\ x\ {\isasympreceq}\ {\isasymphi}\ y{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ add{\isacharcolon}\ le{\isacharprime}{\isachardot}join{\isacharunderscore}connection{\isacharparenright}\isanewline
+\ {\isacharparenleft}simp\ add{\isacharcolon}\ lat{\isacharprime}{\isachardot}join{\isacharunderscore}connection{\isacharparenright}\isanewline
 \ \ \isacommand{qed}\isamarkupfalse%
 %
 \endisatagproof
@@ -697,7 +700,7 @@
 Theorems and other declarations --- syntax, in particular ---
   from the locale \isa{order{\isacharunderscore}preserving} are now active in \isa{lattice{\isacharunderscore}hom}, for example
 
-  \isa{le{\isacharprime}{\isachardot}less{\isacharunderscore}le{\isacharunderscore}trans}:
+  \isa{lat{\isacharprime}{\isachardot}less{\isacharunderscore}le{\isacharunderscore}trans}:
   \isa{{\isasymlbrakk}{\isacharquery}x\ {\isasymprec}\ {\isacharquery}y{\isacharsemicolon}\ {\isacharquery}y\ {\isasympreceq}\ {\isacharquery}z{\isasymrbrakk}\ {\isasymLongrightarrow}\ {\isacharquery}x\ {\isasymprec}\ {\isacharquery}z}%
 \end{isamarkuptext}%
 \isamarkuptrue%
--- a/doc-src/TutorialI/Documents/document/Documents.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/TutorialI/Documents/document/Documents.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -391,7 +391,7 @@
   session is derived from a single parent, usually an object-logic
   image like \texttt{HOL}.  This results in an overall tree structure,
   which is reflected by the output location in the file system
-  (usually rooted at \verb,~/isabelle/browser_info,).
+  (usually rooted at \verb,~/.isabelle/browser_info,).
 
   \medskip The easiest way to manage Isabelle sessions is via
   \texttt{isabelle mkdir} (generates an initial session source setup)
--- a/doc-src/TutorialI/Types/document/Numbers.tex	Mon Dec 29 11:04:27 2008 -0800
+++ b/doc-src/TutorialI/Types/document/Numbers.tex	Mon Jan 05 07:54:16 2009 -0800
@@ -3,7 +3,6 @@
 \def\isabellecontext{Numbers}%
 %
 \isadelimtheory
-\isanewline
 %
 \endisadelimtheory
 %
--- a/etc/isar-keywords.el	Mon Dec 29 11:04:27 2008 -0800
+++ b/etc/isar-keywords.el	Mon Jan 05 07:54:16 2009 -0800
@@ -46,6 +46,9 @@
     "chapter"
     "class"
     "class_deps"
+    "class_interpret"
+    "class_interpretation"
+    "class_locale"
     "classes"
     "classrel"
     "code_abort"
@@ -420,6 +423,7 @@
     "axiomatization"
     "axioms"
     "class"
+    "class_locale"
     "classes"
     "classrel"
     "code_abort"
@@ -503,6 +507,7 @@
 
 (defconst isar-keywords-theory-goal
   '("ax_specification"
+    "class_interpretation"
     "corollary"
     "cpodef"
     "function"
@@ -541,7 +546,8 @@
     "subsubsect"))
 
 (defconst isar-keywords-proof-goal
-  '("have"
+  '("class_interpret"
+    "have"
     "hence"
     "interpret"
     "invoke"))
--- a/lib/html/isabelle.css	Mon Dec 29 11:04:27 2008 -0800
+++ b/lib/html/isabelle.css	Mon Jan 05 07:54:16 2009 -0800
@@ -20,20 +20,20 @@
 
 /* inner and outer syntax markup */
 
-.tfree, tfree          { color: purple; }
-.tvar, tvar            { color: purple; }
-.free, free            { color: blue; }
-.skolem, skolem        { color: brown; }
-.bound, bound          { color: green; }
-.var, var              { color: blue; }
-.num, num              { }
-.xnum, xnum            { }
-.xstr, xstr            { color: brown; }
-.literal, literal      { font-weight: bold; }
-                      
+.tfree, tfree                 { color: purple; }
+.tvar, tvar                   { color: purple; }
+.free, free                   { color: blue; }
+.skolem, skolem               { color: brown; }
+.bound, bound                 { color: green; }
+.var, var                     { color: blue; }
+.numeral, numeral             { }
+.literal, literal             { font-weight: bold; }
+.inner_string, inner_string   { color: brown; }
+.inner_comment, inner_comment { color: #8B0000; }
+
 .loc, loc              { color: brown; }
 .tclass, tclass        { color: red; }
-          
+
 .keyword, keyword      { font-weight: bold; }
 .command, command      { font-weight: bold; }
 .ident, ident          { }
--- a/src/CCL/Wfd.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/CCL/Wfd.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      CCL/Wfd.thy
-    ID:         $Id$
     Author:     Martin Coen, Cambridge University Computer Laboratory
     Copyright   1993  University of Cambridge
 *)
@@ -435,9 +434,9 @@
   | get_bno l n (Bound m) = (m-length(l),n)
 
 (* Not a great way of identifying induction hypothesis! *)
-fun could_IH x = could_unify(x,hd (prems_of @{thm rcallT})) orelse
-                 could_unify(x,hd (prems_of @{thm rcall2T})) orelse
-                 could_unify(x,hd (prems_of @{thm rcall3T}))
+fun could_IH x = Term.could_unify(x,hd (prems_of @{thm rcallT})) orelse
+                 Term.could_unify(x,hd (prems_of @{thm rcall2T})) orelse
+                 Term.could_unify(x,hd (prems_of @{thm rcall3T}))
 
 fun IHinst tac rls = SUBGOAL (fn (Bi,i) =>
   let val bvs = bvars Bi []
@@ -451,7 +450,7 @@
 
 fun is_rigid_prog t =
      case (Logic.strip_assums_concl t) of
-        (Const("Trueprop",_) $ (Const("mem",_) $ a $ _)) => (term_vars a = [])
+        (Const("Trueprop",_) $ (Const("mem",_) $ a $ _)) => null (Term.add_vars a [])
        | _ => false
 in
 
--- a/src/FOL/IsaMakefile	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOL/IsaMakefile	Mon Jan 05 07:54:16 2009 -0800
@@ -46,7 +46,7 @@
 FOL-ex: FOL $(LOG)/FOL-ex.gz
 
 $(LOG)/FOL-ex.gz: $(OUT)/FOL ex/First_Order_Logic.thy ex/If.thy		\
-  ex/IffOracle.thy ex/LocaleTest.thy ex/Nat.thy ex/Natural_Numbers.thy	\
+  ex/IffOracle.thy ex/Nat.thy ex/Natural_Numbers.thy	\
   ex/NewLocaleSetup.thy ex/NewLocaleTest.thy    \
   ex/Miniscope.thy ex/Prolog.thy ex/ROOT.ML ex/Classical.thy		\
   ex/document/root.tex ex/Foundation.thy ex/Intuitionistic.thy		\
--- a/src/FOL/ex/LocaleTest.thy	Mon Dec 29 11:04:27 2008 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,765 +0,0 @@
-(*  Title:      FOL/ex/LocaleTest.thy
-    ID:         $Id$
-    Author:     Clemens Ballarin
-    Copyright (c) 2005 by Clemens Ballarin
-
-Collection of regression tests for locales.
-*)
-
-header {* Test of Locale Interpretation *}
-
-theory LocaleTest
-imports FOL
-begin
-
-ML {* set Toplevel.debug *}
-ML {* set show_hyps *}
-ML {* set show_sorts *}
-
-ML {*
-  fun check_thm name = let
-    val thy = the_context ();
-    val thm = PureThy.get_thm thy name;
-    val {prop, hyps, ...} = rep_thm thm;
-    val prems = Logic.strip_imp_prems prop;
-    val _ = if null hyps then ()
-        else error ("Theorem " ^ quote name ^ " has meta hyps.\n" ^
-          "Consistency check of locales package failed.");
-    val _ = if null prems then ()
-        else error ("Theorem " ^ quote name ^ " has premises.\n" ^
-          "Consistency check of locales package failed.");
-  in () end;
-*}
-
-section {* Context Elements and Locale Expressions *}
-
-text {* Naming convention for global objects: prefixes L and l *}
-
-subsection {* Renaming with Syntax *}
-
-locale LS = var mult +
-  assumes "mult(x, y) = mult(y, x)"
-
-print_locale LS
-
-locale LS' = LS mult (infixl "**" 60)
-
-print_locale LS'
-
-locale LT = var mult (infixl "**" 60) +
-  assumes "x ** y = y ** x"
-
-locale LU = LT mult (infixl "**" 60) + LT add (infixl "++" 55) + var h +
-  assumes hom: "h(x ** y) = h(x) ++ h(y)"
-
-
-(* FIXME: graceful handling of type errors?
-locale LY = LT mult (infixl "**" 60) + LT add (binder "++" 55) + var h +
-  assumes "mult(x) == add"
-*)
-
-
-locale LV = LU _ add
-
-locale LW = LU _ mult (infixl "**" 60)
-
-
-subsection {* Constrains *}
-
-locale LZ = fixes a (structure)
-locale LZ' = LZ +
-  constrains a :: "'a => 'b"
-  assumes "a (x :: 'a) = a (y)"
-print_locale LZ'
-
-
-section {* Interpretation *}
-
-text {* Naming convention for global objects: prefixes I and i *}
-
-text {* interpretation input syntax *}
-
-locale IL
-locale IM = fixes a and b and c
-
-interpretation test: IL + IM a b c [x y z] .
-
-print_interps IL    (* output: test *)
-print_interps IM    (* output: test *)
-
-interpretation test: IL print_interps IM .
-
-interpretation IL .
-
-text {* Processing of locale expression *}
-
-locale IA = fixes a assumes asm_A: "a = a"
-
-locale IB = fixes b assumes asm_B [simp]: "b = b"
-
-locale IC = IA + IB + assumes asm_C: "b = b"
-
-locale IC' = IA + IB + assumes asm_C: "c = c"
-  (* independent type var in c *)
-
-locale ID = IA + IB + fixes d defines def_D: "d == (a = b)"
-
-theorem (in ID) True ..
-
-typedecl i
-arities i :: "term"
-
-
-interpretation i1: IC ["X::i" "Y::i"] by unfold_locales auto
-
-print_interps IA  (* output: i1 *)
-
-(* possible accesses *)
-thm i1.a.asm_A thm LocaleTest.IA_locale.i1.a.asm_A thm IA_locale.i1.a.asm_A
-thm i1.asm_A thm LocaleTest.i1.asm_A
-
-ML {* check_thm "i1.a.asm_A" *}
-
-(* without prefix *)
-
-interpretation IC ["W::i" "Z::i"] by intro_locales  (* subsumed by i1: IC *)
-interpretation IC ["W::'a" "Z::i"] by unfold_locales auto
-  (* subsumes i1: IA and i1: IC *)
-
-print_interps IA  (* output: <no prefix>, i1 *)
-
-(* possible accesses *)
-thm asm_C thm a_b.asm_C thm LocaleTest.IC_locale.a_b.asm_C thm IC_locale.a_b.asm_C
-
-ML {* check_thm "asm_C" *}
-
-interpretation i2: ID [X "Y::i" "Y = X"]
-  by (simp add: eq_commute) unfold_locales
-
-print_interps IA  (* output: <no prefix>, i1 *)
-print_interps ID  (* output: i2 *)
-
-
-interpretation i3: ID [X "Y::i"] by simp unfold_locales
-
-(* duplicate: thm not added *)
-
-(* thm i3.a.asm_A *)
-
-
-print_interps IA  (* output: <no prefix>, i1 *)
-print_interps IB  (* output: i1 *)
-print_interps IC  (* output: <no prefix, i1 *)
-print_interps ID  (* output: i2, i3 *)
-
-
-interpretation i10: ID + ID a' b' d' [X "Y::i" _ u "v::i" _] by intro_locales
-
-corollary (in ID) th_x: True ..
-
-(* possible accesses: for each registration *)
-
-thm i2.th_x thm i3.th_x
-
-ML {* check_thm "i2.th_x"; check_thm "i3.th_x" *}
-
-lemma (in ID) th_y: "d == (a = b)" by (rule d_def)
-
-thm i2.th_y thm i3.th_y
-
-ML {* check_thm "i2.th_y"; check_thm "i3.th_y" *}
-
-lemmas (in ID) th_z = th_y
-
-thm i2.th_z
-
-ML {* check_thm "i2.th_z" *}
-
-
-subsection {* Interpretation in Proof Contexts *}
-
-locale IF = fixes f assumes asm_F: "f & f --> f"
-
-consts default :: "'a"
-
-theorem True
-proof -
-  fix alpha::i and beta
-  have alpha_A: "IA(alpha)" by unfold_locales
-  interpret i5: IA [alpha] by intro_locales  (* subsumed *)
-  print_interps IA  (* output: <no prefix>, i1 *)
-  interpret i6: IC [alpha beta] by unfold_locales auto
-  print_interps IA   (* output: <no prefix>, i1 *)
-  print_interps IC   (* output: <no prefix>, i1, i6 *)
-  interpret i11: IF ["default = default"] by (fast intro: IF.intro)
-  thm i11.asm_F [where 'a = i]     (* default has schematic type *)
-qed rule
-
-theorem (in IA) True
-proof -
-  print_interps! IA
-  fix beta and gamma
-  interpret i9: ID [a beta _]
-    apply - apply assumption
-    apply unfold_locales
-    apply (rule refl) done
-qed rule
-
-
-(* Definition involving free variable *)
-
-ML {* reset show_sorts *}
-
-locale IE = fixes e defines e_def: "e(x) == x & x"
-  notes e_def2 = e_def
-
-lemma (in IE) True thm e_def by fast
-
-interpretation i7: IE ["%x. x"] by simp
-
-thm i7.e_def2 (* has no premise *)
-
-ML {* check_thm "i7.e_def2" *}
-
-locale IE' = fixes e defines e_def: "e == (%x. x & x)"
-  notes e_def2 = e_def
-
-interpretation i7': IE' ["(%x. x)"] by simp
-
-thm i7'.e_def2
-
-ML {* check_thm "i7'.e_def2" *}
-
-(* Definition involving free variable in assm *)
-
-locale IG = fixes g assumes asm_G: "g --> x"
-  notes asm_G2 = asm_G
-
-interpretation i8: IG ["False"] by unfold_locales fast
-
-thm i8.asm_G2
-
-ML {* check_thm "i8.asm_G2" *}
-
-text {* Locale without assumptions *}
-
-locale IL1 = notes rev_conjI [intro] = conjI [THEN iffD1 [OF conj_commute]]
-
-lemma "[| P; Q |] ==> P & Q"
-proof -
-  interpret my: IL1 .          txt {* No chained fact required. *}
-  assume Q and P               txt {* order reversed *}
-  then show "P & Q" ..         txt {* Applies @{thm my.rev_conjI}. *}
-qed
-
-locale IL11 = notes rev_conjI = conjI [THEN iffD1 [OF conj_commute]]
-
-
-subsection {* Simple locale with assumptions *}
-
-consts ibin :: "[i, i] => i" (infixl "#" 60)
-
-axioms i_assoc: "(x # y) # z = x # (y # z)"
-  i_comm: "x # y = y # x"
-
-locale IL2 =
-  fixes OP (infixl "+" 60)
-  assumes assoc: "(x + y) + z = x + (y + z)"
-    and comm: "x + y = y + x"
-
-lemma (in IL2) lcomm: "x + (y + z) = y + (x + z)"
-proof -
-  have "x + (y + z) = (x + y) + z" by (simp add: assoc)
-  also have "... = (y + x) + z" by (simp add: comm)
-  also have "... = y + (x + z)" by (simp add: assoc)
-  finally show ?thesis .
-qed
-
-lemmas (in IL2) AC = comm assoc lcomm
-
-lemma "(x::i) # y # z # w = y # x # w # z"
-proof -
-  interpret my: IL2 ["op #"] by (rule IL2.intro [of "op #", OF i_assoc i_comm])
-  show ?thesis by (simp only: my.OP.AC)  (* or my.AC *)
-qed
-
-subsection {* Nested locale with assumptions *}
-
-locale IL3 =
-  fixes OP (infixl "+" 60)
-  assumes assoc: "(x + y) + z = x + (y + z)"
-
-locale IL4 = IL3 +
-  assumes comm: "x + y = y + x"
-
-lemma (in IL4) lcomm: "x + (y + z) = y + (x + z)"
-proof -
-  have "x + (y + z) = (x + y) + z" by (simp add: assoc)
-  also have "... = (y + x) + z" by (simp add: comm)
-  also have "... = y + (x + z)" by (simp add: assoc)
-  finally show ?thesis .
-qed
-
-lemmas (in IL4) AC = comm assoc lcomm
-
-lemma "(x::i) # y # z # w = y # x # w # z"
-proof -
-  interpret my: IL4 ["op #"]
-    by (auto intro: IL4.intro IL3.intro IL4_axioms.intro i_assoc i_comm)
-  show ?thesis by (simp only: my.OP.AC)  (* or simply AC *)
-qed
-
-text {* Locale with definition *}
-
-text {* This example is admittedly not very creative :-) *}
-
-locale IL5 = IL4 + var A +
-  defines A_def: "A == True"
-
-lemma (in IL5) lem: A
-  by (unfold A_def) rule
-
-lemma "IL5(op #) ==> True"
-proof -
-  assume "IL5(op #)"
-  then interpret IL5 ["op #"] by (auto intro: IL5.axioms)
-  show ?thesis by (rule lem)  (* lem instantiated to True *)
-qed
-
-text {* Interpretation in a context with target *}
-
-lemma (in IL4)
-  fixes A (infixl "$" 60)
-  assumes A: "IL4(A)"
-  shows "(x::i) $ y $ z $ w = y $ x $ w $ z"
-proof -
-  from A interpret A: IL4 ["A"] by (auto intro: IL4.axioms)
-  show ?thesis by (simp only: A.OP.AC)
-qed
-
-
-section {* Interpretation in Locales *}
-
-text {* Naming convention for global objects: prefixes R and r *}
-
-(* locale with many parameters ---
-   interpretations generate alternating group A5 *)
-
-locale RA5 = var A + var B + var C + var D + var E +
-  assumes eq: "A <-> B <-> C <-> D <-> E"
-
-interpretation RA5 < RA5 _ _ D E C
-print_facts
-print_interps RA5
-  using A_B_C_D_E.eq apply (blast intro: RA5.intro) done
-
-interpretation RA5 < RA5 C _ E _ A
-print_facts
-print_interps RA5
-  using A_B_C_D_E.eq apply (blast intro: RA5.intro) done
-
-interpretation RA5 < RA5 B C A _ _
-print_facts
-print_interps RA5
-  using A_B_C_D_E.eq apply (blast intro: RA5.intro) done
-
-interpretation RA5 < RA5 _ C D B _ .
-  (* Any even permutation of parameters is subsumed by the above. *)
-
-(* circle of three locales, forward direction *)
-
-locale RA1 = var A + var B + assumes p: "A <-> B"
-locale RA2 = var A + var B + assumes q: "A & B | ~ A & ~ B"
-locale RA3 = var A + var B + assumes r: "(A --> B) & (B --> A)"
-
-interpretation RA1 < RA2
-  print_facts
-  using p apply unfold_locales apply fast done
-interpretation RA2 < RA3
-  print_facts
-  using q apply unfold_locales apply fast done
-interpretation RA3 < RA1
-  print_facts
-  using r apply unfold_locales apply fast done
-
-(* circle of three locales, backward direction *)
-
-locale RB1 = var A + var B + assumes p: "A <-> B"
-locale RB2 = var A + var B + assumes q: "A & B | ~ A & ~ B"
-locale RB3 = var A + var B + assumes r: "(A --> B) & (B --> A)"
-
-interpretation RB1 < RB2
-  print_facts
-  using p apply unfold_locales apply fast done
-interpretation RB3 < RB1
-  print_facts
-  using r apply unfold_locales apply fast done
-interpretation RB2 < RB3
-  print_facts
-  using q apply unfold_locales apply fast done
-
-lemma (in RB1) True
-  print_facts
-  ..
-
-
-(* Group example *)
-
-locale Rsemi = var prod (infixl "**" 65) +
-  assumes assoc: "(x ** y) ** z = x ** (y ** z)"
-
-locale Rlgrp = Rsemi + var one + var inv +
-  assumes lone: "one ** x = x"
-    and linv: "inv(x) ** x = one"
-
-lemma (in Rlgrp) lcancel:
-  "x ** y = x ** z <-> y = z"
-proof
-  assume "x ** y = x ** z"
-  then have "inv(x) ** x ** y = inv(x) ** x ** z" by (simp add: assoc)
-  then show "y = z" by (simp add: lone linv)
-qed simp
-
-locale Rrgrp = Rsemi + var one + var inv +
-  assumes rone: "x ** one = x"
-    and rinv: "x ** inv(x) = one"
-
-lemma (in Rrgrp) rcancel:
-  "y ** x = z ** x <-> y = z"
-proof
-  assume "y ** x = z ** x"
-  then have "y ** (x ** inv(x)) = z ** (x ** inv(x))"
-    by (simp add: assoc [symmetric])
-  then show "y = z" by (simp add: rone rinv)
-qed simp
-
-interpretation Rlgrp < Rrgrp
-  proof unfold_locales
-    {
-      fix x
-      have "inv(x) ** x ** one = inv(x) ** x" by (simp add: linv lone)
-      then show "x ** one = x" by (simp add: assoc lcancel)
-    }
-    note rone = this
-    {
-      fix x
-      have "inv(x) ** x ** inv(x) = inv(x) ** one"
-	by (simp add: linv lone rone)
-      then show "x ** inv(x) = one" by (simp add: assoc lcancel)
-    }
-  qed
-
-(* effect on printed locale *)
-
-print_locale! Rlgrp
-
-(* use of derived theorem *)
-
-lemma (in Rlgrp)
-  "y ** x = z ** x <-> y = z"
-  apply (rule rcancel)
-  print_interps Rrgrp thm lcancel rcancel
-  done
-
-(* circular interpretation *)
-
-interpretation Rrgrp < Rlgrp
-  proof unfold_locales
-    {
-      fix x
-      have "one ** (x ** inv(x)) = x ** inv(x)" by (simp add: rinv rone)
-      then show "one ** x = x" by (simp add: assoc [symmetric] rcancel)
-    }
-    note lone = this
-    {
-      fix x
-      have "inv(x) ** (x ** inv(x)) = one ** inv(x)"
-	by (simp add: rinv lone rone)
-      then show "inv(x) ** x = one" by (simp add: assoc [symmetric] rcancel)
-    }
-  qed
-
-(* effect on printed locale *)
-
-print_locale! Rrgrp
-print_locale! Rlgrp
-
-subsection {* Interaction of Interpretation in Theories and Locales:
-  in Locale, then in Theory *}
-
-consts
-  rone :: i
-  rinv :: "i => i"
-
-axioms
-  r_one : "rone # x = x"
-  r_inv : "rinv(x) # x = rone"
-
-interpretation Rbool: Rlgrp ["op #" "rone" "rinv"]
-proof
-  fix x y z
-  {
-    show "(x # y) # z = x # (y # z)" by (rule i_assoc)
-  next
-    show "rone # x = x" by (rule r_one)
-  next
-    show "rinv(x) # x = rone" by (rule r_inv)
-  }
-qed
-
-(* derived elements *)
-
-print_interps Rrgrp
-print_interps Rlgrp
-
-lemma "y # x = z # x <-> y = z" by (rule Rbool.rcancel)
-
-(* adding lemma to derived element *)
-
-lemma (in Rrgrp) new_cancel:
-  "b ** a = c ** a <-> b = c"
-  by (rule rcancel)
-
-thm Rbool.new_cancel (* additional prems discharged!! *)
-
-ML {* check_thm "Rbool.new_cancel" *}
-
-lemma "b # a = c # a <-> b = c" by (rule Rbool.new_cancel)
-
-
-subsection {* Interaction of Interpretation in Theories and Locales:
-  in Theory, then in Locale *}
-
-(* Another copy of the group example *)
-
-locale Rqsemi = var prod (infixl "**" 65) +
-  assumes assoc: "(x ** y) ** z = x ** (y ** z)"
-
-locale Rqlgrp = Rqsemi + var one + var inv +
-  assumes lone: "one ** x = x"
-    and linv: "inv(x) ** x = one"
-
-lemma (in Rqlgrp) lcancel:
-  "x ** y = x ** z <-> y = z"
-proof
-  assume "x ** y = x ** z"
-  then have "inv(x) ** x ** y = inv(x) ** x ** z" by (simp add: assoc)
-  then show "y = z" by (simp add: lone linv)
-qed simp
-
-locale Rqrgrp = Rqsemi + var one + var inv +
-  assumes rone: "x ** one = x"
-    and rinv: "x ** inv(x) = one"
-
-lemma (in Rqrgrp) rcancel:
-  "y ** x = z ** x <-> y = z"
-proof
-  assume "y ** x = z ** x"
-  then have "y ** (x ** inv(x)) = z ** (x ** inv(x))"
-    by (simp add: assoc [symmetric])
-  then show "y = z" by (simp add: rone rinv)
-qed simp
-
-interpretation Rqrgrp < Rrgrp
-  apply unfold_locales
-  apply (rule assoc)
-  apply (rule rone)
-  apply (rule rinv)
-  done
-
-interpretation R2: Rqlgrp ["op #" "rone" "rinv"] 
-  apply unfold_locales  (* FIXME: unfold_locales is too eager and shouldn't
-                          solve this. *)
-  apply (rule i_assoc)
-  apply (rule r_one)
-  apply (rule r_inv)
-  done
-
-print_interps Rqsemi
-print_interps Rqlgrp
-print_interps Rlgrp  (* no interpretations yet *)
-
-
-interpretation Rqlgrp < Rqrgrp
-  proof unfold_locales
-    {
-      fix x
-      have "inv(x) ** x ** one = inv(x) ** x" by (simp add: linv lone)
-      then show "x ** one = x" by (simp add: assoc lcancel)
-    }
-    note rone = this
-    {
-      fix x
-      have "inv(x) ** x ** inv(x) = inv(x) ** one"
-	by (simp add: linv lone rone)
-      then show "x ** inv(x) = one" by (simp add: assoc lcancel)
-    }
-  qed
-
-print_interps! Rqrgrp
-print_interps! Rsemi  (* witness must not have meta hyps *)
-print_interps! Rrgrp  (* witness must not have meta hyps *)
-print_interps! Rlgrp  (* witness must not have meta hyps *)
-thm R2.rcancel
-thm R2.lcancel
-
-ML {* check_thm "R2.rcancel"; check_thm "R2.lcancel" *}
-
-
-subsection {* Generation of Witness Theorems for Transitive Interpretations *}
-
-locale Rtriv = var x +
-  assumes x: "x = x"
-
-locale Rtriv2 = var x + var y +
-  assumes x: "x = x" and y: "y = y"
-
-interpretation Rtriv2 < Rtriv x
-  apply unfold_locales
-  apply (rule x)
-  done
-
-interpretation Rtriv2 < Rtriv y
-  apply unfold_locales
-  apply (rule y)
-  done
-
-print_locale Rtriv2
-
-locale Rtriv3 = var x + var y + var z +
-  assumes x: "x = x" and y: "y = y" and z: "z = z"
-
-interpretation Rtriv3 < Rtriv2 x y
-  apply unfold_locales
-  apply (rule x)
-  apply (rule y)
-  done
-
-print_locale Rtriv3
-
-interpretation Rtriv3 < Rtriv2 x z
-  apply unfold_locales
-  apply (rule x_y_z.x)
-  apply (rule z)
-  done
-
-ML {* set show_types *}
-
-print_locale Rtriv3
-
-
-subsection {* Normalisation Replaces Assumed Element by Derived Element *}
-
-typedecl ('a, 'b) pair
-arities pair :: ("term", "term") "term"
-
-consts
-  pair :: "['a, 'b] => ('a, 'b) pair"
-  fst :: "('a, 'b) pair => 'a"
-  snd :: "('a, 'b) pair => 'b"
-
-axioms
-  fst [simp]: "fst(pair(x, y)) = x"
-  snd [simp]: "snd(pair(x, y)) = y"
-
-locale Rpair = var prod (infixl "**" 65) + var prodP (infixl "***" 65) +
-  defines P_def: "x *** y == pair(fst(x) ** fst(y), snd(x) ** snd(y))"
-
-locale Rpair_semi = Rpair + Rsemi
-
-interpretation Rpair_semi < Rsemi prodP (infixl "***" 65)
-proof unfold_locales
-  fix x y z
-  show "(x *** y) *** z = x *** (y *** z)"
-    apply (simp only: P_def) apply (simp add: assoc) (* FIXME: unfold P_def fails *)
-    done
-qed
-
-locale Rsemi_rev = Rsemi + var rprod (infixl "++" 65) +
-  defines r_def: "x ++ y == y ** x"
-
-lemma (in Rsemi_rev) r_assoc:
-  "(x ++ y) ++ z = x ++ (y ++ z)"
-  by (simp add: r_def assoc)
-
-
-subsection {* Import of Locales with Predicates as Interpretation *}
-
-locale Ra =
-  assumes Ra: "True"
-
-locale Rb = Ra +
-  assumes Rb: "True"
-
-locale Rc = Rb +
-  assumes Rc: "True"
-
-print_locale! Rc
-
-
-section {* Interpretation of Defined Concepts *}
-
-text {* Naming convention for global objects: prefixes D and d *}
-
-
-subsection {* Simple examples *}
-
-locale Da = fixes a :: o
-  assumes true: a
-
-text {* In the following examples, @{term "~ a"} is the defined concept. *}
-
-lemma (in Da) not_false: "~ a <-> False"
-  apply simp apply (rule true) done
-
-interpretation D1: Da ["True"]
-  where "~ True == False"
-  apply -
-  apply unfold_locales [1] apply rule
-  by simp
-
-thm D1.not_false
-lemma "False <-> False" apply (rule D1.not_false) done
-
-interpretation D2: Da ["x | ~ x"]
-  where "~ (x | ~ x) <-> ~ x & x"
-  apply -
-  apply unfold_locales [1] apply fast
-  by simp
-
-thm D2.not_false
-lemma "~ x & x <-> False" apply (rule D2.not_false) done
-
-print_interps! Da
-
-(* Subscriptions of interpretations *)
-
-lemma (in Da) not_false2: "~a <-> False"
-  apply simp apply (rule true) done
-
-thm D1.not_false2 D2.not_false2
-lemma "False <-> False" apply (rule D1.not_false2) done
-lemma "~x & x <-> False" apply (rule D2.not_false2) done
-
-(* Unfolding in attributes *)
-
-locale Db = Da +
-  fixes b :: o
-  assumes a_iff_b: "~a <-> b"
-
-lemmas (in Db) not_false_b = not_false [unfolded a_iff_b]
-
-interpretation D2: Db ["x | ~ x" "~ (x <-> x)"]
-  apply unfold_locales apply fast done
-
-thm D2.not_false_b
-lemma "~(x <-> x) <-> False" apply (rule D2.not_false_b) done
-
-(* Subscription and attributes *)
-
-lemmas (in Db) not_false_b2 = not_false [unfolded a_iff_b]
-
-thm D2.not_false_b2
-lemma "~(x <-> x) <-> False" apply (rule D2.not_false_b2) done
-
-end
--- a/src/FOL/ex/NewLocaleSetup.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOL/ex/NewLocaleSetup.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -44,9 +44,10 @@
 val _ =
   OuterSyntax.command "interpretation"
     "prove interpretation of locale expression in theory" K.thy_goal
-    (P.!!! SpecParse.locale_expression
-        >> (fn expr => Toplevel.print o
-            Toplevel.theory_to_proof (Expression.interpretation_cmd expr)));
+    (P.!!! SpecParse.locale_expression --
+      Scan.optional (P.$$$ "where" |-- P.and_list1 (SpecParse.opt_thm_name ":" -- P.prop)) []
+        >> (fn (expr, mixin) => Toplevel.print o
+            Toplevel.theory_to_proof (Expression.interpretation_cmd expr mixin)));
 
 val _ =
   OuterSyntax.command "interpret"
--- a/src/FOL/ex/NewLocaleTest.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOL/ex/NewLocaleTest.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -8,9 +8,7 @@
 imports NewLocaleSetup
 begin
 
-ML_val {* set new_locales *}
 ML_val {* set Toplevel.debug *}
-ML_val {* set show_hyps *}
 
 
 typedecl int arities int :: "term"
@@ -24,7 +22,7 @@
   int_minus: "(-x) + x = 0"
   int_minus2: "-(-x) = x"
 
-text {* Inference of parameter types *}
+section {* Inference of parameter types *}
 
 locale param1 = fixes p
 print_locale! param1
@@ -44,7 +42,7 @@
 print_locale! param4
 
 
-text {* Incremental type constraints *}
+subsection {* Incremental type constraints *}
 
 locale constraint1 =
   fixes  prod (infixl "**" 65)
@@ -58,7 +56,7 @@
 print_locale! constraint2
 
 
-text {* Inheritance *}
+section {* Inheritance *}
 
 locale semi =
   fixes prod (infixl "**" 65)
@@ -94,7 +92,7 @@
 print_locale! pert_hom' thm pert_hom'_def
 
 
-text {* Syntax declarations *}
+section {* Syntax declarations *}
 
 locale logic =
   fixes land (infixl "&&" 55)
@@ -112,14 +110,30 @@
 locale use_decl = logic + semi "op ||"
 print_locale! use_decl thm use_decl_def
 
+locale extra_type =
+  fixes a :: 'a
+    and P :: "'a => 'b => o"
+begin
 
-text {* Foundational versions of theorems *}
+definition test :: "'a => o" where
+  "test(x) <-> (ALL b. P(x, b))"
+
+end
+
+term extra_type.test thm extra_type.test_def
+
+interpretation var: extra_type "0" "%x y. x = 0" .
+
+thm var.test_def
+
+
+section {* Foundational versions of theorems *}
 
 thm logic.assoc
 thm logic.lor_def
 
 
-text {* Defines *}
+section {* Defines *}
 
 locale logic_def =
   fixes land (infixl "&&" 55)
@@ -149,7 +163,7 @@
 end
 
 
-text {* Notes *}
+section {* Notes *}
 
 (* A somewhat arcane homomorphism example *)
 
@@ -165,11 +179,21 @@
   assumes semi_homh: "semi_hom(prod, sum, h)"
   notes semi_hom_mult = semi_hom_mult [OF semi_homh]
 
+thm semi_hom_loc.semi_hom_mult
+(* unspecified, attribute not applied in background theory !!! *)
+
 lemma (in semi_hom_loc) "h(prod(x, y)) = sum(h(x), h(y))"
   by (rule semi_hom_mult)
 
+(* Referring to facts from within a context specification *)
 
-text {* Theorem statements *}
+lemma
+  assumes x: "P <-> P"
+  notes y = x
+  shows True ..
+
+
+section {* Theorem statements *}
 
 lemma (in lgrp) lcancel:
   "x ** y = x ** z <-> y = z"
@@ -200,7 +224,7 @@
 print_locale! rgrp
 
 
-text {* Patterns *}
+subsection {* Patterns *}
 
 lemma (in rgrp)
   assumes "y ** x = z ** x" (is ?a)
@@ -218,7 +242,7 @@
 qed
 
 
-text {* Interpretation between locales: sublocales *}
+section {* Interpretation between locales: sublocales *}
 
 sublocale lgrp < right: rgrp
 print_facts
@@ -305,8 +329,7 @@
   done
 
 print_locale! order_with_def
-(* Note that decls come after theorems that make use of them.
-  Appears to be harmless at least in this example. *)
+(* Note that decls come after theorems that make use of them. *)
 
 
 (* locale with many parameters ---
@@ -359,7 +382,7 @@
 print_locale! trivial  (* No instance for y created (subsumed). *)
 
 
-text {* Sublocale, then interpretation in theory *}
+subsection {* Sublocale, then interpretation in theory *}
 
 interpretation int: lgrp "op +" "0" "minus"
 proof unfold_locales
@@ -374,7 +397,7 @@
   (* the latter comes through the sublocale relation *)
 
 
-text {* Interpretation in theory, then sublocale *}
+subsection {* Interpretation in theory, then sublocale *}
 
 interpretation (* fol: *) logic "op +" "minus"
 (* FIXME declaration of qualified names *)
@@ -386,10 +409,12 @@
   assumes assoc: "(x && y) && z = x && (y && z)"
     and notnot: "-- (-- x) = x"
 begin
-(* FIXME
+
+(* FIXME interpretation below fails
 definition lor (infixl "||" 50) where
   "x || y = --(-- x && -- y)"
 *)
+
 end
 
 sublocale logic < two: logic2
@@ -398,7 +423,48 @@
 thm two.assoc
 
 
-text {* Interpretation in proofs *}
+subsection {* Declarations and sublocale *}
+
+locale logic_a = logic
+locale logic_b = logic
+
+sublocale logic_a < logic_b
+  by unfold_locales
+
+
+subsection {* Equations *}
+
+locale logic_o =
+  fixes land (infixl "&&" 55)
+    and lnot ("-- _" [60] 60)
+  assumes assoc_o: "(x && y) && z <-> x && (y && z)"
+    and notnot_o: "-- (-- x) <-> x"
+begin
+
+definition lor_o (infixl "||" 50) where
+  "x || y <-> --(-- x && -- y)"
+
+end
+
+interpretation x!: logic_o "op &" "Not"
+  where bool_logic_o: "logic_o.lor_o(op &, Not, x, y) <-> x | y"
+proof -
+  show bool_logic_o: "PROP logic_o(op &, Not)" by unfold_locales fast+
+  show "logic_o.lor_o(op &, Not, x, y) <-> x | y"
+    by (unfold logic_o.lor_o_def [OF bool_logic_o]) fast
+qed
+
+thm x.lor_o_def bool_logic_o
+
+lemma lor_triv: "z <-> z" ..
+
+lemma (in logic_o) lor_triv: "x || y <-> x || y" by fast
+
+thm lor_triv [where z = True] (* Check strict prefix. *)
+  x.lor_triv
+
+
+subsection {* Interpretation in proofs *}
 
 lemma True
 proof
--- a/src/FOL/ex/ROOT.ML	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOL/ex/ROOT.ML	Mon Jan 05 07:54:16 2009 -0800
@@ -29,6 +29,5 @@
 ];
 
 (*regression test for locales -- sets several global flags!*)
-no_document use_thy "LocaleTest";
 no_document use_thy "NewLocaleTest";
 
--- a/src/FOLP/IFOLP.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOLP/IFOLP.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      FOLP/IFOLP.thy
-    ID:         $Id$
     Author:     Martin D Coen, Cambridge University Computer Laboratory
     Copyright   1992  University of Cambridge
 *)
@@ -247,7 +246,7 @@
           and concl = discard_proof (Logic.strip_assums_concl prem)
       in
           if exists (fn hyp => hyp aconv concl) hyps
-          then case distinct (op =) (filter (fn hyp => could_unify (hyp, concl)) hyps) of
+          then case distinct (op =) (filter (fn hyp => Term.could_unify (hyp, concl)) hyps) of
                    [_] => assume_tac i
                  |  _  => no_tac
           else no_tac
@@ -340,6 +339,7 @@
   shows "?a : R"
   apply (insert assms(1) [unfolded ex1_def])
   apply (erule exE conjE | assumption | rule assms(1))+
+  apply (erule assms(2), assumption)
   done
 
 
--- a/src/FOLP/simp.ML	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/FOLP/simp.ML	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
-(*  Title:      FOLP/simp
-    ID:         $Id$
+(*  Title:      FOLP/simp.ML
     Author:     Tobias Nipkow
     Copyright   1993  University of Cambridge
 
@@ -156,21 +155,21 @@
 (*ccs contains the names of the constants possessing congruence rules*)
 fun add_hidden_vars ccs =
   let fun add_hvars tm hvars = case tm of
-              Abs(_,_,body) => add_term_vars(body,hvars)
+              Abs(_,_,body) => OldTerm.add_term_vars(body,hvars)
             | _$_ => let val (f,args) = strip_comb tm 
                      in case f of
                             Const(c,T) => 
                                 if member (op =) ccs c
                                 then fold_rev add_hvars args hvars
-                                else add_term_vars (tm, hvars)
-                          | _ => add_term_vars (tm, hvars)
+                                else OldTerm.add_term_vars (tm, hvars)
+                          | _ => OldTerm.add_term_vars (tm, hvars)
                      end
             | _ => hvars;
   in add_hvars end;
 
 fun add_new_asm_vars new_asms =
     let fun itf (tm, at) vars =
-                if at then vars else add_term_vars(tm,vars)
+                if at then vars else OldTerm.add_term_vars(tm,vars)
         fun add_list(tm,al,vars) = let val (_,tml) = strip_comb tm
                 in if length(tml)=length(al)
                    then fold_rev itf (tml ~~ al) vars
@@ -198,7 +197,7 @@
     val hvars = add_new_asm_vars new_asms (rhs,hvars)
     fun it_asms asm hvars =
         if atomic asm then add_new_asm_vars new_asms (asm,hvars)
-        else add_term_frees(asm,hvars)
+        else OldTerm.add_term_frees(asm,hvars)
     val hvars = fold_rev it_asms asms hvars
     val hvs = map (#1 o dest_Var) hvars
     val refl1_tac = refl_tac 1
@@ -542,7 +541,7 @@
 fun find_subst sg T =
 let fun find (thm::thms) =
         let val (Const(_,cT), va, vb) = dest_red(hd(prems_of thm));
-            val [P] = add_term_vars(concl_of thm,[]) \\ [va,vb]
+            val [P] = OldTerm.add_term_vars(concl_of thm,[]) \\ [va,vb]
             val eqT::_ = binder_types cT
         in if Sign.typ_instance sg (T,eqT) then SOME(thm,va,vb,P)
            else find thms
--- a/src/HOL/Algebra/AbelCoset.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/AbelCoset.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/AbelCoset.thy
-  Id:        $Id$
   Author:    Stephan Hohe, TU Muenchen
 *)
 
@@ -52,7 +51,9 @@
   "a_kernel G H h \<equiv> kernel \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>
                               \<lparr>carrier = carrier H, mult = add H, one = zero H\<rparr> h"
 
-locale abelian_group_hom = abelian_group G + abelian_group H + var h +
+locale abelian_group_hom = G: abelian_group G + H: abelian_group H
+    for G (structure) and H (structure) +
+  fixes h
   assumes a_group_hom: "group_hom (| carrier = carrier G, mult = add G, one = zero G |)
                                   (| carrier = carrier H, mult = add H, one = zero H |) h"
 
@@ -180,7 +181,8 @@
 
 subsubsection {* Subgroups *}
 
-locale additive_subgroup = var H + struct G +
+locale additive_subgroup =
+  fixes H and G (structure)
   assumes a_subgroup: "subgroup H \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
 
 lemma (in additive_subgroup) is_additive_subgroup:
@@ -218,7 +220,7 @@
 
 text {* Every subgroup of an @{text "abelian_group"} is normal *}
 
-locale abelian_subgroup = additive_subgroup H G + abelian_group G +
+locale abelian_subgroup = additive_subgroup + abelian_group G +
   assumes a_normal: "normal H \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
 
 lemma (in abelian_subgroup) is_abelian_subgroup:
@@ -230,7 +232,7 @@
       and a_comm: "!!x y. [| x \<in> carrier G; y \<in> carrier G |] ==> x \<oplus>\<^bsub>G\<^esub> y = y \<oplus>\<^bsub>G\<^esub> x"
   shows "abelian_subgroup H G"
 proof -
-  interpret normal ["H" "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"]
+  interpret normal "H" "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
   by (rule a_normal)
 
   show "abelian_subgroup H G"
@@ -243,9 +245,9 @@
       and a_subgroup: "subgroup H \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
   shows "abelian_subgroup H G"
 proof -
-  interpret comm_group ["\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"]
+  interpret comm_group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
   by (rule a_comm_group)
-  interpret subgroup ["H" "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"]
+  interpret subgroup "H" "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
   by (rule a_subgroup)
 
   show "abelian_subgroup H G"
@@ -538,8 +540,8 @@
                                   (| carrier = carrier H, mult = add H, one = zero H |) h"
   shows "abelian_group_hom G H h"
 proof -
-  interpret G: abelian_group [G] by fact
-  interpret H: abelian_group [H] by fact
+  interpret G!: abelian_group G by fact
+  interpret H!: abelian_group H by fact
   show ?thesis apply (intro abelian_group_hom.intro abelian_group_hom_axioms.intro)
     apply fact
     apply fact
@@ -690,7 +692,7 @@
   assumes carr: "x \<in> carrier G" "x' \<in> carrier G"
   shows "(x' \<in> H +> x) = (x' \<ominus> x \<in> H)"
 proof -
-  interpret G: ring [G] by fact
+  interpret G!: ring G by fact
   from carr
   have "(x' \<in> H +> x) = (x' \<oplus> \<ominus>x \<in> H)" by (rule a_rcos_module)
   with carr
--- a/src/HOL/Algebra/Congruence.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Congruence.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:  Algebra/Congruence.thy
-  Id:     $Id$
   Author: Clemens Ballarin, started 3 January 2008
   Copyright: Clemens Ballarin
 *)
--- a/src/HOL/Algebra/Coset.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Coset.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Algebra/Coset.thy
-    ID:         $Id$
     Author:     Florian Kammueller, with new proofs by L C Paulson, and
                 Stephan Hohe
 *)
@@ -114,7 +113,7 @@
       and repr:  "H #> x = H #> y"
   shows "y \<in> H #> x"
 proof -
-  interpret subgroup [H G] by fact
+  interpret subgroup H G by fact
   show ?thesis  apply (subst repr)
   apply (intro rcos_self)
    apply (rule ycarr)
@@ -129,7 +128,7 @@
     and a': "a' \<in> H #> a"
   shows "a' \<in> carrier G"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from subset and acarr
   have "H #> a \<subseteq> carrier G" by (rule r_coset_subset_G)
   from this and a'
@@ -142,7 +141,7 @@
   assumes hH: "h \<in> H"
   shows "H #> h = H"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   show ?thesis apply (unfold r_coset_def)
     apply rule
     apply rule
@@ -173,7 +172,7 @@
       and x'cos: "x' \<in> H #> x"
   shows "(x' \<otimes> inv x) \<in> H"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from xcarr x'cos
       have x'carr: "x' \<in> carrier G"
       by (rule elemrcos_carrier[OF is_group])
@@ -213,7 +212,7 @@
       and xixH: "(x' \<otimes> inv x) \<in> H"
   shows "x' \<in> H #> x"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from xixH
       have "\<exists>h\<in>H. x' \<otimes> (inv x) = h" by fast
   from this
@@ -244,7 +243,7 @@
   assumes carr: "x \<in> carrier G" "x' \<in> carrier G"
   shows "(x' \<in> H #> x) = (x' \<otimes> inv x \<in> H)"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   show ?thesis proof  assume "x' \<in> H #> x"
     from this and carr
     show "x' \<otimes> inv x \<in> H"
@@ -263,7 +262,7 @@
   assumes XH: "X \<in> rcosets H"
   shows "X \<subseteq> carrier G"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from XH have "\<exists>x\<in> carrier G. X = H #> x"
       unfolding RCOSETS_def
       by fast
@@ -348,7 +347,7 @@
       and xixH: "(inv x \<otimes> x') \<in> H"
   shows "x' \<in> x <# H"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from xixH
       have "\<exists>h\<in>H. (inv x) \<otimes> x' = h" by fast
   from this
@@ -600,7 +599,7 @@
    assumes "group G"
    shows "equiv (carrier G) (rcong H)"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   show ?thesis
   proof (intro equiv.intro)
     show "refl (carrier G) (rcong H)"
@@ -647,7 +646,7 @@
   assumes a: "a \<in> carrier G"
   shows "a <# H = rcong H `` {a}"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   show ?thesis by (force simp add: r_congruent_def l_coset_def m_assoc [symmetric] a ) 
 qed
 
@@ -658,7 +657,7 @@
   assumes p: "ha \<otimes> a = h \<otimes> b" "a \<in> carrier G" "b \<in> carrier G" "h \<in> H" "ha \<in> H" "hb \<in> H"
   shows "hb \<otimes> a \<in> (\<Union>h\<in>H. {h \<otimes> b})"
 proof -
-  interpret subgroup [H G] by fact
+  interpret subgroup H G by fact
   from p show ?thesis apply (rule_tac UN_I [of "hb \<otimes> ((inv ha) \<otimes> h)"])
     apply (simp add: )
     apply (simp add: m_assoc transpose_inv)
@@ -670,7 +669,7 @@
   assumes p: "a \<in> rcosets H" "b \<in> rcosets H" "a\<noteq>b"
   shows "a \<inter> b = {}"
 proof -
-  interpret subgroup [H G] by fact
+  interpret subgroup H G by fact
   from p show ?thesis apply (simp add: RCOSETS_def r_coset_def)
     apply (blast intro: rcos_equation prems sym)
     done
@@ -760,7 +759,7 @@
   assumes "subgroup H G"
   shows "\<Union>(rcosets H) = carrier G"
 proof -
-  interpret subgroup [H G] by fact
+  interpret subgroup H G by fact
   show ?thesis
     apply (rule equalityI)
     apply (force simp add: RCOSETS_def r_coset_def)
@@ -847,7 +846,7 @@
   assumes "group G"
   shows "H \<in> rcosets H"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   from _ subgroup_axioms have "H #> \<one> = H"
     by (rule coset_join2) auto
   then show ?thesis
--- a/src/HOL/Algebra/Divisibility.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Divisibility.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     Divisibility in monoids and rings
-  Id:        $Id$
   Author:    Clemens Ballarin, started 18 July 2008
 
 Based on work by Stephan Hohe.
@@ -32,7 +31,7 @@
   "monoid_cancel G"
   ..
 
-interpretation group \<subseteq> monoid_cancel
+sublocale group \<subseteq> monoid_cancel
   proof qed simp+
 
 
@@ -45,7 +44,7 @@
           "\<And>a b c. \<lbrakk>a \<otimes> c = b \<otimes> c; a \<in> carrier G; b \<in> carrier G; c \<in> carrier G\<rbrakk> \<Longrightarrow> a = b"
   shows "comm_monoid_cancel G"
 proof -
-  interpret comm_monoid [G] by fact
+  interpret comm_monoid G by fact
   show "comm_monoid_cancel G"
     apply unfold_locales
     apply (subgoal_tac "a \<otimes> c = b \<otimes> c")
@@ -59,7 +58,7 @@
   "comm_monoid_cancel G"
   by intro_locales
 
-interpretation comm_group \<subseteq> comm_monoid_cancel
+sublocale comm_group \<subseteq> comm_monoid_cancel
   ..
 
 
@@ -755,7 +754,7 @@
 using pf
 unfolding properfactor_lless
 proof -
-  interpret weak_partial_order ["division_rel G"] ..
+  interpret weak_partial_order "division_rel G" ..
   from x'x
        have "x' .=\<^bsub>division_rel G\<^esub> x" by simp
   also assume "x \<sqsubset>\<^bsub>division_rel G\<^esub> y"
@@ -771,7 +770,7 @@
 using pf
 unfolding properfactor_lless
 proof -
-  interpret weak_partial_order ["division_rel G"] ..
+  interpret weak_partial_order "division_rel G" ..
   assume "x \<sqsubset>\<^bsub>division_rel G\<^esub> y"
   also from yy'
        have "y .=\<^bsub>division_rel G\<^esub> y'" by simp
@@ -2916,7 +2915,7 @@
 lemma (in gcd_condition_monoid) division_weak_lower_semilattice [simp]:
   shows "weak_lower_semilattice (division_rel G)"
 proof -
-  interpret weak_partial_order ["division_rel G"] ..
+  interpret weak_partial_order "division_rel G" ..
   show ?thesis
   apply (unfold_locales, simp_all)
   proof -
@@ -2941,7 +2940,7 @@
   shows "a' gcdof b c"
 proof -
   note carr = a'carr carr'
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   have "a' \<in> carrier G \<and> a' gcdof b c"
     apply (simp add: gcdof_greatestLower carr')
     apply (subst greatest_Lower_cong_l[of _ a])
@@ -2958,7 +2957,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"
   shows "somegcd G a b \<in> carrier G"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet[OF carr])
     apply (rule meet_closed[simplified], fact+)
@@ -2969,7 +2968,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"
   shows "(somegcd G a b) gcdof a b"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   from carr
   have "somegcd G a b \<in> carrier G \<and> (somegcd G a b) gcdof a b"
     apply (subst gcdof_greatestLower, simp, simp)
@@ -2983,7 +2982,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"
   shows "\<exists>x\<in>carrier G. x = somegcd G a b"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet[OF carr])
     apply (rule meet_closed[simplified], fact+)
@@ -2994,7 +2993,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"
   shows "(somegcd G a b) divides a"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet[OF carr])
     apply (rule meet_left[simplified], fact+)
@@ -3005,7 +3004,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"
   shows "(somegcd G a b) divides b"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet[OF carr])
     apply (rule meet_right[simplified], fact+)
@@ -3017,7 +3016,7 @@
     and L: "x \<in> carrier G"  "y \<in> carrier G"  "z \<in> carrier G"
   shows "z divides (somegcd G x y)"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet L)
     apply (rule meet_le[simplified], fact+)
@@ -3029,7 +3028,7 @@
     and carr: "x \<in> carrier G"  "x' \<in> carrier G"  "y \<in> carrier G"
   shows "somegcd G x y \<sim> somegcd G x' y"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet carr)
     apply (rule meet_cong_l[simplified], fact+)
@@ -3041,7 +3040,7 @@
     and yy': "y \<sim> y'"
   shows "somegcd G x y \<sim> somegcd G x y'"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: somegcd_meet carr)
     apply (rule meet_cong_r[simplified], fact+)
@@ -3092,7 +3091,7 @@
   assumes "finite A"  "A \<subseteq> carrier G"  "A \<noteq> {}"
   shows "\<exists>x\<in> carrier G. x = SomeGcd G A"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (simp add: SomeGcd_def)
     apply (rule finite_inf_closed[simplified], fact+)
@@ -3103,7 +3102,7 @@
   assumes carr: "a \<in> carrier G"  "b \<in> carrier G"  "c \<in> carrier G"
   shows "somegcd G (somegcd G a b) c \<sim> somegcd G a (somegcd G b c)"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
   show ?thesis
     apply (subst (2 3) somegcd_meet, (simp add: carr)+)
     apply (simp add: somegcd_meet carr)
@@ -3313,7 +3312,7 @@
   qed
 qed
 
-interpretation gcd_condition_monoid \<subseteq> primeness_condition_monoid
+sublocale gcd_condition_monoid \<subseteq> primeness_condition_monoid
   by (rule primeness_condition)
 
 
@@ -3832,7 +3831,7 @@
   with fca fcb show ?thesis by simp
 qed
 
-interpretation factorial_monoid \<subseteq> divisor_chain_condition_monoid
+sublocale factorial_monoid \<subseteq> divisor_chain_condition_monoid
 apply unfold_locales
 apply (rule wfUNIVI)
 apply (rule measure_induct[of "factorcount G"])
@@ -3854,7 +3853,7 @@
   done
 qed
 
-interpretation factorial_monoid \<subseteq> primeness_condition_monoid
+sublocale factorial_monoid \<subseteq> primeness_condition_monoid
   proof qed (rule irreducible_is_prime)
 
 
@@ -3866,13 +3865,13 @@
   shows "gcd_condition_monoid G"
   proof qed (rule gcdof_exists)
 
-interpretation factorial_monoid \<subseteq> gcd_condition_monoid
+sublocale factorial_monoid \<subseteq> gcd_condition_monoid
   proof qed (rule gcdof_exists)
 
 lemma (in factorial_monoid) division_weak_lattice [simp]:
   shows "weak_lattice (division_rel G)"
 proof -
-  interpret weak_lower_semilattice ["division_rel G"] by simp
+  interpret weak_lower_semilattice "division_rel G" by simp
 
   show "weak_lattice (division_rel G)"
   apply (unfold_locales, simp_all)
@@ -3902,14 +3901,14 @@
 proof clarify
   assume dcc: "divisor_chain_condition_monoid G"
      and pc: "primeness_condition_monoid G"
-  interpret divisor_chain_condition_monoid ["G"] by (rule dcc)
-  interpret primeness_condition_monoid ["G"] by (rule pc)
+  interpret divisor_chain_condition_monoid "G" by (rule dcc)
+  interpret primeness_condition_monoid "G" by (rule pc)
 
   show "factorial_monoid G"
       by (fast intro: factorial_monoidI wfactors_exist wfactors_unique)
 next
   assume fm: "factorial_monoid G"
-  interpret factorial_monoid ["G"] by (rule fm)
+  interpret factorial_monoid "G" by (rule fm)
   show "divisor_chain_condition_monoid G \<and> primeness_condition_monoid G"
       by rule unfold_locales
 qed
@@ -3920,13 +3919,13 @@
 proof clarify
     assume dcc: "divisor_chain_condition_monoid G"
      and gc: "gcd_condition_monoid G"
-  interpret divisor_chain_condition_monoid ["G"] by (rule dcc)
-  interpret gcd_condition_monoid ["G"] by (rule gc)
+  interpret divisor_chain_condition_monoid "G" by (rule dcc)
+  interpret gcd_condition_monoid "G" by (rule gc)
   show "factorial_monoid G"
       by (simp add: factorial_condition_one[symmetric], rule, unfold_locales)
 next
   assume fm: "factorial_monoid G"
-  interpret factorial_monoid ["G"] by (rule fm)
+  interpret factorial_monoid "G" by (rule fm)
   show "divisor_chain_condition_monoid G \<and> gcd_condition_monoid G"
       by rule unfold_locales
 qed
--- a/src/HOL/Algebra/FiniteProduct.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/FiniteProduct.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Algebra/FiniteProduct.thy
-    ID:         $Id$
     Author:     Clemens Ballarin, started 19 November 2002
 
 This file is largely based on HOL/Finite_Set.thy.
@@ -519,9 +518,9 @@
 lemma finprod_singleton:
   assumes i_in_A: "i \<in> A" and fin_A: "finite A" and f_Pi: "f \<in> A \<rightarrow> carrier G"
   shows "(\<Otimes>j\<in>A. if i = j then f j else \<one>) = f i"
-  using i_in_A G.finprod_insert [of "A - {i}" i "(\<lambda>j. if i = j then f j else \<one>)"]
-    fin_A f_Pi G.finprod_one [of "A - {i}"]
-    G.finprod_cong [of "A - {i}" "A - {i}" "(\<lambda>j. if i = j then f j else \<one>)" "(\<lambda>i. \<one>)"] 
+  using i_in_A finprod_insert [of "A - {i}" i "(\<lambda>j. if i = j then f j else \<one>)"]
+    fin_A f_Pi finprod_one [of "A - {i}"]
+    finprod_cong [of "A - {i}" "A - {i}" "(\<lambda>j. if i = j then f j else \<one>)" "(\<lambda>i. \<one>)"] 
   unfolding Pi_def simp_implies_def by (force simp add: insert_absorb)
 
 end
--- a/src/HOL/Algebra/Group.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Group.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:  HOL/Algebra/Group.thy
-  Id:     $Id$
   Author: Clemens Ballarin, started 4 February 2003
 
 Based on work by Florian Kammueller, L C Paulson and Markus Wenzel.
@@ -425,7 +424,7 @@
   assumes "group G"
   shows "group (G\<lparr>carrier := H\<rparr>)"
 proof -
-  interpret group [G] by fact
+  interpret group G by fact
   show ?thesis
     apply (rule monoid.group_l_invI)
     apply (unfold_locales) [1]
@@ -489,8 +488,8 @@
   assumes "monoid G" and "monoid H"
   shows "monoid (G \<times>\<times> H)"
 proof -
-  interpret G: monoid [G] by fact
-  interpret H: monoid [H] by fact
+  interpret G!: monoid G by fact
+  interpret H!: monoid H by fact
   from assms
   show ?thesis by (unfold monoid_def DirProd_def, auto) 
 qed
@@ -501,8 +500,8 @@
   assumes "group G" and "group H"
   shows "group (G \<times>\<times> H)"
 proof -
-  interpret G: group [G] by fact
-  interpret H: group [H] by fact
+  interpret G!: group G by fact
+  interpret H!: group H by fact
   show ?thesis by (rule groupI)
      (auto intro: G.m_assoc H.m_assoc G.l_inv H.l_inv
            simp add: DirProd_def)
@@ -526,9 +525,9 @@
       and h: "h \<in> carrier H"
   shows "m_inv (G \<times>\<times> H) (g, h) = (inv\<^bsub>G\<^esub> g, inv\<^bsub>H\<^esub> h)"
 proof -
-  interpret G: group [G] by fact
-  interpret H: group [H] by fact
-  interpret Prod: group ["G \<times>\<times> H"]
+  interpret G!: group G by fact
+  interpret H!: group H by fact
+  interpret Prod!: group "G \<times>\<times> H"
     by (auto intro: DirProd_group group.intro group.axioms assms)
   show ?thesis by (simp add: Prod.inv_equality g h)
 qed
@@ -542,15 +541,6 @@
     {h. h \<in> carrier G -> carrier H &
       (\<forall>x \<in> carrier G. \<forall>y \<in> carrier G. h (x \<otimes>\<^bsub>G\<^esub> y) = h x \<otimes>\<^bsub>H\<^esub> h y)}"
 
-lemma hom_mult:
-  "[| h \<in> hom G H; x \<in> carrier G; y \<in> carrier G |]
-   ==> h (x \<otimes>\<^bsub>G\<^esub> y) = h x \<otimes>\<^bsub>H\<^esub> h y"
-  by (simp add: hom_def)
-
-lemma hom_closed:
-  "[| h \<in> hom G H; x \<in> carrier G |] ==> h x \<in> carrier H"
-  by (auto simp add: hom_def funcset_mem)
-
 lemma (in group) hom_compose:
      "[|h \<in> hom G H; i \<in> hom H I|] ==> compose (carrier G) i h \<in> hom G I"
 apply (auto simp add: hom_def funcset_compose) 
@@ -587,10 +577,23 @@
 
 text{*Basis for homomorphism proofs: we assume two groups @{term G} and
   @{term H}, with a homomorphism @{term h} between them*}
-locale group_hom = group G + group H + var h +
+locale group_hom = G: group G + H: group H for G (structure) and H (structure) +
+  fixes h
   assumes homh: "h \<in> hom G H"
-  notes hom_mult [simp] = hom_mult [OF homh]
-    and hom_closed [simp] = hom_closed [OF homh]
+
+lemma (in group_hom) hom_mult [simp]:
+  "[| x \<in> carrier G; y \<in> carrier G |] ==> h (x \<otimes>\<^bsub>G\<^esub> y) = h x \<otimes>\<^bsub>H\<^esub> h y"
+proof -
+  assume "x \<in> carrier G" "y \<in> carrier G"
+  with homh [unfolded hom_def] show ?thesis by simp
+qed
+
+lemma (in group_hom) hom_closed [simp]:
+  "x \<in> carrier G ==> h x \<in> carrier H"
+proof -
+  assume "x \<in> carrier G"
+  with homh [unfolded hom_def] show ?thesis by (auto simp add: funcset_mem)
+qed
 
 lemma (in group_hom) one_closed [simp]:
   "h \<one> \<in> carrier H"
--- a/src/HOL/Algebra/Ideal.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Ideal.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/CIdeal.thy
-  Id:        $Id$
   Author:    Stephan Hohe, TU Muenchen
 *)
 
@@ -14,11 +13,11 @@
 
 subsubsection {* General definition *}
 
-locale ideal = additive_subgroup I R + ring R +
+locale ideal = additive_subgroup I R + ring R for I and R (structure) +
   assumes I_l_closed: "\<lbrakk>a \<in> I; x \<in> carrier R\<rbrakk> \<Longrightarrow> x \<otimes> a \<in> I"
       and I_r_closed: "\<lbrakk>a \<in> I; x \<in> carrier R\<rbrakk> \<Longrightarrow> a \<otimes> x \<in> I"
 
-interpretation ideal \<subseteq> abelian_subgroup I R
+sublocale ideal \<subseteq> abelian_subgroup I R
 apply (intro abelian_subgroupI3 abelian_group.intro)
   apply (rule ideal.axioms, rule ideal_axioms)
  apply (rule abelian_group.axioms, rule ring.axioms, rule ideal.axioms, rule ideal_axioms)
@@ -37,7 +36,7 @@
       and I_r_closed: "\<And>a x. \<lbrakk>a \<in> I; x \<in> carrier R\<rbrakk> \<Longrightarrow> a \<otimes> x \<in> I"
   shows "ideal I R"
 proof -
-  interpret ring [R] by fact
+  interpret ring R by fact
   show ?thesis  apply (intro ideal.intro ideal_axioms.intro additive_subgroupI)
      apply (rule a_subgroup)
     apply (rule is_ring)
@@ -68,7 +67,7 @@
   assumes generate: "\<exists>i \<in> carrier R. I = Idl {i}"
   shows "principalideal I R"
 proof -
-  interpret ideal [I R] by fact
+  interpret ideal I R by fact
   show ?thesis  by (intro principalideal.intro principalideal_axioms.intro) (rule is_ideal, rule generate)
 qed
 
@@ -89,7 +88,7 @@
      and I_maximal: "\<And>J. \<lbrakk>ideal J R; I \<subseteq> J; J \<subseteq> carrier R\<rbrakk> \<Longrightarrow> J = I \<or> J = carrier R"
   shows "maximalideal I R"
 proof -
-  interpret ideal [I R] by fact
+  interpret ideal I R by fact
   show ?thesis by (intro maximalideal.intro maximalideal_axioms.intro)
     (rule is_ideal, rule I_notcarr, rule I_maximal)
 qed
@@ -112,8 +111,8 @@
       and I_prime: "\<And>a b. \<lbrakk>a \<in> carrier R; b \<in> carrier R; a \<otimes> b \<in> I\<rbrakk> \<Longrightarrow> a \<in> I \<or> b \<in> I"
   shows "primeideal I R"
 proof -
-  interpret ideal [I R] by fact
-  interpret cring [R] by fact
+  interpret ideal I R by fact
+  interpret cring R by fact
   show ?thesis by (intro primeideal.intro primeideal_axioms.intro)
     (rule is_ideal, rule is_cring, rule I_notcarr, rule I_prime)
 qed
@@ -128,8 +127,8 @@
       and I_prime: "\<And>a b. \<lbrakk>a \<in> carrier R; b \<in> carrier R; a \<otimes> b \<in> I\<rbrakk> \<Longrightarrow> a \<in> I \<or> b \<in> I"
   shows "primeideal I R"
 proof -
-  interpret additive_subgroup [I R] by fact
-  interpret cring [R] by fact
+  interpret additive_subgroup I R by fact
+  interpret cring R by fact
   show ?thesis apply (intro_locales)
     apply (intro ideal_axioms.intro)
     apply (erule (1) I_l_closed)
@@ -207,8 +206,8 @@
   assumes "ideal J R"
   shows "ideal (I \<inter> J) R"
 proof -
-  interpret ideal [I R] by fact
-  interpret ideal [J R] by fact
+  interpret ideal I R by fact
+  interpret ideal J R by fact
   show ?thesis
 apply (intro idealI subgroup.intro)
       apply (rule is_ring)
@@ -245,7 +244,7 @@
   from notempty have "\<exists>I0. I0 \<in> S" by blast
   from this obtain I0 where I0S: "I0 \<in> S" by auto
 
-  interpret ideal ["I0" "R"] by (rule Sideals[OF I0S])
+  interpret ideal I0 R by (rule Sideals[OF I0S])
 
   from xI[OF I0S] have "x \<in> I0" .
   from this and a_subset show "x \<in> carrier R" by fast
@@ -258,13 +257,13 @@
 
   fix J
   assume JS: "J \<in> S"
-  interpret ideal ["J" "R"] by (rule Sideals[OF JS])
+  interpret ideal J R by (rule Sideals[OF JS])
   from xI[OF JS] and yI[OF JS]
       show "x \<oplus> y \<in> J" by (rule a_closed)
 next
   fix J
   assume JS: "J \<in> S"
-  interpret ideal ["J" "R"] by (rule Sideals[OF JS])
+  interpret ideal J R by (rule Sideals[OF JS])
   show "\<zero> \<in> J" by simp
 next
   fix x
@@ -273,7 +272,7 @@
 
   fix J
   assume JS: "J \<in> S"
-  interpret ideal ["J" "R"] by (rule Sideals[OF JS])
+  interpret ideal J R by (rule Sideals[OF JS])
 
   from xI[OF JS]
       show "\<ominus> x \<in> J" by (rule a_inv_closed)
@@ -285,7 +284,7 @@
 
   fix J
   assume JS: "J \<in> S"
-  interpret ideal ["J" "R"] by (rule Sideals[OF JS])
+  interpret ideal J R by (rule Sideals[OF JS])
 
   from xI[OF JS] and ycarr
       show "y \<otimes> x \<in> J" by (rule I_l_closed)
@@ -297,7 +296,7 @@
 
   fix J
   assume JS: "J \<in> S"
-  interpret ideal ["J" "R"] by (rule Sideals[OF JS])
+  interpret ideal J R by (rule Sideals[OF JS])
 
   from xI[OF JS] and ycarr
       show "x \<otimes> y \<in> J" by (rule I_r_closed)
@@ -443,7 +442,7 @@
 lemma (in ring) genideal_one:
   "Idl {\<one>} = carrier R"
 proof -
-  interpret ideal ["Idl {\<one>}" "R"] by (rule genideal_ideal, fast intro: one_closed)
+  interpret ideal "Idl {\<one>}" "R" by (rule genideal_ideal, fast intro: one_closed)
   show "Idl {\<one>} = carrier R"
   apply (rule, rule a_subset)
   apply (simp add: one_imp_carrier genideal_self')
@@ -533,7 +532,7 @@
   assumes aJ: "a \<in> J"
   shows "PIdl a \<subseteq> J"
 proof -
-  interpret ideal [J R] by fact
+  interpret ideal J R by fact
   show ?thesis unfolding cgenideal_def
     apply rule
     apply clarify
@@ -580,7 +579,7 @@
   shows "Idl (I \<union> J) = I <+> J"
 apply rule
  apply (rule ring.genideal_minimal)
-   apply (rule R.is_ring)
+   apply (rule is_ring)
   apply (rule add_ideals[OF idealI idealJ])
  apply (rule)
  apply (simp add: set_add_defs) apply (elim disjE) defer 1 defer 1
@@ -660,7 +659,7 @@
   assumes "cring R"
   shows "\<exists>x\<in>I. I = carrier R #> x"
 proof -
-  interpret cring [R] by fact
+  interpret cring R by fact
   from generate
       obtain i
         where icarr: "i \<in> carrier R"
@@ -693,7 +692,7 @@
   assumes notprime: "\<not> primeideal I R"
   shows "carrier R = I \<or> (\<exists>a b. a \<in> carrier R \<and> b \<in> carrier R \<and> a \<otimes> b \<in> I \<and> a \<notin> I \<and> b \<notin> I)"
 proof (rule ccontr, clarsimp)
-  interpret cring [R] by fact
+  interpret cring R by fact
   assume InR: "carrier R \<noteq> I"
      and "\<forall>a. a \<in> carrier R \<longrightarrow> (\<forall>b. a \<otimes> b \<in> I \<longrightarrow> b \<in> carrier R \<longrightarrow> a \<in> I \<or> b \<in> I)"
   hence I_prime: "\<And> a b. \<lbrakk>a \<in> carrier R; b \<in> carrier R; a \<otimes> b \<in> I\<rbrakk> \<Longrightarrow> a \<in> I \<or> b \<in> I" by simp
@@ -713,7 +712,7 @@
   obtains "carrier R = I"
     | "\<exists>a b. a \<in> carrier R \<and> b \<in> carrier R \<and> a \<otimes> b \<in> I \<and> a \<notin> I \<and> b \<notin> I"
 proof -
-  interpret R: cring [R] by fact
+  interpret R!: cring R by fact
   assume "carrier R = I ==> thesis"
     and "\<exists>a b. a \<in> carrier R \<and> b \<in> carrier R \<and> a \<otimes> b \<in> I \<and> a \<notin> I \<and> b \<notin> I \<Longrightarrow> thesis"
   then show thesis using primeidealCD [OF R.is_cring notprime] by blast
@@ -726,13 +725,13 @@
 apply (rule domain.intro, rule is_cring)
 apply (rule domain_axioms.intro)
 proof (rule ccontr, simp)
-  interpret primeideal ["{\<zero>}" "R"] by (rule pi)
+  interpret primeideal "{\<zero>}" "R" by (rule pi)
   assume "\<one> = \<zero>"
   hence "carrier R = {\<zero>}" by (rule one_zeroD)
   from this[symmetric] and I_notcarr
       show "False" by simp
 next
-  interpret primeideal ["{\<zero>}" "R"] by (rule pi)
+  interpret primeideal "{\<zero>}" "R" by (rule pi)
   fix a b
   assume ab: "a \<otimes> b = \<zero>"
      and carr: "a \<in> carrier R" "b \<in> carrier R"
@@ -771,7 +770,7 @@
   assumes acarr: "a \<in> carrier R"
   shows "ideal {x\<in>carrier R. a \<otimes> x \<in> I} R"
 proof -
-  interpret cring [R] by fact
+  interpret cring R by fact
   show ?thesis apply (rule idealI)
     apply (rule cring.axioms[OF is_cring])
     apply (rule subgroup.intro)
@@ -812,7 +811,7 @@
   assumes "maximalideal I R"
   shows "primeideal I R"
 proof -
-  interpret maximalideal [I R] by fact
+  interpret maximalideal I R by fact
   show ?thesis apply (rule ccontr)
     apply (rule primeidealCE)
     apply (rule is_cring)
@@ -830,7 +829,7 @@
       by fast
     def J \<equiv> "{x\<in>carrier R. a \<otimes> x \<in> I}"
     
-    from R.is_cring and acarr
+    from is_cring and acarr
     have idealJ: "ideal J R" unfolding J_def by (rule helper_max_prime)
     
     have IsubJ: "I \<subseteq> J"
@@ -855,7 +854,7 @@
     have "\<one> \<notin> J" unfolding J_def by fast
     hence Jncarr: "J \<noteq> carrier R" by fast
     
-    interpret ideal ["J" "R"] by (rule idealJ)
+    interpret ideal J R by (rule idealJ)
     
     have "J = I \<or> J = carrier R"
       apply (intro I_maximal)
@@ -932,7 +931,7 @@
   fix I
   assume a: "I \<in> {I. ideal I R}"
   with this
-      interpret ideal ["I" "R"] by simp
+      interpret ideal I R by simp
 
   show "I \<in> {{\<zero>}, carrier R}"
   proof (cases "\<exists>a. a \<in> I - {\<zero>}")
@@ -1019,7 +1018,7 @@
   fix J
   assume Jn0: "J \<noteq> {\<zero>}"
      and idealJ: "ideal J R"
-  interpret ideal ["J" "R"] by (rule idealJ)
+  interpret ideal J R by (rule idealJ)
   have "{\<zero>} \<subseteq> J" by (rule ccontr, simp)
   from zeromax and idealJ and this and a_subset
       have "J = {\<zero>} \<or> J = carrier R" by (rule maximalideal.I_maximal)
--- a/src/HOL/Algebra/IntRing.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/IntRing.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/IntRing.thy
-  Id:        $Id$
   Author:    Stephan Hohe, TU Muenchen
 *)
 
@@ -97,7 +96,7 @@
   interpretation needs to be done as early as possible --- that is,
   with as few assumptions as possible. *}
 
-interpretation int: monoid ["\<Z>"]
+interpretation int!: monoid \<Z>
   where "carrier \<Z> = UNIV"
     and "mult \<Z> x y = x * y"
     and "one \<Z> = 1"
@@ -105,7 +104,7 @@
 proof -
   -- "Specification"
   show "monoid \<Z>" proof qed (auto simp: int_ring_def)
-  then interpret int: monoid ["\<Z>"] .
+  then interpret int!: monoid \<Z> .
 
   -- "Carrier"
   show "carrier \<Z> = UNIV" by (simp add: int_ring_def)
@@ -117,12 +116,12 @@
   show "pow \<Z> x n = x^n" by (induct n) (simp, simp add: int_ring_def)+
 qed
 
-interpretation int: comm_monoid ["\<Z>"]
+interpretation int!: comm_monoid \<Z>
   where "finprod \<Z> f A = (if finite A then setprod f A else undefined)"
 proof -
   -- "Specification"
   show "comm_monoid \<Z>" proof qed (auto simp: int_ring_def)
-  then interpret int: comm_monoid ["\<Z>"] .
+  then interpret int!: comm_monoid \<Z> .
 
   -- "Operations"
   { fix x y have "mult \<Z> x y = x * y" by (simp add: int_ring_def) }
@@ -140,14 +139,14 @@
   qed
 qed
 
-interpretation int: abelian_monoid ["\<Z>"]
+interpretation int!: abelian_monoid \<Z>
   where "zero \<Z> = 0"
     and "add \<Z> x y = x + y"
     and "finsum \<Z> f A = (if finite A then setsum f A else undefined)"
 proof -
   -- "Specification"
   show "abelian_monoid \<Z>" proof qed (auto simp: int_ring_def)
-  then interpret int: abelian_monoid ["\<Z>"] .
+  then interpret int!: abelian_monoid \<Z> .
 
   -- "Operations"
   { fix x y show "add \<Z> x y = x + y" by (simp add: int_ring_def) }
@@ -165,7 +164,7 @@
   qed
 qed
 
-interpretation int: abelian_group ["\<Z>"]
+interpretation int!: abelian_group \<Z>
   where "a_inv \<Z> x = - x"
     and "a_minus \<Z> x y = x - y"
 proof -
@@ -175,7 +174,7 @@
     show "!!x. x \<in> carrier \<Z> ==> EX y : carrier \<Z>. y \<oplus>\<^bsub>\<Z>\<^esub> x = \<zero>\<^bsub>\<Z>\<^esub>"
       by (simp add: int_ring_def) arith
   qed (auto simp: int_ring_def)
-  then interpret int: abelian_group ["\<Z>"] .
+  then interpret int!: abelian_group \<Z> .
 
   -- "Operations"
   { fix x y have "add \<Z> x y = x + y" by (simp add: int_ring_def) }
@@ -188,7 +187,7 @@
   show "a_minus \<Z> x y = x - y" by (simp add: int.minus_eq add a_inv)
 qed
 
-interpretation int: "domain" ["\<Z>"]
+interpretation int!: "domain" \<Z>
   proof qed (auto simp: int_ring_def left_distrib right_distrib)
 
 
@@ -204,8 +203,8 @@
   "(True ==> PROP R) == PROP R"
   by simp_all
 
-interpretation int (* FIXME [unfolded UNIV] *) :
-  partial_order ["(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"]
+interpretation int! (* FIXME [unfolded UNIV] *) :
+  partial_order "(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"
   where "carrier (| carrier = UNIV::int set, eq = op =, le = op \<le> |) = UNIV"
     and "le (| carrier = UNIV::int set, eq = op =, le = op \<le> |) x y = (x \<le> y)"
     and "lless (| carrier = UNIV::int set, eq = op =, le = op \<le> |) x y = (x < y)"
@@ -220,8 +219,8 @@
     by (simp add: lless_def) auto
 qed
 
-interpretation int (* FIXME [unfolded UNIV] *) :
-  lattice ["(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"]
+interpretation int! (* FIXME [unfolded UNIV] *) :
+  lattice "(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"
   where "join (| carrier = UNIV::int set, eq = op =, le = op \<le> |) x y = max x y"
     and "meet (| carrier = UNIV::int set, eq = op =, le = op \<le> |) x y = min x y"
 proof -
@@ -233,7 +232,7 @@
     apply (simp add: greatest_def Lower_def)
     apply arith
     done
-  then interpret int: lattice ["?Z"] .
+  then interpret int!: lattice "?Z" .
   show "join ?Z x y = max x y"
     apply (rule int.joinI)
     apply (simp_all add: least_def Upper_def)
@@ -246,8 +245,8 @@
     done
 qed
 
-interpretation int (* [unfolded UNIV] *) :
-  total_order ["(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"]
+interpretation int! (* [unfolded UNIV] *) :
+  total_order "(| carrier = UNIV::int set, eq = op =, le = op \<le> |)"
   proof qed clarsimp
 
 
@@ -329,7 +328,7 @@
 next
   assume "UNIV = {uu. EX x. uu = x * p}"
   from this obtain x 
-      where "1 = x * p" by fast
+      where "1 = x * p" by best
   from this [symmetric]
       have "p * x = 1" by (subst zmult_commute)
   hence "\<bar>p * x\<bar> = 1" by simp
@@ -404,7 +403,7 @@
   assumes zmods: "ZMod m a = ZMod m b"
   shows "a mod m = b mod m"
 proof -
-  interpret ideal ["Idl\<^bsub>\<Z>\<^esub> {m}" \<Z>] by (rule int.genideal_ideal, fast)
+  interpret ideal "Idl\<^bsub>\<Z>\<^esub> {m}" \<Z> by (rule int.genideal_ideal, fast)
   from zmods
       have "b \<in> ZMod m a"
       unfolding ZMod_def
@@ -428,7 +427,7 @@
 lemma ZMod_mod:
   shows "ZMod m a = ZMod m (a mod m)"
 proof -
-  interpret ideal ["Idl\<^bsub>\<Z>\<^esub> {m}" \<Z>] by (rule int.genideal_ideal, fast)
+  interpret ideal "Idl\<^bsub>\<Z>\<^esub> {m}" \<Z> by (rule int.genideal_ideal, fast)
   show ?thesis
       unfolding ZMod_def
   apply (rule a_repr_independence'[symmetric])
--- a/src/HOL/Algebra/Lattice.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Lattice.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/Lattice.thy
-  Id:        $Id$
   Author:    Clemens Ballarin, started 7 November 2003
   Copyright: Clemens Ballarin
 
@@ -16,7 +15,7 @@
 record 'a gorder = "'a eq_object" +
   le :: "['a, 'a] => bool" (infixl "\<sqsubseteq>\<index>" 50)
 
-locale weak_partial_order = equivalence L +
+locale weak_partial_order = equivalence L for L (structure) +
   assumes le_refl [intro, simp]:
       "x \<in> carrier L ==> x \<sqsubseteq> x"
     and weak_le_anti_sym [intro]:
@@ -920,7 +919,7 @@
 
 text {* Total orders are lattices. *}
 
-interpretation weak_total_order < weak_lattice
+sublocale weak_total_order < weak: weak_lattice
 proof
   fix x y
   assume L: "x \<in> carrier L"  "y \<in> carrier L"
@@ -1126,14 +1125,14 @@
   assumes sup_of_two_exists:
     "[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. least L s (Upper L {x, y})"
 
-interpretation upper_semilattice < weak_upper_semilattice
+sublocale upper_semilattice < weak: weak_upper_semilattice
   proof qed (rule sup_of_two_exists)
 
 locale lower_semilattice = partial_order +
   assumes inf_of_two_exists:
     "[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. greatest L s (Lower L {x, y})"
 
-interpretation lower_semilattice < weak_lower_semilattice
+sublocale lower_semilattice < weak: weak_lower_semilattice
   proof qed (rule inf_of_two_exists)
 
 locale lattice = upper_semilattice + lower_semilattice
@@ -1184,7 +1183,7 @@
 locale total_order = partial_order +
   assumes total_order_total: "[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
 
-interpretation total_order < weak_total_order
+sublocale total_order < weak: weak_total_order
   proof qed (rule total_order_total)
 
 text {* Introduction rule: the usual definition of total order *}
@@ -1196,7 +1195,7 @@
 
 text {* Total orders are lattices. *}
 
-interpretation total_order < lattice
+sublocale total_order < weak: lattice
   proof qed (auto intro: sup_of_two_exists inf_of_two_exists)
 
 
@@ -1208,7 +1207,7 @@
     and inf_exists:
     "[| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
 
-interpretation complete_lattice < weak_complete_lattice
+sublocale complete_lattice < weak: weak_complete_lattice
   proof qed (auto intro: sup_exists inf_exists)
 
 text {* Introduction rule: the usual definition of complete lattice *}
--- a/src/HOL/Algebra/Module.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Module.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Algebra/Module.thy
-    ID:         $Id$
     Author:     Clemens Ballarin, started 15 April 2003
     Copyright:  Clemens Ballarin
 *)
@@ -14,7 +13,7 @@
 record ('a, 'b) module = "'b ring" +
   smult :: "['a, 'b] => 'b" (infixl "\<odot>\<index>" 70)
 
-locale module = cring R + abelian_group M +
+locale module = R: cring + M: abelian_group M for M (structure) +
   assumes smult_closed [simp, intro]:
       "[| a \<in> carrier R; x \<in> carrier M |] ==> a \<odot>\<^bsub>M\<^esub> x \<in> carrier M"
     and smult_l_distr:
@@ -29,7 +28,7 @@
     and smult_one [simp]:
       "x \<in> carrier M ==> \<one> \<odot>\<^bsub>M\<^esub> x = x"
 
-locale algebra = module R M + cring M +
+locale algebra = module + cring M +
   assumes smult_assoc2:
       "[| a \<in> carrier R; x \<in> carrier M; y \<in> carrier M |] ==>
       (a \<odot>\<^bsub>M\<^esub> x) \<otimes>\<^bsub>M\<^esub> y = a \<odot>\<^bsub>M\<^esub> (x \<otimes>\<^bsub>M\<^esub> y)"
--- a/src/HOL/Algebra/QuotRing.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/QuotRing.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/QuotRing.thy
-  Id:        $Id$
   Author:    Stephan Hohe
 *)
 
@@ -158,7 +157,7 @@
   assumes "cring R"
   shows "cring (R Quot I)"
 proof -
-  interpret cring [R] by fact
+  interpret cring R by fact
   show ?thesis apply (intro cring.intro comm_monoid.intro comm_monoid_axioms.intro)
   apply (rule quotient_is_ring)
  apply (rule ring.axioms[OF quotient_is_ring])
@@ -173,12 +172,12 @@
   assumes "cring R"
   shows "ring_hom_cring R (R Quot I) (op +> I)"
 proof -
-  interpret cring [R] by fact
+  interpret cring R by fact
   show ?thesis apply (rule ring_hom_cringI)
   apply (rule rcos_ring_hom_ring)
- apply (rule R.is_cring)
+ apply (rule is_cring)
 apply (rule quotient_is_cring)
-apply (rule R.is_cring)
+apply (rule is_cring)
 done
 qed
 
@@ -244,7 +243,7 @@
   assumes "cring R"
   shows "field (R Quot I)"
 proof -
-  interpret cring [R] by fact
+  interpret cring R by fact
   show ?thesis apply (intro cring.cring_fieldI2)
   apply (rule quotient_is_cring, rule is_cring)
  defer 1
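
In proof bodies, the bracketed instantiation lists of interpret are gone: `interpret cring [R]` becomes `interpret cring R`, with the instance terms written directly in the locale expression, and facts of an unqualified interpretation are referenced without a prefix (hence `R.is_cring` becoming `is_cring` above). A minimal sketch of the new form, assuming the usual m_comm fact from the comm_monoid part of cring:

  lemma
    assumes "cring R"
      and x: "x \<in> carrier R" and y: "y \<in> carrier R"
    shows "x \<otimes>\<^bsub>R\<^esub> y = y \<otimes>\<^bsub>R\<^esub> x"
  proof -
    interpret cring R by fact
    from x y show ?thesis by (rule m_comm)
  qed
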
--- a/src/HOL/Algebra/Ring.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/Ring.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     The algebraic hierarchy of rings
-  Id:        $Id$
   Author:    Clemens Ballarin, started 9 December 1996
   Copyright: Clemens Ballarin
 *)
@@ -187,7 +186,7 @@
   assumes cg: "comm_group \<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
   shows "abelian_group G"
 proof -
-  interpret comm_group ["\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"]
+  interpret comm_group "\<lparr>carrier = carrier G, mult = add G, one = zero G\<rparr>"
     by (rule cg)
   show "abelian_group G" ..
 qed
@@ -360,7 +359,7 @@
 
 subsection {* Rings: Basic Definitions *}
 
-locale ring = abelian_group R + monoid R +
+locale ring = abelian_group R + monoid R for R (structure) +
   assumes l_distr: "[| x \<in> carrier R; y \<in> carrier R; z \<in> carrier R |]
       ==> (x \<oplus> y) \<otimes> z = x \<otimes> z \<oplus> y \<otimes> z"
     and r_distr: "[| x \<in> carrier R; y \<in> carrier R; z \<in> carrier R |]
@@ -585,8 +584,8 @@
   assumes RS: "a \<in> carrier R" "b \<in> carrier R" "c \<in> carrier S" "d \<in> carrier S"
   shows "a \<oplus> \<ominus> (a \<oplus> \<ominus> b) = b & c \<otimes>\<^bsub>S\<^esub> d = d \<otimes>\<^bsub>S\<^esub> c"
 proof -
-  interpret ring [R] by fact
-  interpret cring [S] by fact
+  interpret ring R by fact
+  interpret cring S by fact
 ML_val {* Algebra.print_structures @{context} *}
   from RS show ?thesis by algebra
 qed
@@ -597,7 +596,7 @@
   assumes R: "a \<in> carrier R" "b \<in> carrier R"
   shows "a \<ominus> (a \<ominus> b) = b"
 proof -
-  interpret ring [R] by fact
+  interpret ring R by fact
   from R show ?thesis by algebra
 qed
 
@@ -771,7 +770,8 @@
   shows "h \<in> ring_hom R S ==> h \<one> = \<one>\<^bsub>S\<^esub>"
   by (simp add: ring_hom_def)
 
-locale ring_hom_cring = cring R + cring S +
+locale ring_hom_cring = R: cring R + S: cring S
+    for R (structure) and S (structure) +
   fixes h
   assumes homh [simp, intro]: "h \<in> ring_hom R S"
   notes hom_closed [simp, intro] = ring_hom_closed [OF homh]
--- a/src/HOL/Algebra/RingHom.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/RingHom.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/RingHom.thy
-  Id:        $Id$
   Author:    Stephan Hohe, TU Muenchen
 *)
 
@@ -11,15 +10,17 @@
 section {* Homomorphisms of Non-Commutative Rings *}
 
 text {* Lifting existing lemmas in a @{text ring_hom_ring} locale *}
-locale ring_hom_ring = ring R + ring S + var h +
+locale ring_hom_ring = R: ring R + S: ring S
+    for R (structure) and S (structure) +
+  fixes h
   assumes homh: "h \<in> ring_hom R S"
   notes hom_mult [simp] = ring_hom_mult [OF homh]
     and hom_one [simp] = ring_hom_one [OF homh]
 
-interpretation ring_hom_cring \<subseteq> ring_hom_ring
+sublocale ring_hom_cring \<subseteq> ring: ring_hom_ring
   proof qed (rule homh)
 
-interpretation ring_hom_ring \<subseteq> abelian_group_hom R S
+sublocale ring_hom_ring \<subseteq> abelian_group: abelian_group_hom R S
 apply (rule abelian_group_homI)
   apply (rule R.is_abelian_group)
  apply (rule S.is_abelian_group)
@@ -44,8 +45,8 @@
       and compatible_one: "h \<one> = \<one>\<^bsub>S\<^esub>"
   shows "ring_hom_ring R S h"
 proof -
-  interpret ring [R] by fact
-  interpret ring [S] by fact
+  interpret ring R by fact
+  interpret ring S by fact
   show ?thesis apply unfold_locales
 apply (unfold ring_hom_def, safe)
    apply (simp add: hom_closed Pi_def)
@@ -60,8 +61,8 @@
   assumes h: "h \<in> ring_hom R S"
   shows "ring_hom_ring R S h"
 proof -
-  interpret R: ring [R] by fact
-  interpret S: ring [S] by fact
+  interpret R!: ring R by fact
+  interpret S!: ring S by fact
   show ?thesis apply (intro ring_hom_ring.intro ring_hom_ring_axioms.intro)
     apply (rule R.is_ring)
     apply (rule S.is_ring)
@@ -76,9 +77,9 @@
       and compatible_one: "h \<one> = \<one>\<^bsub>S\<^esub>"
   shows "ring_hom_ring R S h"
 proof -
-  interpret abelian_group_hom [R S h] by fact
-  interpret R: ring [R] by fact
-  interpret S: ring [S] by fact
+  interpret abelian_group_hom R S h by fact
+  interpret R!: ring R by fact
+  interpret S!: ring S by fact
   show ?thesis apply (intro ring_hom_ring.intro ring_hom_ring_axioms.intro, rule R.is_ring, rule S.is_ring)
     apply (insert group_hom.homh[OF a_group_hom])
     apply (unfold hom_def ring_hom_def, simp)
@@ -92,9 +93,9 @@
   assumes "ring_hom_ring R S h" "cring R" "cring S"
   shows "ring_hom_cring R S h"
 proof -
-  interpret ring_hom_ring [R S h] by fact
-  interpret R: cring [R] by fact
-  interpret S: cring [S] by fact
+  interpret ring_hom_ring R S h by fact
+  interpret R!: cring R by fact
+  interpret S!: cring S by fact
   show ?thesis by (intro ring_hom_cring.intro ring_hom_cring_axioms.intro)
     (rule R.is_cring, rule S.is_cring, rule homh)
 qed
@@ -124,7 +125,7 @@
       and xrcos: "x \<in> a_kernel R S h +> a"
   shows "h x = h a"
 proof -
-  interpret ideal ["a_kernel R S h" "R"] by (rule kernel_is_ideal)
+  interpret ideal "a_kernel R S h" "R" by (rule kernel_is_ideal)
 
   from xrcos
       have "\<exists>i \<in> a_kernel R S h. x = i \<oplus> a" by (simp add: a_r_coset_defs)
@@ -152,7 +153,7 @@
       and hx: "h x = h a"
   shows "x \<in> a_kernel R S h +> a"
 proof -
-  interpret ideal ["a_kernel R S h" "R"] by (rule kernel_is_ideal)
+  interpret ideal "a_kernel R S h" "R" by (rule kernel_is_ideal)
  
   note carr = acarr xcarr
   note hcarr = acarr[THEN hom_closed] xcarr[THEN hom_closed]
@@ -180,7 +181,7 @@
 apply rule defer 1
 apply clarsimp defer 1
 proof
-  interpret ideal ["a_kernel R S h" "R"] by (rule kernel_is_ideal)
+  interpret ideal "a_kernel R S h" "R" by (rule kernel_is_ideal)
 
   fix x
   assume xrcos: "x \<in> a_kernel R S h +> a"
@@ -193,7 +194,7 @@
   from xcarr and this
       show "x \<in> {x \<in> carrier R. h x = h a}" by fast
 next
-  interpret ideal ["a_kernel R S h" "R"] by (rule kernel_is_ideal)
+  interpret ideal "a_kernel R S h" "R" by (rule kernel_is_ideal)
 
   fix x
   assume xcarr: "x \<in> carrier R"
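
The RingHom.thy hunks also show the qualifier discipline for proof-local interpretations: in `interpret R!: ring R` the `!` makes the qualifier mandatory, so inherited facts must be referenced through it, as in R.is_ring above. A minimal sketch, assuming the usual zero_closed fact from the abelian_monoid part of ring:

  lemma
    assumes "ring R"
    shows "\<zero>\<^bsub>R\<^esub> \<in> carrier R"
  proof -
    interpret R!: ring R by fact
    show ?thesis by (rule R.zero_closed)
  qed
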
--- a/src/HOL/Algebra/UnivPoly.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/UnivPoly.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,5 @@
 (*
   Title:     HOL/Algebra/UnivPoly.thy
-  Id:        $Id$
   Author:    Clemens Ballarin, started 9 December 1996
   Copyright: Clemens Ballarin
 
@@ -176,17 +175,17 @@
   fixes R (structure) and P (structure)
   defines P_def: "P == UP R"
 
-locale UP_ring = UP + ring R
+locale UP_ring = UP + R: ring R
 
-locale UP_cring = UP + cring R
+locale UP_cring = UP + R: cring R
 
-interpretation UP_cring < UP_ring
-  by (rule P_def) intro_locales
+sublocale UP_cring < UP_ring
+  by intro_locales [1] (rule P_def)
 
-locale UP_domain = UP + "domain" R
+locale UP_domain = UP + R: "domain" R
 
-interpretation UP_domain < UP_cring
-  by (rule P_def) intro_locales
+sublocale UP_domain < UP_cring
+  by intro_locales [1] (rule P_def)
 
 context UP
 begin
@@ -458,8 +457,8 @@
 
 end
 
-interpretation UP_ring < ring P using UP_ring .
-interpretation UP_cring < cring P using UP_cring .
+sublocale UP_ring < P: ring P using UP_ring .
+sublocale UP_cring < P: cring P using UP_cring .
 
 
 subsection {* Polynomials Form an Algebra *}
@@ -508,7 +507,7 @@
   "algebra R P" by (auto intro!: algebraI R.cring UP_cring UP_smult_l_distr UP_smult_r_distr
     UP_smult_assoc1 UP_smult_assoc2)
 
-interpretation UP_cring < algebra R P using UP_algebra .
+sublocale UP_cring < algebra R P using UP_algebra .
 
 
 subsection {* Further Lemmas Involving Monomials *}
@@ -1085,7 +1084,7 @@
   Interpretation of theorems from @{term domain}.
 *}
 
-interpretation UP_domain < "domain" P
+sublocale UP_domain < "domain" P
   by intro_locales (rule domain.axioms UP_domain)+
 
 
@@ -1204,7 +1203,9 @@
 
 text {* The universal property of the polynomial ring *}
 
-locale UP_pre_univ_prop = ring_hom_cring R S h + UP_cring R P
+locale UP_pre_univ_prop = ring_hom_cring + UP_cring
+
+(* FIXME print_locale ring_hom_cring fails *)
 
 locale UP_univ_prop = UP_pre_univ_prop +
   fixes s and Eval
@@ -1350,7 +1351,7 @@
 
 text {* Interpretation of ring homomorphism lemmas. *}
 
-interpretation UP_univ_prop < ring_hom_cring P S Eval
+sublocale UP_univ_prop < ring_hom_cring P S Eval
   apply (unfold Eval_def)
   apply intro_locales
   apply (rule ring_hom_cring.axioms)
@@ -1391,7 +1392,7 @@
   assumes R: "r \<in> carrier R" and S: "s \<in> carrier S"
   shows "eval R S h s (monom P r n) = h r \<otimes>\<^bsub>S\<^esub> s (^)\<^bsub>S\<^esub> n"
 proof -
-  interpret UP_univ_prop [R S h P s _]
+  interpret UP_univ_prop R S h P s "eval R S h s"
     using UP_pre_univ_prop_axioms P_def R S
     by (auto intro: UP_univ_prop.intro UP_univ_prop_axioms.intro)
   from R
@@ -1428,8 +1429,8 @@
     and P: "p \<in> carrier P" and S: "s \<in> carrier S"
   shows "Phi p = Psi p"
 proof -
-  interpret ring_hom_cring [P S Phi] by fact
-  interpret ring_hom_cring [P S Psi] by fact
+  interpret ring_hom_cring P S Phi by fact
+  interpret ring_hom_cring P S Psi by fact
   have "Phi p =
       Phi (\<Oplus>\<^bsub>P \<^esub>i \<in> {..deg R p}. monom P (coeff P p i) 0 \<otimes>\<^bsub>P\<^esub> monom P \<one> 1 (^)\<^bsub>P\<^esub> i)"
     by (simp add: up_repr P monom_mult [THEN sym] monom_pow del: monom_mult)
@@ -1772,17 +1773,17 @@
   shows "eval R R id a (monom P \<one>\<^bsub>R\<^esub> 1 \<ominus>\<^bsub>P\<^esub> monom P a 0) = \<zero>"
   (is "eval R R id a ?g = _")
 proof -
-  interpret UP_pre_univ_prop [R R id P] proof qed simp
+  interpret UP_pre_univ_prop R R id proof qed simp
   have eval_ring_hom: "eval R R id a \<in> ring_hom P R" using eval_ring_hom [OF a] by simp
-  interpret ring_hom_cring [P R "eval R R id a"] proof qed (simp add: eval_ring_hom)
+  interpret ring_hom_cring P R "eval R R id a" proof qed (simp add: eval_ring_hom)
   have mon1_closed: "monom P \<one>\<^bsub>R\<^esub> 1 \<in> carrier P" 
     and mon0_closed: "monom P a 0 \<in> carrier P" 
     and min_mon0_closed: "\<ominus>\<^bsub>P\<^esub> monom P a 0 \<in> carrier P"
     using a R.a_inv_closed by auto
   have "eval R R id a ?g = eval R R id a (monom P \<one> 1) \<ominus> eval R R id a (monom P a 0)"
     unfolding P.minus_eq [OF mon1_closed mon0_closed]
-    unfolding R_S_h.hom_add [OF mon1_closed min_mon0_closed]
-    unfolding R_S_h.hom_a_inv [OF mon0_closed] 
+    unfolding hom_add [OF mon1_closed min_mon0_closed]
+    unfolding hom_a_inv [OF mon0_closed] 
     using R.minus_eq [symmetric] mon1_closed mon0_closed by auto
   also have "\<dots> = a \<ominus> a"
     using eval_monom [OF R.one_closed a, of 1] using eval_monom [OF a a, of 0] using a by simp
@@ -1819,7 +1820,7 @@
     and deg_r_0: "deg R r = 0"
     shows "r = monom P (eval R R id a f) 0"
 proof -
-  interpret UP_pre_univ_prop [R R id P] proof qed simp
+  interpret UP_pre_univ_prop R R id P proof qed simp
   have eval_ring_hom: "eval R R id a \<in> ring_hom P R"
     using eval_ring_hom [OF a] by simp
   have "eval R R id a f = eval R R id a ?gq \<oplus>\<^bsub>R\<^esub> eval R R id a r"
@@ -1885,7 +1886,7 @@
   "UP INTEG"} globally.
 *}
 
-interpretation INTEG: UP_pre_univ_prop [INTEG INTEG id]
+interpretation INTEG!: UP_pre_univ_prop INTEG INTEG id "UP INTEG"
   using INTEG_id_eval by simp_all
 
 lemma INTEG_closed [intro, simp]:
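
The INTEG interpretation above shows the theory-level form of the new syntax: the bracketed argument list is replaced by instance terms in the expression itself (including the defined parameter "UP INTEG", which must now be supplied explicitly), and `INTEG!:` marks a mandatory qualifier. A sketch of the same shape, reusing the hypothetical semigroup_l locale from the earlier note, with a hypothetical qualifier nat_add and assuming the standard add_assoc lemma:

  interpretation nat_add!: semigroup_l "op + :: nat => nat => nat"
    by unfold_locales (rule add_assoc)
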
--- a/src/HOL/Algebra/abstract/Ring2.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/abstract/Ring2.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,8 +1,7 @@
-(*
-  Title:     The algebraic hierarchy of rings as axiomatic classes
-  Id:        $Id$
-  Author:    Clemens Ballarin, started 9 December 1996
-  Copyright: Clemens Ballarin
+(*  Title:     HOL/Algebra/abstract/Ring2.thy
+    Author:    Clemens Ballarin
+
+The algebraic hierarchy of rings as axiomatic classes.
 *)
 
 header {* The algebraic hierarchy of rings as type classes *}
@@ -211,7 +210,7 @@
         @{const_name HOL.minus}, @{const_name HOL.one}, @{const_name HOL.times}]
   | ring_ord _ = ~1;
 
-fun termless_ring (a, b) = (Term.term_lpo ring_ord (a, b) = LESS);
+fun termless_ring (a, b) = (TermOrd.term_lpo ring_ord (a, b) = LESS);
 
 val ring_ss = HOL_basic_ss settermless termless_ring addsimps
   [thm "a_assoc", thm "l_zero", thm "l_neg", thm "a_comm", thm "m_assoc",
--- a/src/HOL/Algebra/ringsimp.ML	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Algebra/ringsimp.ML	Mon Jan 05 07:54:16 2009 -0800
@@ -1,6 +1,4 @@
-(*
-  Id:        $Id$
-  Author:    Clemens Ballarin
+(*  Author:    Clemens Ballarin
 
 Normalisation method for locales ring and cring.
 *)
@@ -48,7 +46,7 @@
     val ops = map (fst o Term.strip_comb) ts;
     fun ord (Const (a, _)) = find_index (fn (Const (b, _)) => a=b | _ => false) ops
       | ord (Free (a, _)) = find_index (fn (Free (b, _)) => a=b | _ => false) ops;
-    fun less (a, b) = (Term.term_lpo ord (a, b) = LESS);
+    fun less (a, b) = (TermOrd.term_lpo ord (a, b) = LESS);
   in asm_full_simp_tac (HOL_ss settermless less addsimps simps) end;
 
 fun algebra_tac ctxt =
--- a/src/HOL/Bali/AxExample.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Bali/AxExample.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -41,7 +41,7 @@
 
 ML {*
 fun inst1_tac ctxt s t st =
-  case AList.lookup (op =) (rev (Term.add_varnames (Thm.prop_of st) [])) s of
+  case AList.lookup (op =) (rev (Term.add_var_names (Thm.prop_of st) [])) s of
   SOME i => instantiate_tac ctxt [((s, i), t)] st | NONE => Seq.empty;
 
 val ax_tac =
--- a/src/HOL/Complex.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Complex.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -345,13 +345,13 @@
 
 subsection {* Completeness of the Complexes *}
 
-interpretation Re: bounded_linear ["Re"]
+interpretation Re!: bounded_linear "Re"
 apply (unfold_locales, simp, simp)
 apply (rule_tac x=1 in exI)
 apply (simp add: complex_norm_def)
 done
 
-interpretation Im: bounded_linear ["Im"]
+interpretation Im!: bounded_linear "Im"
 apply (unfold_locales, simp, simp)
 apply (rule_tac x=1 in exI)
 apply (simp add: complex_norm_def)
@@ -513,7 +513,7 @@
 lemma complex_mod_mult_cnj: "cmod (z * cnj z) = (cmod z)\<twosuperior>"
 by (simp add: norm_mult power2_eq_square)
 
-interpretation cnj: bounded_linear ["cnj"]
+interpretation cnj!: bounded_linear "cnj"
 apply (unfold_locales)
 apply (rule complex_cnj_add)
 apply (rule complex_cnj_scaleR)
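
At theory level in Complex.thy the same pattern appears: `interpretation Re!: bounded_linear "Re"` drops the bracketed argument list and adds a mandatory qualifier, so facts inherited from bounded_linear are addressed as Re.<fact>, Im.<fact> and cnj.<fact>. A minimal usage sketch, assuming the additivity fact of bounded_linear keeps its usual name add:

  lemma "Re (z + w) = Re z + Re w"
    by (rule Re.add)
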
--- a/src/HOL/Complex/Fundamental_Theorem_Algebra.thy	Mon Dec 29 11:04:27 2008 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1329 +0,0 @@
-(*  Title:       Fundamental_Theorem_Algebra.thy
-    Author:      Amine Chaieb
-*)
-
-header{*Fundamental Theorem of Algebra*}
-
-theory Fundamental_Theorem_Algebra
-imports "~~/src/HOL/Univ_Poly" "~~/src/HOL/Library/Dense_Linear_Order" "~~/src/HOL/Complex"
-begin
-
-subsection {* Square root of complex numbers *}
-definition csqrt :: "complex \<Rightarrow> complex" where
-"csqrt z = (if Im z = 0 then
-            if 0 \<le> Re z then Complex (sqrt(Re z)) 0
-            else Complex 0 (sqrt(- Re z))
-           else Complex (sqrt((cmod z + Re z) /2))
-                        ((Im z / abs(Im z)) * sqrt((cmod z - Re z) /2)))"
-
-lemma csqrt[algebra]: "csqrt z ^ 2 = z"
-proof-
-  obtain x y where xy: "z = Complex x y" by (cases z, simp_all)
-  {assume y0: "y = 0"
-    {assume x0: "x \<ge> 0" 
-      then have ?thesis using y0 xy real_sqrt_pow2[OF x0]
-	by (simp add: csqrt_def power2_eq_square)}
-    moreover
-    {assume "\<not> x \<ge> 0" hence x0: "- x \<ge> 0" by arith
-      then have ?thesis using y0 xy real_sqrt_pow2[OF x0] 
-	by (simp add: csqrt_def power2_eq_square) }
-    ultimately have ?thesis by blast}
-  moreover
-  {assume y0: "y\<noteq>0"
-    {fix x y
-      let ?z = "Complex x y"
-      from abs_Re_le_cmod[of ?z] have tha: "abs x \<le> cmod ?z" by auto
-      hence "cmod ?z - x \<ge> 0" "cmod ?z + x \<ge> 0" by arith+ 
-      hence "(sqrt (x * x + y * y) + x) / 2 \<ge> 0" "(sqrt (x * x + y * y) - x) / 2 \<ge> 0" by (simp_all add: power2_eq_square) }
-    note th = this
-    have sq4: "\<And>x::real. x^2 / 4 = (x / 2) ^ 2" 
-      by (simp add: power2_eq_square) 
-    from th[of x y]
-    have sq4': "sqrt (((sqrt (x * x + y * y) + x)^2 / 4)) = (sqrt (x * x + y * y) + x) / 2" "sqrt (((sqrt (x * x + y * y) - x)^2 / 4)) = (sqrt (x * x + y * y) - x) / 2" unfolding sq4 by simp_all
-    then have th1: "sqrt ((sqrt (x * x + y * y) + x) * (sqrt (x * x + y * y) + x) / 4) - sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) - x) / 4) = x"
-      unfolding power2_eq_square by simp 
-    have "sqrt 4 = sqrt (2^2)" by simp 
-    hence sqrt4: "sqrt 4 = 2" by (simp only: real_sqrt_abs)
-    have th2: "2 *(y * sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) + x) / 4)) / \<bar>y\<bar> = y"
-      using iffD2[OF real_sqrt_pow2_iff sum_power2_ge_zero[of x y]] y0
-      unfolding power2_eq_square 
-      by (simp add: ring_simps real_sqrt_divide sqrt4)
-     from y0 xy have ?thesis  apply (simp add: csqrt_def power2_eq_square)
-       apply (simp add: real_sqrt_sum_squares_mult_ge_zero[of x y] real_sqrt_pow2[OF th(1)[of x y], unfolded power2_eq_square] real_sqrt_pow2[OF th(2)[of x y], unfolded power2_eq_square] real_sqrt_mult[symmetric])
-      using th1 th2  ..}
-  ultimately show ?thesis by blast
-qed
-
-
-subsection{* More lemmas about the modulus of complex numbers *}
-
-lemma complex_of_real_power: "complex_of_real x ^ n = complex_of_real (x^n)"
-  by (rule of_real_power [symmetric])
-
-lemma real_down2: "(0::real) < d1 \<Longrightarrow> 0 < d2 ==> EX e. 0 < e & e < d1 & e < d2"
-  apply ferrack apply arith done
-
-text{* The triangle inequality for cmod *}
-lemma complex_mod_triangle_sub: "cmod w \<le> cmod (w + z) + norm z"
-  using complex_mod_triangle_ineq2[of "w + z" "-z"] by auto
-
-subsection{* Basic lemmas about complex polynomials *}
-
-lemma poly_bound_exists:
-  shows "\<exists>m. m > 0 \<and> (\<forall>z. cmod z <= r \<longrightarrow> cmod (poly p z) \<le> m)"
-proof(induct p)
-  case Nil thus ?case by (rule exI[where x=1], simp) 
-next
-  case (Cons c cs)
-  from Cons.hyps obtain m where m: "\<forall>z. cmod z \<le> r \<longrightarrow> cmod (poly cs z) \<le> m"
-    by blast
-  let ?k = " 1 + cmod c + \<bar>r * m\<bar>"
-  have kp: "?k > 0" using abs_ge_zero[of "r*m"] norm_ge_zero[of c] by arith
-  {fix z
-    assume H: "cmod z \<le> r"
-    from m H have th: "cmod (poly cs z) \<le> m" by blast
-    from H have rp: "r \<ge> 0" using norm_ge_zero[of z] by arith
-    have "cmod (poly (c # cs) z) \<le> cmod c + cmod (z* poly cs z)"
-      using norm_triangle_ineq[of c "z* poly cs z"] by simp
-    also have "\<dots> \<le> cmod c + r*m" using mult_mono[OF H th rp norm_ge_zero[of "poly cs z"]] by (simp add: norm_mult)
-    also have "\<dots> \<le> ?k" by simp
-    finally have "cmod (poly (c # cs) z) \<le> ?k" .}
-  with kp show ?case by blast
-qed
-
-
-text{* Offsetting the variable in a polynomial gives another of the same degree *}
-  (* FIXME : Lemma holds also in locale --- fix it later *)
-lemma  poly_offset_lemma:
-  shows "\<exists>b q. (length q = length p) \<and> (\<forall>x. poly (b#q) (x::complex) = (a + x) * poly p x)"
-proof(induct p)
-  case Nil thus ?case by simp
-next
-  case (Cons c cs)
-  from Cons.hyps obtain b q where 
-    bq: "length q = length cs" "\<forall>x. poly (b # q) x = (a + x) * poly cs x"
-    by blast
-  let ?b = "a*c"
-  let ?q = "(b+c)#q"
-  have lg: "length ?q = length (c#cs)" using bq(1) by simp
-  {fix x
-    from bq(2)[rule_format, of x]
-    have "x*poly (b # q) x = x*((a + x) * poly cs x)" by simp
-    hence "poly (?b# ?q) x = (a + x) * poly (c # cs) x"
-      by (simp add: ring_simps)}
-  with lg  show ?case by blast 
-qed
-
-    (* FIXME : This one too*)
-lemma poly_offset: "\<exists> q. length q = length p \<and> (\<forall>x. poly q (x::complex) = poly p (a + x))"
-proof (induct p)
-  case Nil thus ?case by simp
-next
-  case (Cons c cs)
-  from Cons.hyps obtain q where q: "length q = length cs" "\<forall>x. poly q x = poly cs (a + x)" by blast
-  from poly_offset_lemma[of q a] obtain b p where 
-    bp: "length p = length q" "\<forall>x. poly (b # p) x = (a + x) * poly q x"
-    by blast
-  thus ?case using q bp by - (rule exI[where x="(c + b)#p"], simp)
-qed
-
-text{* An alternative useful formulation of completeness of the reals *}
-lemma real_sup_exists: assumes ex: "\<exists>x. P x" and bz: "\<exists>z. \<forall>x. P x \<longrightarrow> x < z"
-  shows "\<exists>(s::real). \<forall>y. (\<exists>x. P x \<and> y < x) \<longleftrightarrow> y < s"
-proof-
-  from ex bz obtain x Y where x: "P x" and Y: "\<And>x. P x \<Longrightarrow> x < Y"  by blast
-  from ex have thx:"\<exists>x. x \<in> Collect P" by blast
-  from bz have thY: "\<exists>Y. isUb UNIV (Collect P) Y" 
-    by(auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def order_le_less)
-  from reals_complete[OF thx thY] obtain L where L: "isLub UNIV (Collect P) L"
-    by blast
-  from Y[OF x] have xY: "x < Y" .
-  from L have L': "\<forall>x. P x \<longrightarrow> x \<le> L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def)  
-  from Y have Y': "\<forall>x. P x \<longrightarrow> x \<le> Y" 
-    apply (clarsimp, atomize (full)) by auto 
-  from L Y' have "L \<le> Y" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def)
-  {fix y
-    {fix z assume z: "P z" "y < z"
-      from L' z have "y < L" by auto }
-    moreover
-    {assume yL: "y < L" "\<forall>z. P z \<longrightarrow> \<not> y < z"
-      hence nox: "\<forall>z. P z \<longrightarrow> y \<ge> z" by auto
-      from nox L have "y \<ge> L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) 
-      with yL(1) have False  by arith}
-    ultimately have "(\<exists>x. P x \<and> y < x) \<longleftrightarrow> y < L" by blast}
-  thus ?thesis by blast
-qed
-
-
-subsection{* Some theorems about sequences *}
-text{* Given a binary function @{text "f :: nat \<Rightarrow> 'a \<Rightarrow> 'a"} and a start value @{text e}, there is a unique function @{text g} with @{text "g 0 = e"} and @{text "g (Suc n) = f n (g n)"} *}
-
-lemma num_Axiom: "EX! g. g 0 = e \<and> (\<forall>n. g (Suc n) = f n (g n))"
-  unfolding Ex1_def
-  apply (rule_tac x="nat_rec e f" in exI)
-  apply (rule conjI)+
-apply (rule def_nat_rec_0, simp)
-apply (rule allI, rule def_nat_rec_Suc, simp)
-apply (rule allI, rule impI, rule ext)
-apply (erule conjE)
-apply (induct_tac x)
-apply (simp add: nat_rec_0)
-apply (erule_tac x="n" in allE)
-apply (simp)
-done
-
- text{* An equivalent formulation of monotonicity -- not used here, but might be useful *}
-lemma mono_Suc: "mono f = (\<forall>n. (f n :: 'a :: order) \<le> f (Suc n))"
-unfolding mono_def
-proof auto
-  fix A B :: nat
-  assume H: "\<forall>n. f n \<le> f (Suc n)" "A \<le> B"
-  hence "\<exists>k. B = A + k" apply -  apply (thin_tac "\<forall>n. f n \<le> f (Suc n)") 
-    by presburger
-  then obtain k where k: "B = A + k" by blast
-  {fix a k
-    have "f a \<le> f (a + k)"
-    proof (induct k)
-      case 0 thus ?case by simp
-    next
-      case (Suc k)
-      from Suc.hyps H(1)[rule_format, of "a + k"] show ?case by simp
-    qed}
-  with k show "f A \<le> f B" by blast
-qed
-
-text{* For any sequence, there is a monotonic subsequence *}
-lemma seq_monosub: "\<exists>f. subseq f \<and> monoseq (\<lambda> n. (s (f n)))"
-proof-
-  {assume H: "\<forall>n. \<exists>p >n. \<forall> m\<ge>p. s m \<le> s p"
-    let ?P = "\<lambda> p n. p > n \<and> (\<forall>m \<ge> p. s m \<le> s p)"
-    from num_Axiom[of "SOME p. ?P p 0" "\<lambda>p n. SOME p. ?P p n"]
-    obtain f where f: "f 0 = (SOME p. ?P p 0)" "\<forall>n. f (Suc n) = (SOME p. ?P p (f n))" by blast
-    have "?P (f 0) 0"  unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p 0"]
-      using H apply - 
-      apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI) 
-      unfolding order_le_less by blast 
-    hence f0: "f 0 > 0" "\<forall>m \<ge> f 0. s m \<le> s (f 0)" by blast+
-    {fix n
-      have "?P (f (Suc n)) (f n)" 
-	unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
-	using H apply - 
-      apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI) 
-      unfolding order_le_less by blast 
-    hence "f (Suc n) > f n" "\<forall>m \<ge> f (Suc n). s m \<le> s (f (Suc n))" by blast+}
-  note fSuc = this
-    {fix p q assume pq: "p \<ge> f q"
-      have "s p \<le> s(f(q))"  using f0(2)[rule_format, of p] pq fSuc
-	by (cases q, simp_all) }
-    note pqth = this
-    {fix q
-      have "f (Suc q) > f q" apply (induct q) 
-	using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))}
-    note fss = this
-    from fss have th1: "subseq f" unfolding subseq_Suc_iff ..
-    {fix a b 
-      have "f a \<le> f (a + b)"
-      proof(induct b)
-	case 0 thus ?case by simp
-      next
-	case (Suc b)
-	from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp
-      qed}
-    note fmon0 = this
-    have "monoseq (\<lambda>n. s (f n))" 
-    proof-
-      {fix n
-	have "s (f n) \<ge> s (f (Suc n))" 
-	proof(cases n)
-	  case 0
-	  assume n0: "n = 0"
-	  from fSuc(1)[of 0] have th0: "f 0 \<le> f (Suc 0)" by simp
-	  from f0(2)[rule_format, OF th0] show ?thesis  using n0 by simp
-	next
-	  case (Suc m)
-	  assume m: "n = Suc m"
-	  from fSuc(1)[of n] m have th0: "f (Suc m) \<le> f (Suc (Suc m))" by simp
-	  from m fSuc(2)[rule_format, OF th0] show ?thesis by simp 
-	qed}
-      thus "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc by blast 
-    qed
-    with th1 have ?thesis by blast}
-  moreover
-  {fix N assume N: "\<forall>p >N. \<exists> m\<ge>p. s m > s p"
-    {fix p assume p: "p \<ge> Suc N" 
-      hence pN: "p > N" by arith with N obtain m where m: "m \<ge> p" "s m > s p" by blast
-      have "m \<noteq> p" using m(2) by auto 
-      with m have "\<exists>m>p. s p < s m" by - (rule exI[where x=m], auto)}
-    note th0 = this
-    let ?P = "\<lambda>m x. m > x \<and> s x < s m"
-    from num_Axiom[of "SOME x. ?P x (Suc N)" "\<lambda>m x. SOME y. ?P y x"]
-    obtain f where f: "f 0 = (SOME x. ?P x (Suc N))" 
-      "\<forall>n. f (Suc n) = (SOME m. ?P m (f n))" by blast
-    have "?P (f 0) (Suc N)"  unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p (Suc N)"]
-      using N apply - 
-      apply (erule allE[where x="Suc N"], clarsimp)
-      apply (rule_tac x="m" in exI)
-      apply auto
-      apply (subgoal_tac "Suc N \<noteq> m")
-      apply simp
-      apply (rule ccontr, simp)
-      done
-    hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+
-    {fix n
-      have "f n > N \<and> ?P (f (Suc n)) (f n)"
-	unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
-      proof (induct n)
-	case 0 thus ?case
-	  using f0 N apply auto 
-	  apply (erule allE[where x="f 0"], clarsimp) 
-	  apply (rule_tac x="m" in exI, simp)
-	  by (subgoal_tac "f 0 \<noteq> m", auto)
-      next
-	case (Suc n)
-	from Suc.hyps have Nfn: "N < f n" by blast
-	from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast
-	with Nfn have mN: "m > N" by arith
-	note key = Suc.hyps[unfolded some_eq_ex[of "\<lambda>p. ?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]]
-	
-	from key have th0: "f (Suc n) > N" by simp
-	from N[rule_format, OF th0]
-	obtain m' where m': "m' \<ge> f (Suc n)" "s (f (Suc n)) < s m'" by blast
-	have "m' \<noteq> f (Suc (n))" apply (rule ccontr) using m'(2) by auto
-	hence "m' > f (Suc n)" using m'(1) by simp
-	with key m'(2) show ?case by auto
-      qed}
-    note fSuc = this
-    {fix n
-      have "f n \<ge> Suc N \<and> f(Suc n) > f n \<and> s(f n) < s(f(Suc n))" using fSuc[of n] by auto 
-      hence "f n \<ge> Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+}
-    note thf = this
-    have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp
-    have "monoseq (\<lambda>n. s (f n))"  unfolding monoseq_Suc using thf
-      apply -
-      apply (rule disjI1)
-      apply auto
-      apply (rule order_less_imp_le)
-      apply blast
-      done
-    then have ?thesis  using sqf by blast}
-  ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast
-qed
-
-lemma seq_suble: assumes sf: "subseq f" shows "n \<le> f n"
-proof(induct n)
-  case 0 thus ?case by simp
-next
-  case (Suc n)
-  from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps
-  have "n < f (Suc n)" by arith 
-  thus ?case by arith
-qed
-
-subsection {* Fundamental theorem of algebra *}
-lemma  unimodular_reduce_norm:
-  assumes md: "cmod z = 1"
-  shows "cmod (z + 1) < 1 \<or> cmod (z - 1) < 1 \<or> cmod (z + ii) < 1 \<or> cmod (z - ii) < 1"
-proof-
-  obtain x y where z: "z = Complex x y " by (cases z, auto)
-  from md z have xy: "x^2 + y^2 = 1" by (simp add: cmod_def)
-  {assume C: "cmod (z + 1) \<ge> 1" "cmod (z - 1) \<ge> 1" "cmod (z + ii) \<ge> 1" "cmod (z - ii) \<ge> 1"
-    from C z xy have "2*x \<le> 1" "2*x \<ge> -1" "2*y \<le> 1" "2*y \<ge> -1"
-      by (simp_all add: cmod_def power2_eq_square ring_simps)
-    hence "abs (2*x) \<le> 1" "abs (2*y) \<le> 1" by simp_all
-    hence "(abs (2 * x))^2 <= 1^2" "(abs (2 * y)) ^2 <= 1^2"
-      by - (rule power_mono, simp, simp)+
-    hence th0: "4*x^2 \<le> 1" "4*y^2 \<le> 1" 
-      by (simp_all  add: power2_abs power_mult_distrib)
-    from add_mono[OF th0] xy have False by simp }
-  thus ?thesis unfolding linorder_not_le[symmetric] by blast
-qed
-
-text{* Hence we can always make the modulus of @{text "1 + b z^n"} less than one, provided @{text b} and @{text n} are nonzero *}
-lemma reduce_poly_simple:
- assumes b: "b \<noteq> 0" and n: "n\<noteq>0"
-  shows "\<exists>z. cmod (1 + b * z^n) < 1"
-using n
-proof(induct n rule: nat_less_induct)
-  fix n
-  assume IH: "\<forall>m<n. m \<noteq> 0 \<longrightarrow> (\<exists>z. cmod (1 + b * z ^ m) < 1)" and n: "n \<noteq> 0"
-  let ?P = "\<lambda>z n. cmod (1 + b * z ^ n) < 1"
-  {assume e: "even n"
-    hence "\<exists>m. n = 2*m" by presburger
-    then obtain m where m: "n = 2*m" by blast
-    from n m have "m\<noteq>0" "m < n" by presburger+
-    with IH[rule_format, of m] obtain z where z: "?P z m" by blast
-    from z have "?P (csqrt z) n" by (simp add: m power_mult csqrt)
-    hence "\<exists>z. ?P z n" ..}
-  moreover
-  {assume o: "odd n"
-    from b have b': "b^2 \<noteq> 0" unfolding power2_eq_square by simp
-    have "Im (inverse b) * (Im (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) +
-    Re (inverse b) * (Re (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) = 
-    ((Re (inverse b))^2 + (Im (inverse b))^2) * \<bar>Im b * Im b + Re b * Re b\<bar>" by algebra
-    also have "\<dots> = cmod (inverse b) ^2 * cmod b ^ 2" 
-      apply (simp add: cmod_def) using realpow_two_le_add_order[of "Re b" "Im b"]
-      by (simp add: power2_eq_square)
-    finally 
-    have th0: "Im (inverse b) * (Im (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) +
-    Re (inverse b) * (Re (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) =
-    1" 
-      apply (simp add: power2_eq_square norm_mult[symmetric] norm_inverse[symmetric])
-      using right_inverse[OF b']
-      by (simp add: power2_eq_square[symmetric] power_inverse[symmetric] ring_simps)
-    have th0: "cmod (complex_of_real (cmod b) / b) = 1"
-      apply (simp add: complex_Re_mult cmod_def power2_eq_square Re_complex_of_real Im_complex_of_real divide_inverse ring_simps )
-      by (simp add: real_sqrt_mult[symmetric] th0)        
-    from o have "\<exists>m. n = Suc (2*m)" by presburger+
-    then obtain m where m: "n = Suc (2*m)" by blast
-    from unimodular_reduce_norm[OF th0] o
-    have "\<exists>v. cmod (complex_of_real (cmod b) / b + v^n) < 1"
-      apply (cases "cmod (complex_of_real (cmod b) / b + 1) < 1", rule_tac x="1" in exI, simp)
-      apply (cases "cmod (complex_of_real (cmod b) / b - 1) < 1", rule_tac x="-1" in exI, simp add: diff_def)
-      apply (cases "cmod (complex_of_real (cmod b) / b + ii) < 1")
-      apply (cases "even m", rule_tac x="ii" in exI, simp add: m power_mult)
-      apply (rule_tac x="- ii" in exI, simp add: m power_mult)
-      apply (cases "even m", rule_tac x="- ii" in exI, simp add: m power_mult diff_def)
-      apply (rule_tac x="ii" in exI, simp add: m power_mult diff_def)
-      done
-    then obtain v where v: "cmod (complex_of_real (cmod b) / b + v^n) < 1" by blast
-    let ?w = "v / complex_of_real (root n (cmod b))"
-    from odd_real_root_pow[OF o, of "cmod b"]
-    have th1: "?w ^ n = v^n / complex_of_real (cmod b)" 
-      by (simp add: power_divide complex_of_real_power)
-    have th2:"cmod (complex_of_real (cmod b) / b) = 1" using b by (simp add: norm_divide)
-    hence th3: "cmod (complex_of_real (cmod b) / b) \<ge> 0" by simp
-    have th4: "cmod (complex_of_real (cmod b) / b) *
-   cmod (1 + b * (v ^ n / complex_of_real (cmod b)))
-   < cmod (complex_of_real (cmod b) / b) * 1"
-      apply (simp only: norm_mult[symmetric] right_distrib)
-      using b v by (simp add: th2)
-
-    from mult_less_imp_less_left[OF th4 th3]
-    have "?P ?w n" unfolding th1 . 
-    hence "\<exists>z. ?P z n" .. }
-  ultimately show "\<exists>z. ?P z n" by blast
-qed
-
-
-text{* Bolzano-Weierstrass type property for a closed disc in the complex plane. *}
-
-lemma metric_bound_lemma: "cmod (x - y) <= \<bar>Re x - Re y\<bar> + \<bar>Im x - Im y\<bar>"
-  using real_sqrt_sum_squares_triangle_ineq[of "Re x - Re y" 0 0 "Im x - Im y" ]
-  unfolding cmod_def by simp
-
-lemma bolzano_weierstrass_complex_disc:
-  assumes r: "\<forall>n. cmod (s n) \<le> r"
-  shows "\<exists>f z. subseq f \<and> (\<forall>e >0. \<exists>N. \<forall>n \<ge> N. cmod (s (f n) - z) < e)"
-proof-
-  from seq_monosub[of "Re o s"] 
-  obtain f g where f: "subseq f" "monoseq (\<lambda>n. Re (s (f n)))" 
-    unfolding o_def by blast
-  from seq_monosub[of "Im o s o f"] 
-  obtain g where g: "subseq g" "monoseq (\<lambda>n. Im (s(f(g n))))" unfolding o_def by blast  
-  let ?h = "f o g"
-  from r[rule_format, of 0] have rp: "r \<ge> 0" using norm_ge_zero[of "s 0"] by arith 
-  have th:"\<forall>n. r + 1 \<ge> \<bar> Re (s n)\<bar>" 
-  proof
-    fix n
-    from abs_Re_le_cmod[of "s n"] r[rule_format, of n]  show "\<bar>Re (s n)\<bar> \<le> r + 1" by arith
-  qed
-  have conv1: "convergent (\<lambda>n. Re (s ( f n)))"
-    apply (rule Bseq_monoseq_convergent)
-    apply (simp add: Bseq_def)
-    apply (rule exI[where x= "r + 1"])
-    using th rp apply simp
-    using f(2) .
-  have th:"\<forall>n. r + 1 \<ge> \<bar> Im (s n)\<bar>" 
-  proof
-    fix n
-    from abs_Im_le_cmod[of "s n"] r[rule_format, of n]  show "\<bar>Im (s n)\<bar> \<le> r + 1" by arith
-  qed
-
-  have conv2: "convergent (\<lambda>n. Im (s (f (g n))))"
-    apply (rule Bseq_monoseq_convergent)
-    apply (simp add: Bseq_def)
-    apply (rule exI[where x= "r + 1"])
-    using th rp apply simp
-    using g(2) .
-
-  from conv1[unfolded convergent_def] obtain x where "LIMSEQ (\<lambda>n. Re (s (f n))) x" 
-    by blast 
-  hence  x: "\<forall>r>0. \<exists>n0. \<forall>n\<ge>n0. \<bar> Re (s (f n)) - x \<bar> < r" 
-    unfolding LIMSEQ_def real_norm_def .
-
-  from conv2[unfolded convergent_def] obtain y where "LIMSEQ (\<lambda>n. Im (s (f (g n)))) y" 
-    by blast 
-  hence  y: "\<forall>r>0. \<exists>n0. \<forall>n\<ge>n0. \<bar> Im (s (f (g n))) - y \<bar> < r" 
-    unfolding LIMSEQ_def real_norm_def .
-  let ?w = "Complex x y"
-  from f(1) g(1) have hs: "subseq ?h" unfolding subseq_def by auto 
-  {fix e assume ep: "e > (0::real)"
-    hence e2: "e/2 > 0" by simp
-    from x[rule_format, OF e2] y[rule_format, OF e2]
-    obtain N1 N2 where N1: "\<forall>n\<ge>N1. \<bar>Re (s (f n)) - x\<bar> < e / 2" and N2: "\<forall>n\<ge>N2. \<bar>Im (s (f (g n))) - y\<bar> < e / 2" by blast
-    {fix n assume nN12: "n \<ge> N1 + N2"
-      hence nN1: "g n \<ge> N1" and nN2: "n \<ge> N2" using seq_suble[OF g(1), of n] by arith+
-      from add_strict_mono[OF N1[rule_format, OF nN1] N2[rule_format, OF nN2]]
-      have "cmod (s (?h n) - ?w) < e" 
-	using metric_bound_lemma[of "s (f (g n))" ?w] by simp }
-    hence "\<exists>N. \<forall>n\<ge>N. cmod (s (?h n) - ?w) < e" by blast }
-  with hs show ?thesis  by blast  
-qed
-
-text{* A polynomial is continuous. *}
-
-lemma poly_cont:
-  assumes ep: "e > 0" 
-  shows "\<exists>d >0. \<forall>w. 0 < cmod (w - z) \<and> cmod (w - z) < d \<longrightarrow> cmod (poly p w - poly p z) < e"
-proof-
-  from poly_offset[of p z] obtain q where q: "length q = length p" "\<And>x. poly q x = poly p (z + x)" by blast
-  {fix w
-    note q(2)[of "w - z", simplified]}
-  note th = this
-  show ?thesis unfolding th[symmetric]
-  proof(induct q)
-    case Nil thus ?case  using ep by auto
-  next
-    case (Cons c cs)
-    from poly_bound_exists[of 1 "cs"] 
-    obtain m where m: "m > 0" "\<And>z. cmod z \<le> 1 \<Longrightarrow> cmod (poly cs z) \<le> m" by blast
-    from ep m(1) have em0: "e/m > 0" by (simp add: field_simps)
-    have one0: "1 > (0::real)"  by arith
-    from real_lbound_gt_zero[OF one0 em0] 
-    obtain d where d: "d >0" "d < 1" "d < e / m" by blast
-    from d(1,3) m(1) have dm: "d*m > 0" "d*m < e" 
-      by (simp_all add: field_simps real_mult_order)
-    show ?case 
-      proof(rule ex_forward[OF real_lbound_gt_zero[OF one0 em0]], clarsimp simp add: norm_mult)
-	fix d w
-	assume H: "d > 0" "d < 1" "d < e/m" "w\<noteq>z" "cmod (w-z) < d"
-	hence d1: "cmod (w-z) \<le> 1" "d \<ge> 0" by simp_all
-	from H(3) m(1) have dme: "d*m < e" by (simp add: field_simps)
-	from H have th: "cmod (w-z) \<le> d" by simp 
-	from mult_mono[OF th m(2)[OF d1(1)] d1(2) norm_ge_zero] dme
-	show "cmod (w - z) * cmod (poly cs (w - z)) < e" by simp
-      qed  
-    qed
-qed
-
-text{* Hence a polynomial attains its minimum on a closed disc 
-  in the complex plane. *}
-lemma  poly_minimum_modulus_disc:
-  "\<exists>z. \<forall>w. cmod w \<le> r \<longrightarrow> cmod (poly p z) \<le> cmod (poly p w)"
-proof-
-  {assume "\<not> r \<ge> 0" hence ?thesis unfolding linorder_not_le
-      apply -
-      apply (rule exI[where x=0]) 
-      apply auto
-      apply (subgoal_tac "cmod w < 0")
-      apply simp
-      apply arith
-      done }
-  moreover
-  {assume rp: "r \<ge> 0"
-    from rp have "cmod 0 \<le> r \<and> cmod (poly p 0) = - (- cmod (poly p 0))" by simp 
-    hence mth1: "\<exists>x z. cmod z \<le> r \<and> cmod (poly p z) = - x"  by blast
-    {fix x z
-      assume H: "cmod z \<le> r" "cmod (poly p z) = - x" "\<not>x < 1"
-      hence "- x < 0 " by arith
-      with H(2) norm_ge_zero[of "poly p z"]  have False by simp }
-    then have mth2: "\<exists>z. \<forall>x. (\<exists>z. cmod z \<le> r \<and> cmod (poly p z) = - x) \<longrightarrow> x < z" by blast
-    from real_sup_exists[OF mth1 mth2] obtain s where 
-      s: "\<forall>y. (\<exists>x. (\<exists>z. cmod z \<le> r \<and> cmod (poly p z) = - x) \<and> y < x) \<longleftrightarrow>(y < s)" by blast
-    let ?m = "-s"
-    {fix y
-      from s[rule_format, of "-y"] have 
-    "(\<exists>z x. cmod z \<le> r \<and> -(- cmod (poly p z)) < y) \<longleftrightarrow> ?m < y" 
-	unfolding minus_less_iff[of y ] equation_minus_iff by blast }
-    note s1 = this[unfolded minus_minus]
-    from s1[of ?m] have s1m: "\<And>z x. cmod z \<le> r \<Longrightarrow> cmod (poly p z) \<ge> ?m" 
-      by auto
-    {fix n::nat
-      from s1[rule_format, of "?m + 1/real (Suc n)"] 
-      have "\<exists>z. cmod z \<le> r \<and> cmod (poly p z) < - s + 1 / real (Suc n)"
-	by simp}
-    hence th: "\<forall>n. \<exists>z. cmod z \<le> r \<and> cmod (poly p z) < - s + 1 / real (Suc n)" ..
-    from choice[OF th] obtain g where 
-      g: "\<forall>n. cmod (g n) \<le> r" "\<forall>n. cmod (poly p (g n)) <?m+1 /real(Suc n)" 
-      by blast
-    from bolzano_weierstrass_complex_disc[OF g(1)] 
-    obtain f z where fz: "subseq f" "\<forall>e>0. \<exists>N. \<forall>n\<ge>N. cmod (g (f n) - z) < e"
-      by blast    
-    {fix w 
-      assume wr: "cmod w \<le> r"
-      let ?e = "\<bar>cmod (poly p z) - ?m\<bar>"
-      {assume e: "?e > 0"
-	hence e2: "?e/2 > 0" by simp
-	from poly_cont[OF e2, of z p] obtain d where
-	  d: "d>0" "\<forall>w. 0<cmod (w - z)\<and> cmod(w - z) < d \<longrightarrow> cmod(poly p w - poly p z) < ?e/2" by blast
-	{fix w assume w: "cmod (w - z) < d"
-	  have "cmod(poly p w - poly p z) < ?e / 2"
-	    using d(2)[rule_format, of w] w e by (cases "w=z", simp_all)}
-	note th1 = this
-	
-	from fz(2)[rule_format, OF d(1)] obtain N1 where 
-	  N1: "\<forall>n\<ge>N1. cmod (g (f n) - z) < d" by blast
-	from reals_Archimedean2[of "2/?e"] obtain N2::nat where
-	  N2: "2/?e < real N2" by blast
-	have th2: "cmod(poly p (g(f(N1 + N2))) - poly p z) < ?e/2"
-	  using N1[rule_format, of "N1 + N2"] th1 by simp
-	{fix a b e2 m :: real
-	have "a < e2 \<Longrightarrow> abs(b - m) < e2 \<Longrightarrow> 2 * e2 <= abs(b - m) + a
-          ==> False" by arith}
-      note th0 = this
-      have ath: 
-	"\<And>m x e. m <= x \<Longrightarrow>  x < m + e ==> abs(x - m::real) < e" by arith
-      from s1m[OF g(1)[rule_format]]
-      have th31: "?m \<le> cmod(poly p (g (f (N1 + N2))))" .
-      from seq_suble[OF fz(1), of "N1+N2"]
-      have th00: "real (Suc (N1+N2)) \<le> real (Suc (f (N1+N2)))" by simp
-      have th000: "0 \<le> (1::real)" "(1::real) \<le> 1" "real (Suc (N1+N2)) > 0"  
-	using N2 by auto
-      from frac_le[OF th000 th00] have th00: "?m +1 / real (Suc (f (N1 + N2))) \<le> ?m + 1 / real (Suc (N1 + N2))" by simp
-      from g(2)[rule_format, of "f (N1 + N2)"]
-      have th01:"cmod (poly p (g (f (N1 + N2)))) < - s + 1 / real (Suc (f (N1 + N2)))" .
-      from order_less_le_trans[OF th01 th00]
-      have th32: "cmod(poly p (g (f (N1 + N2)))) < ?m + (1/ real(Suc (N1 + N2)))" .
-      from N2 have "2/?e < real (Suc (N1 + N2))" by arith
-      with e2 less_imp_inverse_less[of "2/?e" "real (Suc (N1 + N2))"]
-      have "?e/2 > 1/ real (Suc (N1 + N2))" by (simp add: inverse_eq_divide)
-      with ath[OF th31 th32]
-      have thc1:"\<bar>cmod(poly p (g (f (N1 + N2)))) - ?m\<bar>< ?e/2" by arith  
-      have ath2: "\<And>(a::real) b c m. \<bar>a - b\<bar> <= c ==> \<bar>b - m\<bar> <= \<bar>a - m\<bar> + c" 
-	by arith
-      have th22: "\<bar>cmod (poly p (g (f (N1 + N2)))) - cmod (poly p z)\<bar>
-\<le> cmod (poly p (g (f (N1 + N2))) - poly p z)" 
-	by (simp add: norm_triangle_ineq3)
-      from ath2[OF th22, of ?m]
-      have thc2: "2*(?e/2) \<le> \<bar>cmod(poly p (g (f (N1 + N2)))) - ?m\<bar> + cmod (poly p (g (f (N1 + N2))) - poly p z)" by simp
-      from th0[OF th2 thc1 thc2] have False .}
-      hence "?e = 0" by auto
-      then have "cmod (poly p z) = ?m" by simp  
-      with s1m[OF wr]
-      have "cmod (poly p z) \<le> cmod (poly p w)" by simp }
-    hence ?thesis by blast}
-  ultimately show ?thesis by blast
-qed
-
-lemma "(rcis (sqrt (abs r)) (a/2)) ^ 2 = rcis (abs r) a"
-  unfolding power2_eq_square
-  apply (simp add: rcis_mult)
-  apply (simp add: power2_eq_square[symmetric])
-  done
-
-lemma cispi: "cis pi = -1" 
-  unfolding cis_def
-  by simp
-
-lemma "(rcis (sqrt (abs r)) ((pi + a)/2)) ^ 2 = rcis (- abs r) a"
-  unfolding power2_eq_square
-  apply (simp add: rcis_mult add_divide_distrib)
-  apply (simp add: power2_eq_square[symmetric] rcis_def cispi cis_mult[symmetric])
-  done
-
-text {* A nonzero polynomial in z goes to infinity as z does. *}
-
-instance complex::idom_char_0 by (intro_classes)
-instance complex :: recpower_idom_char_0 by intro_classes
-
-lemma poly_infinity:
-  assumes ex: "list_ex (\<lambda>c. c \<noteq> 0) p"
-  shows "\<exists>r. \<forall>z. r \<le> cmod z \<longrightarrow> d \<le> cmod (poly (a#p) z)"
-using ex
-proof(induct p arbitrary: a d)
-  case (Cons c cs a d) 
-  {assume H: "list_ex (\<lambda>c. c\<noteq>0) cs"
-    with Cons.hyps obtain r where r: "\<forall>z. r \<le> cmod z \<longrightarrow> d + cmod a \<le> cmod (poly (c # cs) z)" by blast
-    let ?r = "1 + \<bar>r\<bar>"
-    {fix z assume h: "1 + \<bar>r\<bar> \<le> cmod z"
-      have r0: "r \<le> cmod z" using h by arith
-      from r[rule_format, OF r0]
-      have th0: "d + cmod a \<le> 1 * cmod(poly (c#cs) z)" by arith
-      from h have z1: "cmod z \<ge> 1" by arith
-      from order_trans[OF th0 mult_right_mono[OF z1 norm_ge_zero[of "poly (c#cs) z"]]]
-      have th1: "d \<le> cmod(z * poly (c#cs) z) - cmod a"
-	unfolding norm_mult by (simp add: ring_simps)
-      from complex_mod_triangle_sub[of "z * poly (c#cs) z" a]
-      have th2: "cmod(z * poly (c#cs) z) - cmod a \<le> cmod (poly (a#c#cs) z)" 
-	by (simp add: diff_le_eq ring_simps) 
-      from th1 th2 have "d \<le> cmod (poly (a#c#cs) z)"  by arith}
-    hence ?case by blast}
-  moreover
-  {assume cs0: "\<not> (list_ex (\<lambda>c. c \<noteq> 0) cs)"
-    with Cons.prems have c0: "c \<noteq> 0" by simp
-    from cs0 have cs0': "list_all (\<lambda>c. c = 0) cs" 
-      by (auto simp add: list_all_iff list_ex_iff)
-    {fix z
-      assume h: "(\<bar>d\<bar> + cmod a) / cmod c \<le> cmod z"
-      from c0 have "cmod c > 0" by simp
-      from h c0 have th0: "\<bar>d\<bar> + cmod a \<le> cmod (z*c)" 
-	by (simp add: field_simps norm_mult)
-      have ath: "\<And>mzh mazh ma. mzh <= mazh + ma ==> abs(d) + ma <= mzh ==> d <= mazh" by arith
-      from complex_mod_triangle_sub[of "z*c" a ]
-      have th1: "cmod (z * c) \<le> cmod (a + z * c) + cmod a"
-	by (simp add: ring_simps)
-      from ath[OF th1 th0] have "d \<le> cmod (poly (a # c # cs) z)" 
-	using poly_0[OF cs0'] by simp}
-    then have ?case  by blast}
-  ultimately show ?case by blast
-qed simp
-
-text {* Hence a polynomial's modulus attains its minimum somewhere. *}
-lemma poly_minimum_modulus:
-  "\<exists>z.\<forall>w. cmod (poly p z) \<le> cmod (poly p w)"
-proof(induct p)
-  case (Cons c cs) 
-  {assume cs0: "list_ex (\<lambda>c. c \<noteq> 0) cs"
-    from poly_infinity[OF cs0, of "cmod (poly (c#cs) 0)" c]
-    obtain r where r: "\<And>z. r \<le> cmod z \<Longrightarrow> cmod (poly (c # cs) 0) \<le> cmod (poly (c # cs) z)" by blast
-    have ath: "\<And>z r. r \<le> cmod z \<or> cmod z \<le> \<bar>r\<bar>" by arith
-    from poly_minimum_modulus_disc[of "\<bar>r\<bar>" "c#cs"] 
-    obtain v where v: "\<And>w. cmod w \<le> \<bar>r\<bar> \<Longrightarrow> cmod (poly (c # cs) v) \<le> cmod (poly (c # cs) w)" by blast
-    {fix z assume z: "r \<le> cmod z"
-      from v[of 0] r[OF z] 
-      have "cmod (poly (c # cs) v) \<le> cmod (poly (c # cs) z)"
-	by simp }
-    note v0 = this
-    from v0 v ath[of r] have ?case by blast}
-  moreover
-  {assume cs0: "\<not> (list_ex (\<lambda>c. c\<noteq>0) cs)"
-    hence th:"list_all (\<lambda>c. c = 0) cs" by (simp add: list_all_iff list_ex_iff)
-    from poly_0[OF th] Cons.hyps have ?case by simp}
-  ultimately show ?case by blast
-qed simp
-
-text{* Constant function (non-syntactic characterization). *}
-definition "constant f = (\<forall>x y. f x = f y)"
-
-lemma nonconstant_length: "\<not> (constant (poly p)) \<Longrightarrow> length p \<ge> 2"
-  unfolding constant_def
-  apply (induct p, auto)
-  apply (unfold not_less[symmetric])
-  apply simp
-  apply (rule ccontr)
-  apply auto
-  done
- 
-lemma poly_replicate_append:
-  "poly ((replicate n 0)@p) (x::'a::{recpower, comm_ring}) = x^n * poly p x"
-  by(induct n, auto simp add: power_Suc ring_simps)
-
-text {* Decomposition of a polynomial, skipping zero coefficients 
-  after the first.  *}
-
-lemma poly_decompose_lemma:
- assumes nz: "\<not>(\<forall>z. z\<noteq>0 \<longrightarrow> poly p z = (0::'a::{recpower,idom}))"
-  shows "\<exists>k a q. a\<noteq>0 \<and> Suc (length q + k) = length p \<and> 
-                 (\<forall>z. poly p z = z^k * poly (a#q) z)"
-using nz
-proof(induct p)
-  case Nil thus ?case by simp
-next
-  case (Cons c cs)
-  {assume c0: "c = 0"
-    
-    from Cons.hyps Cons.prems c0 have ?case apply auto
-      apply (rule_tac x="k+1" in exI)
-      apply (rule_tac x="a" in exI, clarsimp)
-      apply (rule_tac x="q" in exI)
-      by (auto simp add: power_Suc)}
-  moreover
-  {assume c0: "c\<noteq>0"
-    hence ?case apply-
-      apply (rule exI[where x=0])
-      apply (rule exI[where x=c], clarsimp)
-      apply (rule exI[where x=cs])
-      apply auto
-      done}
-  ultimately show ?case by blast
-qed
-
-lemma poly_decompose:
-  assumes nc: "~constant(poly p)"
-  shows "\<exists>k a q. a\<noteq>(0::'a::{recpower,idom}) \<and> k\<noteq>0 \<and>
-               length q + k + 1 = length p \<and> 
-              (\<forall>z. poly p z = poly p 0 + z^k * poly (a#q) z)"
-using nc 
-proof(induct p)
-  case Nil thus ?case by (simp add: constant_def)
-next
-  case (Cons c cs)
-  {assume C:"\<forall>z. z \<noteq> 0 \<longrightarrow> poly cs z = 0"
-    {fix x y
-      from C have "poly (c#cs) x = poly (c#cs) y" by (cases "x=0", auto)}
-    with Cons.prems have False by (auto simp add: constant_def)}
-  hence th: "\<not> (\<forall>z. z \<noteq> 0 \<longrightarrow> poly cs z = 0)" ..
-  from poly_decompose_lemma[OF th] 
-  show ?case 
-    apply clarsimp    
-    apply (rule_tac x="k+1" in exI)
-    apply (rule_tac x="a" in exI)
-    apply simp
-    apply (rule_tac x="q" in exI)
-    apply (auto simp add: power_Suc)
-    done
-qed
-
-text{* Fundamental theorem of algebra *}
-
-lemma fundamental_theorem_of_algebra:
-  assumes nc: "~constant(poly p)"
-  shows "\<exists>z::complex. poly p z = 0"
-using nc
-proof(induct n\<equiv> "length p" arbitrary: p rule: nat_less_induct)
-  fix n fix p :: "complex list"
-  let ?p = "poly p"
-  assume H: "\<forall>m<n. \<forall>p. \<not> constant (poly p) \<longrightarrow> m = length p \<longrightarrow> (\<exists>(z::complex). poly p z = 0)" and nc: "\<not> constant ?p" and n: "n = length p"
-  let ?ths = "\<exists>z. ?p z = 0"
-
-  from nonconstant_length[OF nc] have n2: "n\<ge> 2" by (simp add: n)
-  from poly_minimum_modulus obtain c where 
-    c: "\<forall>w. cmod (?p c) \<le> cmod (?p w)" by blast
-  {assume pc: "?p c = 0" hence ?ths by blast}
-  moreover
-  {assume pc0: "?p c \<noteq> 0"
-    from poly_offset[of p c] obtain q where
-      q: "length q = length p" "\<forall>x. poly q x = ?p (c+x)" by blast
-    {assume h: "constant (poly q)"
-      from q(2) have th: "\<forall>x. poly q (x - c) = ?p x" by auto
-      {fix x y
-	from th have "?p x = poly q (x - c)" by auto 
-	also have "\<dots> = poly q (y - c)" 
-	  using h unfolding constant_def by blast
-	also have "\<dots> = ?p y" using th by auto
-	finally have "?p x = ?p y" .}
-      with nc have False unfolding constant_def by blast }
-    hence qnc: "\<not> constant (poly q)" by blast
-    from q(2) have pqc0: "?p c = poly q 0" by simp
-    from c pqc0 have cq0: "\<forall>w. cmod (poly q 0) \<le> cmod (?p w)" by simp 
-    let ?a0 = "poly q 0"
-    from pc0 pqc0 have a00: "?a0 \<noteq> 0" by simp 
-    from a00 
-    have qr: "\<forall>z. poly q z = poly (map (op * (inverse ?a0)) q) z * ?a0"
-      by (simp add: poly_cmult_map)
-    let ?r = "map (op * (inverse ?a0)) q"
-    have lgqr: "length q = length ?r" by simp 
-    {assume h: "\<And>x y. poly ?r x = poly ?r y"
-      {fix x y
-	from qr[rule_format, of x] 
-	have "poly q x = poly ?r x * ?a0" by auto
-	also have "\<dots> = poly ?r y * ?a0" using h by simp
-	also have "\<dots> = poly q y" using qr[rule_format, of y] by simp
-	finally have "poly q x = poly q y" .} 
-      with qnc have False unfolding constant_def by blast}
-    hence rnc: "\<not> constant (poly ?r)" unfolding constant_def by blast
-    from qr[rule_format, of 0] a00  have r01: "poly ?r 0 = 1" by auto
-    {fix w 
-      have "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w / ?a0) < 1"
-	using qr[rule_format, of w] a00 by simp
-      also have "\<dots> \<longleftrightarrow> cmod (poly q w) < cmod ?a0"
-	using a00 unfolding norm_divide by (simp add: field_simps)
-      finally have "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w) < cmod ?a0" .}
-    note mrmq_eq = this
-    from poly_decompose[OF rnc] obtain k a s where 
-      kas: "a\<noteq>0" "k\<noteq>0" "length s + k + 1 = length ?r" 
-      "\<forall>z. poly ?r z = poly ?r 0 + z^k* poly (a#s) z" by blast
-    {assume "k + 1 = n"
-      with kas(3) lgqr[symmetric] q(1) n[symmetric] have s0:"s=[]" by auto
-      {fix w
-	have "cmod (poly ?r w) = cmod (1 + a * w ^ k)" 
-	  using kas(4)[rule_format, of w] s0 r01 by (simp add: ring_simps)}
-      note hth = this [symmetric]
-	from reduce_poly_simple[OF kas(1,2)] 
-      have "\<exists>w. cmod (poly ?r w) < 1" unfolding hth by blast}
-    moreover
-    {assume kn: "k+1 \<noteq> n"
-      from kn kas(3) q(1) n[symmetric] have k1n: "k + 1 < n" by simp
-      have th01: "\<not> constant (poly (1#((replicate (k - 1) 0)@[a])))" 
-	unfolding constant_def poly_Nil poly_Cons poly_replicate_append
-	using kas(1) apply simp 
-	by (rule exI[where x=0], rule exI[where x=1], simp)
-      from kas(2) have th02: "k+1 = length (1#((replicate (k - 1) 0)@[a]))" 
-	by simp
-      from H[rule_format, OF k1n th01 th02]
-      obtain w where w: "1 + w^k * a = 0"
-	unfolding poly_Nil poly_Cons poly_replicate_append
-	using kas(2) by (auto simp add: power_Suc[symmetric, of _ "k - Suc 0"] 
-	  mult_assoc[of _ _ a, symmetric])
-      from poly_bound_exists[of "cmod w" s] obtain m where 
-	m: "m > 0" "\<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m" by blast
-      have w0: "w\<noteq>0" using kas(2) w by (auto simp add: power_0_left)
-      from w have "(1 + w ^ k * a) - 1 = 0 - 1" by simp
-      then have wm1: "w^k * a = - 1" by simp
-      have inv0: "0 < inverse (cmod w ^ (k + 1) * m)" 
-	using norm_ge_zero[of w] w0 m(1)
-	  by (simp add: inverse_eq_divide zero_less_mult_iff)
-      with real_down2[OF zero_less_one] obtain t where
-	t: "t > 0" "t < 1" "t < inverse (cmod w ^ (k + 1) * m)" by blast
-      let ?ct = "complex_of_real t"
-      let ?w = "?ct * w"
-      have "1 + ?w^k * (a + ?w * poly s ?w) = 1 + ?ct^k * (w^k * a) + ?w^k * ?w * poly s ?w" using kas(1) by (simp add: ring_simps power_mult_distrib)
-      also have "\<dots> = complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w"
-	unfolding wm1 by (simp)
-      finally have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) = cmod (complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w)" 
-	apply -
-	apply (rule cong[OF refl[of cmod]])
-	apply assumption
-	done
-      with norm_triangle_ineq[of "complex_of_real (1 - t^k)" "?w^k * ?w * poly s ?w"] 
-      have th11: "cmod (1 + ?w^k * (a + ?w * poly s ?w)) \<le> \<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w)" unfolding norm_of_real by simp 
-      have ath: "\<And>x (t::real). 0\<le> x \<Longrightarrow> x < t \<Longrightarrow> t\<le>1 \<Longrightarrow> \<bar>1 - t\<bar> + x < 1" by arith
-      have "t *cmod w \<le> 1 * cmod w" apply (rule mult_mono) using t(1,2) by auto
-      then have tw: "cmod ?w \<le> cmod w" using t(1) by (simp add: norm_mult) 
-      from t inv0 have "t* (cmod w ^ (k + 1) * m) < 1"
-	by (simp add: inverse_eq_divide field_simps)
-      with zero_less_power[OF t(1), of k] 
-      have th30: "t^k * (t* (cmod w ^ (k + 1) * m)) < t^k * 1" 
-	apply - apply (rule mult_strict_left_mono) by simp_all
-      have "cmod (?w^k * ?w * poly s ?w) = t^k * (t* (cmod w ^ (k+1) * cmod (poly s ?w)))"  using w0 t(1)
-	by (simp add: ring_simps power_mult_distrib norm_of_real norm_power norm_mult)
-      then have "cmod (?w^k * ?w * poly s ?w) \<le> t^k * (t* (cmod w ^ (k + 1) * m))"
-	using t(1,2) m(2)[rule_format, OF tw] w0
-	apply (simp only: )
-	apply auto
-	apply (rule mult_mono, simp_all add: norm_ge_zero)+
-	apply (simp add: zero_le_mult_iff zero_le_power)
-	done
-      with th30 have th120: "cmod (?w^k * ?w * poly s ?w) < t^k" by simp 
-      from power_strict_mono[OF t(2), of k] t(1) kas(2) have th121: "t^k \<le> 1" 
-	by auto
-      from ath[OF norm_ge_zero[of "?w^k * ?w * poly s ?w"] th120 th121]
-      have th12: "\<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w) < 1" . 
-      from th11 th12
-      have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) < 1"  by arith 
-      then have "cmod (poly ?r ?w) < 1" 
-	unfolding kas(4)[rule_format, of ?w] r01 by simp 
-      then have "\<exists>w. cmod (poly ?r w) < 1" by blast}
-    ultimately have cr0_contr: "\<exists>w. cmod (poly ?r w) < 1" by blast
-    from cr0_contr cq0 q(2)
-    have ?ths unfolding mrmq_eq not_less[symmetric] by auto}
-  ultimately show ?ths by blast
-qed
-
-text {* Alternative version with a syntactic notion of constant polynomial. *}
-
-lemma fundamental_theorem_of_algebra_alt:
-  assumes nc: "~(\<exists>a l. a\<noteq> 0 \<and> list_all(\<lambda>b. b = 0) l \<and> p = a#l)"
-  shows "\<exists>z. poly p z = (0::complex)"
-using nc
-proof(induct p)
-  case (Cons c cs)
-  {assume "c=0" hence ?case by auto}
-  moreover
-  {assume c0: "c\<noteq>0"
-    {assume nc: "constant (poly (c#cs))"
-      from nc[unfolded constant_def, rule_format, of 0] 
-      have "\<forall>w. w \<noteq> 0 \<longrightarrow> poly cs w = 0" by auto 
-      hence "list_all (\<lambda>c. c=0) cs"
-	proof(induct cs)
-	  case (Cons d ds)
-	  {assume "d=0" hence ?case using Cons.prems Cons.hyps by simp}
-	  moreover
-	  {assume d0: "d\<noteq>0"
-	    from poly_bound_exists[of 1 ds] obtain m where 
-	      m: "m > 0" "\<forall>z. \<forall>z. cmod z \<le> 1 \<longrightarrow> cmod (poly ds z) \<le> m" by blast
-	    have dm: "cmod d / m > 0" using d0 m(1) by (simp add: field_simps)
-	    from real_down2[OF dm zero_less_one] obtain x where 
-	      x: "x > 0" "x < cmod d / m" "x < 1" by blast
-	    let ?x = "complex_of_real x"
-	    from x have cx: "?x \<noteq> 0"  "cmod ?x \<le> 1" by simp_all
-	    from Cons.prems[rule_format, OF cx(1)]
-	    have cth: "cmod (?x*poly ds ?x) = cmod d" by (simp add: eq_diff_eq[symmetric])
-	    from m(2)[rule_format, OF cx(2)] x(1)
-	    have th0: "cmod (?x*poly ds ?x) \<le> x*m"
-	      by (simp add: norm_mult)
-	    from x(2) m(1) have "x*m < cmod d" by (simp add: field_simps)
-	    with th0 have "cmod (?x*poly ds ?x) \<noteq> cmod d" by auto
-	    with cth  have ?case by blast}
-	  ultimately show ?case by blast 
-	qed simp}
-      then have nc: "\<not> constant (poly (c#cs))" using Cons.prems c0 
-	by blast
-      from fundamental_theorem_of_algebra[OF nc] have ?case .}
-  ultimately show ?case by blast  
-qed simp
-
-subsection{* Nullstellensatz, degrees and divisibility of polynomials *}
-
-lemma nullstellensatz_lemma:
-  fixes p :: "complex list"
-  assumes "\<forall>x. poly p x = 0 \<longrightarrow> poly q x = 0"
-  and "degree p = n" and "n \<noteq> 0"
-  shows "p divides (pexp q n)"
-using prems
-proof(induct n arbitrary: p q rule: nat_less_induct)
-  fix n::nat fix p q :: "complex list"
-  assume IH: "\<forall>m<n. \<forall>p q.
-                 (\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longrightarrow>
-                 degree p = m \<longrightarrow> m \<noteq> 0 \<longrightarrow> p divides (q %^ m)"
-    and pq0: "\<forall>x. poly p x = 0 \<longrightarrow> poly q x = 0" 
-    and dpn: "degree p = n" and n0: "n \<noteq> 0"
-  let ?ths = "p divides (q %^ n)"
-  {fix a assume a: "poly p a = 0"
-    {assume p0: "poly p = poly []" 
-      hence ?ths unfolding divides_def  using pq0 n0
-	apply - apply (rule exI[where x="[]"], rule ext)
-	by (auto simp add: poly_mult poly_exp)}
-    moreover
-    {assume p0: "poly p \<noteq> poly []" 
-      and oa: "order  a p \<noteq> 0"
-      from p0 have pne: "p \<noteq> []" by auto
-      let ?op = "order a p"
-      from p0 have ap: "([- a, 1] %^ ?op) divides p" 
-	"\<not> pexp [- a, 1] (Suc ?op) divides p" using order by blast+ 
-      note oop = order_degree[OF p0, unfolded dpn]
-      {assume q0: "q = []"
-	hence ?ths using n0 unfolding divides_def 
-	  apply simp
-	  apply (rule exI[where x="[]"], rule ext)
-	  by (simp add: divides_def poly_exp poly_mult)}
-      moreover
-      {assume q0: "q\<noteq>[]"
-	from pq0[rule_format, OF a, unfolded poly_linear_divides] q0
-	obtain r where r: "q = pmult [- a, 1] r" by blast
-	from ap[unfolded divides_def] obtain s where
-	  s: "poly p = poly (pmult (pexp [- a, 1] ?op) s)" by blast
-	have s0: "poly s \<noteq> poly []"
-	  using s p0 by (simp add: poly_entire)
-	hence pns0: "poly (pnormalize s) \<noteq> poly []" and sne: "s\<noteq>[]" by auto
-	{assume ds0: "degree s = 0"
-	  from ds0 pns0 have "\<exists>k. pnormalize s = [k]" unfolding degree_def 
-	    by (cases "pnormalize s", auto)
-	  then obtain k where kpn: "pnormalize s = [k]" by blast
-	  from pns0[unfolded poly_zero] kpn have k: "k \<noteq>0" "poly s = poly [k]"
-	    using poly_normalize[of s] by simp_all
-	  let ?w = "pmult (pmult [1/k] (pexp [-a,1] (n - ?op))) (pexp r n)"
-	  from k r s oop have "poly (pexp q n) = poly (pmult p ?w)"
-	    by - (rule ext, simp add: poly_mult poly_exp poly_cmult poly_add power_add[symmetric] ring_simps power_mult_distrib[symmetric])
-	  hence ?ths unfolding divides_def by blast}
-	moreover
-	{assume ds0: "degree s \<noteq> 0"
-	  from ds0 s0 dpn degree_unique[OF s, unfolded linear_pow_mul_degree] oa
-	    have dsn: "degree s < n" by auto 
-	    {fix x assume h: "poly s x = 0"
-	      {assume xa: "x = a"
-		from h[unfolded xa poly_linear_divides] sne obtain u where
-		  u: "s = pmult [- a, 1] u" by blast
-		have "poly p = poly (pmult (pexp [- a, 1] (Suc ?op)) u)"
-		  unfolding s u
-		  apply (rule ext)
-		  by (simp add: ring_simps power_mult_distrib[symmetric] poly_mult poly_cmult poly_add poly_exp)
-		with ap(2)[unfolded divides_def] have False by blast}
-	      note xa = this
-	      from h s have "poly p x = 0" by (simp add: poly_mult)
-	      with pq0 have "poly q x = 0" by blast
-	      with r xa have "poly r x = 0"
-		by (auto simp add: poly_mult poly_add poly_cmult eq_diff_eq[symmetric])}
-	    note impth = this
-	    from IH[rule_format, OF dsn, of s r] impth ds0
-	    have "s divides (pexp r (degree s))" by blast
-	    then obtain u where u: "poly (pexp r (degree s)) = poly (pmult s u)"
-	      unfolding divides_def by blast
-	    hence u': "\<And>x. poly s x * poly u x = poly r x ^ degree s"
-	      by (simp add: poly_mult[symmetric] poly_exp[symmetric])
-	    let ?w = "pmult (pmult u (pexp [-a,1] (n - ?op))) (pexp r (n - degree s))"
-	    from u' s r oop[of a] dsn have "poly (pexp q n) = poly (pmult p ?w)"
-	      apply - apply (rule ext)
-	      apply (simp only:  power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult ring_simps)
-	      
-	      apply (simp add:  power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult mult_assoc[symmetric])
-	      done
-	    hence ?ths unfolding divides_def by blast}
-      ultimately have ?ths by blast }
-      ultimately have ?ths by blast}
-    ultimately have ?ths using a order_root by blast}
-  moreover
-  {assume exa: "\<not> (\<exists>a. poly p a = 0)"
-    from fundamental_theorem_of_algebra_alt[of p] exa obtain c cs where
-      ccs: "c\<noteq>0" "list_all (\<lambda>c. c = 0) cs" "p = c#cs" by blast
-    
-    from poly_0[OF ccs(2)] ccs(3) 
-    have pp: "\<And>x. poly p x =  c" by simp
-    let ?w = "pmult [1/c] (pexp q n)"
-    from pp ccs(1) 
-    have "poly (pexp q n) = poly (pmult p ?w) "
-      apply - apply (rule ext)
-      unfolding poly_mult_assoc[symmetric] by (simp add: poly_mult)
-    hence ?ths unfolding divides_def by blast}
-  ultimately show ?ths by blast
-qed
-
-lemma nullstellensatz_univariate:
-  "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> 
-    p divides (q %^ (degree p)) \<or> (poly p = poly [] \<and> poly q = poly [])"
-proof-
-  {assume pe: "poly p = poly []"
-    hence eq: "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> poly q = poly []"
-      apply auto
-      by (rule ext, simp)
-    {assume "p divides (pexp q (degree p))"
-      then obtain r where r: "poly (pexp q (degree p)) = poly (pmult p r)" 
-	unfolding divides_def by blast
-      from cong[OF r refl] pe degree_unique[OF pe]
-      have False by (simp add: poly_mult degree_def)}
-    with eq pe have ?thesis by blast}
-  moreover
-  {assume pe: "poly p \<noteq> poly []"
-    have p0: "poly [0] = poly []" by (rule ext, simp)
-    {assume dp: "degree p = 0"
-      then obtain k where "pnormalize p = [k]" using pe poly_normalize[of p]
-	unfolding degree_def by (cases "pnormalize p", auto)
-      hence k: "pnormalize p = [k]" "poly p = poly [k]" "k\<noteq>0"
-	using pe poly_normalize[of p] by (auto simp add: p0)
-      hence th1: "\<forall>x. poly p x \<noteq> 0" by simp
-      from k(2,3) dp have "poly (pexp q (degree p)) = poly (pmult p [1/k]) "
-	by - (rule ext, simp add: poly_mult poly_exp)
-      hence th2: "p divides (pexp q (degree p))" unfolding divides_def by blast
-      from th1 th2 pe have ?thesis by blast}
-    moreover
-    {assume dp: "degree p \<noteq> 0"
-      then obtain n where n: "degree p = Suc n " by (cases "degree p", auto)
-      {assume "p divides (pexp q (Suc n))"
-	then obtain u where u: "poly (pexp q (Suc n)) = poly (pmult p u)"
-	  unfolding divides_def by blast
-	hence u' :"\<And>x. poly (pexp q (Suc n)) x = poly (pmult p u) x" by simp_all
-	{fix x assume h: "poly p x = 0" "poly q x \<noteq> 0"
-	  hence "poly (pexp q (Suc n)) x \<noteq> 0" by (simp only: poly_exp) simp	  
-	  hence False using u' h(1) by (simp only: poly_mult poly_exp) simp}}
-	with n nullstellensatz_lemma[of p q "degree p"] dp 
-	have ?thesis by auto}
-    ultimately have ?thesis by blast}
-  ultimately show ?thesis by blast
-qed
-
-text{* Useful lemma *}
-
-lemma (in idom_char_0) constant_degree: "constant (poly p) \<longleftrightarrow> degree p = 0" (is "?lhs = ?rhs")
-proof
-  assume l: ?lhs
-  from l[unfolded constant_def, rule_format, of _ "zero"]
-  have th: "poly p = poly [poly p 0]" apply - by (rule ext, simp)
-  from degree_unique[OF th] show ?rhs by (simp add: degree_def)
-next
-  assume r: ?rhs
-  from r have "pnormalize p = [] \<or> (\<exists>k. pnormalize p = [k])"
-    unfolding degree_def by (cases "pnormalize p", auto)
-  then show ?lhs unfolding constant_def poly_normalize[of p, symmetric]
-    by (auto simp del: poly_normalize)
-qed
-
-(* It would be nicer to prove this without using algebraic closure...        *)
-
-lemma divides_degree_lemma: assumes dpn: "degree (p::complex list) = n"
-  shows "n \<le> degree (p *** q) \<or> poly (p *** q) = poly []"
-  using dpn
-proof(induct n arbitrary: p q)
-  case 0 thus ?case by simp
-next
-  case (Suc n p q)
-  from Suc.prems fundamental_theorem_of_algebra[of p] constant_degree[of p]
-  obtain a where a: "poly p a = 0" by auto
-  then obtain r where r: "p = pmult [-a, 1] r" unfolding poly_linear_divides
-    using Suc.prems by (auto simp add: degree_def)
-  {assume h: "poly (pmult r q) = poly []"
-    hence "poly (pmult p q) = poly []" using r
-      apply - apply (rule ext)  by (auto simp add: poly_entire poly_mult poly_add poly_cmult) hence ?case by blast}
-  moreover
-  {assume h: "poly (pmult r q) \<noteq> poly []" 
-    hence r0: "poly r \<noteq> poly []" and q0: "poly q \<noteq> poly []"
-      by (auto simp add: poly_entire)
-    have eq: "poly (pmult p q) = poly (pmult [-a, 1] (pmult r q))"
-      apply - apply (rule ext)
-      by (simp add: r poly_mult poly_add poly_cmult ring_simps)
-    from linear_mul_degree[OF h, of "- a"]
-    have dqe: "degree (pmult p q) = degree (pmult r q) + 1"
-      unfolding degree_unique[OF eq] .
-    from linear_mul_degree[OF r0, of "- a", unfolded r[symmetric]] r Suc.prems 
-    have dr: "degree r = n" by auto
-    from  Suc.hyps[OF dr, of q] have "Suc n \<le> degree (pmult p q)"
-      unfolding dqe using h by (auto simp del: poly.simps) 
-    hence ?case by blast}
-  ultimately show ?case by blast
-qed
-
-lemma divides_degree: assumes pq: "p divides (q:: complex list)"
-  shows "degree p \<le> degree q \<or> poly q = poly []"
-using pq  divides_degree_lemma[OF refl, of p]
-apply (auto simp add: divides_def poly_entire)
-apply atomize
-apply (erule_tac x="qa" in allE, auto)
-apply (subgoal_tac "degree q = degree (p *** qa)", simp)
-apply (rule degree_unique, simp)
-done
-
-(* Arithmetic operations on multivariate polynomials.                        *)
-
-lemma mpoly_base_conv: 
-  "(0::complex) \<equiv> poly [] x" "c \<equiv> poly [c] x" "x \<equiv> poly [0,1] x" by simp_all
-
-lemma mpoly_norm_conv: 
-  "poly [0] (x::complex) \<equiv> poly [] x" "poly [poly [] y] x \<equiv> poly [] x" by simp_all
-
-lemma mpoly_sub_conv: 
-  "poly p (x::complex) - poly q x \<equiv> poly p x + -1 * poly q x"
-  by (simp add: diff_def)
-
-lemma poly_pad_rule: "poly p x = 0 ==> poly (0#p) x = (0::complex)" by simp
-
-lemma poly_cancel_eq_conv: "p = (0::complex) \<Longrightarrow> a \<noteq> 0 \<Longrightarrow> (q = 0) \<equiv> (a * q - b * p = 0)" apply (atomize (full)) by auto
-
-lemma resolve_eq_raw:  "poly [] x \<equiv> 0" "poly [c] x \<equiv> (c::complex)" by auto
-lemma  resolve_eq_then: "(P \<Longrightarrow> (Q \<equiv> Q1)) \<Longrightarrow> (\<not>P \<Longrightarrow> (Q \<equiv> Q2))
-  \<Longrightarrow> Q \<equiv> P \<and> Q1 \<or> \<not>P\<and> Q2" apply (atomize (full)) by blast 
-lemma expand_ex_beta_conv: "list_ex P [c] \<equiv> P c" by simp
-
-lemma poly_divides_pad_rule: 
-  fixes p q :: "complex list"
-  assumes pq: "p divides q"
-  shows "p divides ((0::complex)#q)"
-proof-
-  from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast
-  hence "poly (0#q) = poly (p *** ([0,1] *** r))" 
-    by - (rule ext, simp add: poly_mult poly_cmult poly_add)
-  thus ?thesis unfolding divides_def by blast
-qed
-
-lemma poly_divides_pad_const_rule: 
-  fixes p q :: "complex list"
-  assumes pq: "p divides q"
-  shows "p divides (a %* q)"
-proof-
-  from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast
-  hence "poly (a %* q) = poly (p *** (a %* r))" 
-    by - (rule ext, simp add: poly_mult poly_cmult poly_add)
-  thus ?thesis unfolding divides_def by blast
-qed
-
-
-lemma poly_divides_conv0:  
-  fixes p :: "complex list"
-  assumes lgpq: "length q < length p" and lq:"last p \<noteq> 0"
-  shows "p divides q \<equiv> (\<not> (list_ex (\<lambda>c. c \<noteq> 0) q))" (is "?lhs \<equiv> ?rhs")
-proof-
-  {assume r: ?rhs 
-    hence eq: "poly q = poly []" unfolding poly_zero 
-      by (simp add: list_all_iff list_ex_iff)
-    hence "poly q = poly (p *** [])" by - (rule ext, simp add: poly_mult)
-    hence ?lhs unfolding divides_def  by blast}
-  moreover
-  {assume l: ?lhs
-    have ath: "\<And>lq lp dq::nat. lq < lp ==> lq \<noteq> 0 \<Longrightarrow> dq <= lq - 1 ==> dq < lp - 1"
-      by arith
-    {assume q0: "length q = 0"
-      hence "q = []" by simp
-      hence ?rhs by simp}
-    moreover
-    {assume lgq0: "length q \<noteq> 0"
-      from pnormalize_length[of q] have dql: "degree q \<le> length q - 1" 
-	unfolding degree_def by simp
-      from ath[OF lgpq lgq0 dql, unfolded pnormal_degree[OF lq, symmetric]] divides_degree[OF l] have "poly q = poly []" by auto
-      hence ?rhs unfolding poly_zero by (simp add: list_all_iff list_ex_iff)}
-    ultimately have ?rhs by blast }
-  ultimately show "?lhs \<equiv> ?rhs" by - (atomize (full), blast) 
-qed
-
-lemma poly_divides_conv1: 
-  assumes a0: "a\<noteq> (0::complex)" and pp': "(p::complex list) divides p'"
-  and qrp': "\<And>x. a * poly q x - poly p' x \<equiv> poly r x"
-  shows "p divides q \<equiv> p divides (r::complex list)" (is "?lhs \<equiv> ?rhs")
-proof-
-  {
-  from pp' obtain t where t: "poly p' = poly (p *** t)" 
-    unfolding divides_def by blast
-  {assume l: ?lhs
-    then obtain u where u: "poly q = poly (p *** u)" unfolding divides_def by blast
-     have "poly r = poly (p *** ((a %* u) +++ (-- t)))"
-       using u qrp' t
-       by - (rule ext, 
-	 simp add: poly_add poly_mult poly_cmult poly_minus ring_simps)
-     then have ?rhs unfolding divides_def by blast}
-  moreover
-  {assume r: ?rhs
-    then obtain u where u: "poly r = poly (p *** u)" unfolding divides_def by blast
-    from u t qrp' a0 have "poly q = poly (p *** ((1/a) %* (u +++ t)))"
-      by - (rule ext, atomize (full), simp add: poly_mult poly_add poly_cmult field_simps)
-    hence ?lhs  unfolding divides_def by blast}
-  ultimately have "?lhs = ?rhs" by blast }
-thus "?lhs \<equiv> ?rhs"  by - (atomize(full), blast) 
-qed
-
-lemma basic_cqe_conv1:
-  "(\<exists>x. poly p x = 0 \<and> poly [] x \<noteq> 0) \<equiv> False"
-  "(\<exists>x. poly [] x \<noteq> 0) \<equiv> False"
-  "(\<exists>x. poly [c] x \<noteq> 0) \<equiv> c\<noteq>0"
-  "(\<exists>x. poly [] x = 0) \<equiv> True"
-  "(\<exists>x. poly [c] x = 0) \<equiv> c = 0" by simp_all
-
-lemma basic_cqe_conv2: 
-  assumes l:"last (a#b#p) \<noteq> 0" 
-  shows "(\<exists>x. poly (a#b#p) x = (0::complex)) \<equiv> True"
-proof-
-  {fix h t
-    assume h: "h\<noteq>0" "list_all (\<lambda>c. c=(0::complex)) t"  "a#b#p = h#t"
-    hence "list_all (\<lambda>c. c= 0) (b#p)" by simp
-    moreover have "last (b#p) \<in> set (b#p)" by simp
-    ultimately have "last (b#p) = 0" by (simp add: list_all_iff)
-    with l have False by simp}
-  hence th: "\<not> (\<exists> h t. h\<noteq>0 \<and> list_all (\<lambda>c. c=0) t \<and> a#b#p = h#t)"
-    by blast
-  from fundamental_theorem_of_algebra_alt[OF th] 
-  show "(\<exists>x. poly (a#b#p) x = (0::complex)) \<equiv> True" by auto
-qed
-
-lemma  basic_cqe_conv_2b: "(\<exists>x. poly p x \<noteq> (0::complex)) \<equiv> (list_ex (\<lambda>c. c \<noteq> 0) p)"
-proof-
-  have "\<not> (list_ex (\<lambda>c. c \<noteq> 0) p) \<longleftrightarrow> poly p = poly []" 
-    by (simp add: poly_zero list_all_iff list_ex_iff)
-  also have "\<dots> \<longleftrightarrow> (\<not> (\<exists>x. poly p x \<noteq> 0))" by (auto intro: ext)
-  finally show "(\<exists>x. poly p x \<noteq> (0::complex)) \<equiv> (list_ex (\<lambda>c. c \<noteq> 0) p)"
-    by - (atomize (full), blast)
-qed
-
-lemma basic_cqe_conv3:
-  fixes p q :: "complex list"
-  assumes l: "last (a#p) \<noteq> 0" 
-  shows "(\<exists>x. poly (a#p) x =0 \<and> poly q x \<noteq> 0) \<equiv> \<not> ((a#p) divides (q %^ (length p)))"
-proof-
-  note np = pnormalize_eq[OF l]
-  {assume "poly (a#p) = poly []" hence False using l
-      unfolding poly_zero apply (auto simp add: list_all_iff del: last.simps)
-      apply (cases p, simp_all) done}
-  then have p0: "poly (a#p) \<noteq> poly []"  by blast
-  from np have dp:"degree (a#p) = length p" by (simp add: degree_def)
-  from nullstellensatz_univariate[of "a#p" q] p0 dp
-  show "(\<exists>x. poly (a#p) x =0 \<and> poly q x \<noteq> 0) \<equiv> \<not> ((a#p) divides (q %^ (length p)))"
-    by - (atomize (full), auto)
-qed
-
-lemma basic_cqe_conv4:
-  fixes p q :: "complex list"
-  assumes h: "\<And>x. poly (q %^ n) x \<equiv> poly r x"
-  shows "p divides (q %^ n) \<equiv> p divides r"
-proof-
-  from h have "poly (q %^ n) = poly r" by (auto intro: ext)  
-  thus "p divides (q %^ n) \<equiv> p divides r" unfolding divides_def by simp
-qed
-
-lemma pmult_Cons_Cons: "((a::complex)#b#p) *** q = (a %*q) +++ (0#((b#p) *** q))"
-  by simp
-
-lemma elim_neg_conv: "- z \<equiv> (-1) * (z::complex)" by simp
-lemma eqT_intr: "PROP P \<Longrightarrow> (True \<Longrightarrow> PROP P )" "PROP P \<Longrightarrow> True" by blast+
-lemma negate_negate_rule: "Trueprop P \<equiv> \<not> P \<equiv> False" by (atomize (full), auto)
-lemma last_simps: "last [x] = x" "last (x#y#ys) = last (y#ys)" by simp_all
-lemma length_simps: "length [] = 0" "length (x#y#xs) = length xs + 2" "length [x] = 1" by simp_all
-
-lemma complex_entire: "(z::complex) \<noteq> 0 \<and> w \<noteq> 0 \<equiv> z*w \<noteq> 0" by simp
-lemma resolve_eq_ne: "(P \<equiv> True) \<equiv> (\<not>P \<equiv> False)" "(P \<equiv> False) \<equiv> (\<not>P \<equiv> True)" 
-  by (atomize (full)) simp_all
-lemma cqe_conv1: "poly [] x = 0 \<longleftrightarrow> True"  by simp
-lemma cqe_conv2: "(p \<Longrightarrow> (q \<equiv> r)) \<equiv> ((p \<and> q) \<equiv> (p \<and> r))"  (is "?l \<equiv> ?r")
-proof
-  assume "p \<Longrightarrow> q \<equiv> r" thus "p \<and> q \<equiv> p \<and> r" apply - apply (atomize (full)) by blast
-next
-  assume "p \<and> q \<equiv> p \<and> r" "p"
-  thus "q \<equiv> r" apply - apply (atomize (full)) apply blast done
-qed
-lemma poly_const_conv: "poly [c] (x::complex) = y \<longleftrightarrow> c = y" by simp
-
-end
\ No newline at end of file
--- a/src/HOL/Complex/README.html	Mon Dec 29 11:04:27 2008 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,67 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-
-<!-- $Id$ -->
-
-<HTML>
-
-<HEAD>
-  <meta http-equiv="content-type" content="text/html;charset=iso-8859-1">
-  <TITLE>HOL/Complex/README</TITLE>
-</HEAD>
-
-<BODY>
-
-<H1>Complex: The Complex Numbers</H1>
-		<P>This directory defines the type <KBD>complex</KBD> of the complex numbers,
-with numeric constants and some complex analysis.  The development includes
-nonstandard analysis for the complex numbers.  Note that the image
-<KBD>HOL-Complex</KBD> includes theories from the directories 
-<KBD><a href="#Anchor-Real">HOL/Real</a></KBD>  and <KBD><a href="#Anchor-Hyperreal">HOL/Hyperreal</a></KBD>. They define other types including <kbd>real</kbd> (the real numbers) and <kbd>hypreal</kbd> (the hyperreal or non-standard reals).
-
-<ul>
-<li><a href="CLim.html">CLim</a> Limits, continuous functions, and derivatives for the complex numbers
-<li><a href="CSeries.html">CSeries</a> Finite summation and infinite series for the complex numbers
-<li><a href="CStar.html">CStar</a> Star-transforms for the complex numbers, to form non-standard extensions of sets and functions
-<li><a href="Complex.html">Complex</a> The complex numbers
-<li><a href="NSCA.html">NSCA</a> Nonstandard complex analysis
-<li><a href="NSComplex.html">NSComplex</a> Ultrapower construction of the nonstandard complex numbers
-</ul>
-
-<h2><a name="Anchor-Real" id="Anchor-Real"></a>Real: Dedekind Cut Construction of the Real Line</h2>
-
-<ul>
-<li><a href="Lubs.html">Lubs</a> Definition of upper bounds, lubs and so on, to support completeness proofs.
-<li><a href="PReal.html">PReal</a> The positive reals constructed using Dedekind cuts
-<li><a href="Rational.html">Rational</a> The rational numbers constructed as equivalence classes of integers
-<li><a href="RComplete.html">RComplete</a> The reals are complete: they satisfy the supremum property. They also have the Archimedean property.
-<li><a href="RealDef.html">RealDef</a> The real numbers, their ordering properties, and embedding of the integers and the natural numbers
-<li><a href="RealPow.html">RealPow</a> Real numbers raised to natural number powers
-</ul>
-<h2><a name="Anchor-Hyperreal" id="Anchor-Hyperreal"></a>Hyperreal: Ultrafilter Construction of the Non-Standard Reals</h2>
-See J. D. Fleuriot and L. C. Paulson. Mechanizing Nonstandard Real Analysis. LMS J. Computation and Mathematics 3 (2000), 140-190.
-<ul>
-<li><a href="Filter.html">Filter</a> Theory of Filters and Ultrafilters. Main result is a version of the Ultrafilter Theorem proved using Zorn's Lemma.
-<li><a href="HLog.html">HLog</a> Non-standard logarithms
-<li><a href="HSeries.html">HSeries</a> Non-standard theory of finite summation and infinite series
-<li><a href="HTranscendental.html">HTranscendental</a> Non-standard extensions of transcendental functions
-<li><a href="HyperDef.html">HyperDef</a> Ultrapower construction of the hyperreals
-<li><a href="HyperNat.html">HyperNat</a> Ultrapower construction of the hypernaturals
-<li><a href="HyperPow.html">HyperPow</a> Powers theory for the hyperreals
-<!-- <li><a href="IntFloor.html">IntFloor</a> Floor and Ceiling functions relating the reals and integers -->
-<li><a href="Integration.html">Integration</a> Gage integrals
-<li><a href="Lim.html">Lim</a> Theory of limits, continuous functions, and derivatives
-<li><a href="Log.html">Log</a> Logarithms for the reals
-<li><a href="MacLaurin.html">MacLaurin</a> MacLaurin series
-<li><a href="NatStar.html">NatStar</a> Star-transforms for the hypernaturals, to form non-standard extensions of sets and functions involving the naturals or reals
-<li><a href="NthRoot.html">NthRoot</a> Existence of n-th roots of real numbers
-<li><a href="NSA.html">NSA</a> Theory defining sets of infinite numbers, infinitesimals, the infinitely close relation, and their various algebraic properties.
-<li><a href="Poly.html">Poly</a> Univariate real polynomials
-<li><a href="SEQ.html">SEQ</a> Convergence of sequences and series using standard and nonstandard analysis
-<li><a href="Series.html">Series</a> Finite summation and infinite series for the reals
-<li><a href="Star.html">Star</a> Nonstandard extensions of real sets and real functions
-<li><a href="Transcendental.html">Transcendental</a> Power series and transcendental functions
-</ul>
-<HR>
-<P>Last modified $Date$
-</BODY>
-</HTML>
--- a/src/HOL/Complex/document/root.tex	Mon Dec 29 11:04:27 2008 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-
-% $Id$
-
-\documentclass[11pt,a4paper]{article}
-\usepackage{graphicx,isabelle,isabellesym,latexsym}
-\usepackage[latin1]{inputenc}
-\usepackage{pdfsetup}
-
-\urlstyle{rm}
-\isabellestyle{it}
-\pagestyle{myheadings}
-
-\begin{document}
-
-\title{Isabelle/HOL-Complex --- Higher-Order Logic with Complex Numbers}
-\maketitle
-
-\tableofcontents
-
-\begin{center}
-  \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{session_graph}
-\end{center}
-
-\newpage
-
-\renewcommand{\isamarkupheader}[1]%
-{\section{\isabellecontext: #1}\markright{THEORY~``\isabellecontext''}}
-
-\parindent 0pt\parskip 0.5ex
-\input{session}
-
-\end{document}
--- a/src/HOL/Complex_Main.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Complex_Main.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,15 +1,10 @@
-(*  Title:      HOL/Complex_Main.thy
-    Author:     Lawrence C Paulson, Cambridge University Computer Laboratory
-    Copyright   2003  University of Cambridge
-*)
-
-header{*Comprehensive Complex Theory*}
+header {* Comprehensive Complex Theory *}
 
 theory Complex_Main
 imports
   Main
   Real
-  "~~/src/HOL/Complex/Fundamental_Theorem_Algebra"
+  Fundamental_Theorem_Algebra
   Log
   Ln
   Taylor
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Dense_Linear_Order.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -0,0 +1,877 @@
+(* Author: Amine Chaieb, TU Muenchen *)
+
+header {* Dense linear order without endpoints
+  and a quantifier elimination procedure in Ferrante and Rackoff style *}
+
+theory Dense_Linear_Order
+imports Plain Groebner_Basis
+uses
+  "~~/src/HOL/Tools/Qelim/langford_data.ML"
+  "~~/src/HOL/Tools/Qelim/ferrante_rackoff_data.ML"
+  ("~~/src/HOL/Tools/Qelim/langford.ML")
+  ("~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML")
+begin
+
+setup {* Langford_Data.setup #> Ferrante_Rackoff_Data.setup *}
+
+context linorder
+begin
+
+lemma less_not_permute: "\<not> (x < y \<and> y < x)" by (simp add: not_less linear)
+
+lemma gather_simps: 
+  shows 
+  "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> U. x < y) \<and> x < u \<and> P x) \<longleftrightarrow> (\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> (insert u U). x < y) \<and> P x)"
+  and "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> U. x < y) \<and> l < x \<and> P x) \<longleftrightarrow> (\<exists>x. (\<forall>y \<in> (insert l L). y < x) \<and> (\<forall>y \<in> U. x < y) \<and> P x)"
+  "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> U. x < y) \<and> x < u) \<longleftrightarrow> (\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> (insert u U). x < y))"
+  and "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> U. x < y) \<and> l < x) \<longleftrightarrow> (\<exists>x. (\<forall>y \<in> (insert l L). y < x) \<and> (\<forall>y \<in> U. x < y))"  by auto
+
+lemma 
+  gather_start: "(\<exists>x. P x) \<equiv> (\<exists>x. (\<forall>y \<in> {}. y < x) \<and> (\<forall>y\<in> {}. x < y) \<and> P x)" 
+  by simp
+
+text{* Theorems for @{text "\<exists>z. \<forall>x. x < z \<longrightarrow> (P x \<longleftrightarrow> P\<^bsub>-\<infinity>\<^esub>)"}*}
+lemma minf_lt:  "\<exists>z . \<forall>x. x < z \<longrightarrow> (x < t \<longleftrightarrow> True)" by auto
+lemma minf_gt: "\<exists>z . \<forall>x. x < z \<longrightarrow>  (t < x \<longleftrightarrow>  False)"
+  by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le)
+
+lemma minf_le: "\<exists>z. \<forall>x. x < z \<longrightarrow> (x \<le> t \<longleftrightarrow> True)" by (auto simp add: less_le)
+lemma minf_ge: "\<exists>z. \<forall>x. x < z \<longrightarrow> (t \<le> x \<longleftrightarrow> False)"
+  by (auto simp add: less_le not_less not_le)
+lemma minf_eq: "\<exists>z. \<forall>x. x < z \<longrightarrow> (x = t \<longleftrightarrow> False)" by auto
+lemma minf_neq: "\<exists>z. \<forall>x. x < z \<longrightarrow> (x \<noteq> t \<longleftrightarrow> True)" by auto
+lemma minf_P: "\<exists>z. \<forall>x. x < z \<longrightarrow> (P \<longleftrightarrow> P)" by blast
+
+text{* Theorems for @{text "\<exists>z. \<forall>x. z < x \<longrightarrow> (P x \<longleftrightarrow> P\<^bsub>+\<infinity>\<^esub>)"}*}
+lemma pinf_gt:  "\<exists>z . \<forall>x. z < x \<longrightarrow> (t < x \<longleftrightarrow> True)" by auto
+lemma pinf_lt: "\<exists>z . \<forall>x. z < x \<longrightarrow>  (x < t \<longleftrightarrow>  False)"
+  by (simp add: not_less) (rule exI[where x="t"], auto simp add: less_le)
+
+lemma pinf_ge: "\<exists>z. \<forall>x. z < x \<longrightarrow> (t \<le> x \<longleftrightarrow> True)" by (auto simp add: less_le)
+lemma pinf_le: "\<exists>z. \<forall>x. z < x \<longrightarrow> (x \<le> t \<longleftrightarrow> False)"
+  by (auto simp add: less_le not_less not_le)
+lemma pinf_eq: "\<exists>z. \<forall>x. z < x \<longrightarrow> (x = t \<longleftrightarrow> False)" by auto
+lemma pinf_neq: "\<exists>z. \<forall>x. z < x \<longrightarrow> (x \<noteq> t \<longleftrightarrow> True)" by auto
+lemma pinf_P: "\<exists>z. \<forall>x. z < x \<longrightarrow> (P \<longleftrightarrow> P)" by blast
+
+lemma nmi_lt: "t \<in> U \<Longrightarrow> \<forall>x. \<not>True \<and> x < t \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma nmi_gt: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and> t < x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)"
+  by (auto simp add: le_less)
+lemma  nmi_le: "t \<in> U \<Longrightarrow> \<forall>x. \<not>True \<and> x\<le> t \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_ge: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and> t\<le> x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_eq: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and>  x = t \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_neq: "t \<in> U \<Longrightarrow>\<forall>x. \<not>True \<and> x \<noteq> t \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_P: "\<forall> x. ~P \<and> P \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_conj: "\<lbrakk>\<forall>x. \<not>P1' \<and> P1 x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x) ;
+  \<forall>x. \<not>P2' \<and> P2 x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)\<rbrakk> \<Longrightarrow>
+  \<forall>x. \<not>(P1' \<and> P2') \<and> (P1 x \<and> P2 x) \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+lemma  nmi_disj: "\<lbrakk>\<forall>x. \<not>P1' \<and> P1 x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x) ;
+  \<forall>x. \<not>P2' \<and> P2 x \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)\<rbrakk> \<Longrightarrow>
+  \<forall>x. \<not>(P1' \<or> P2') \<and> (P1 x \<or> P2 x) \<longrightarrow>  (\<exists> u\<in> U. u \<le> x)" by auto
+
+lemma  npi_lt: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and>  x < t \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by (auto simp add: le_less)
+lemma  npi_gt: "t \<in> U \<Longrightarrow> \<forall>x. \<not>True \<and> t < x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_le: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and>  x \<le> t \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_ge: "t \<in> U \<Longrightarrow> \<forall>x. \<not>True \<and> t \<le> x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_eq: "t \<in> U \<Longrightarrow> \<forall>x. \<not>False \<and>  x = t \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_neq: "t \<in> U \<Longrightarrow> \<forall>x. \<not>True \<and> x \<noteq> t \<longrightarrow>  (\<exists> u\<in> U. x \<le> u )" by auto
+lemma  npi_P: "\<forall> x. ~P \<and> P \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_conj: "\<lbrakk>\<forall>x. \<not>P1' \<and> P1 x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u) ;  \<forall>x. \<not>P2' \<and> P2 x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)\<rbrakk>
+  \<Longrightarrow>  \<forall>x. \<not>(P1' \<and> P2') \<and> (P1 x \<and> P2 x) \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+lemma  npi_disj: "\<lbrakk>\<forall>x. \<not>P1' \<and> P1 x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u) ; \<forall>x. \<not>P2' \<and> P2 x \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)\<rbrakk>
+  \<Longrightarrow> \<forall>x. \<not>(P1' \<or> P2') \<and> (P1 x \<or> P2 x) \<longrightarrow>  (\<exists> u\<in> U. x \<le> u)" by auto
+
+lemma lin_dense_lt: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t < u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> x < t \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> y < t)"
+proof(clarsimp)
+  fix x l u y  assume tU: "t \<in> U" and noU: "\<forall>t. l < t \<and> t < u \<longrightarrow> t \<notin> U" and lx: "l < x"
+    and xu: "x<u"  and px: "x < t" and ly: "l<y" and yu:"y < u"
+  from tU noU ly yu have tny: "t\<noteq>y" by auto
+  {assume H: "t < y"
+    from less_trans[OF lx px] less_trans[OF H yu]
+    have "l < t \<and> t < u"  by simp
+    with tU noU have "False" by auto}
+  hence "\<not> t < y"  by auto hence "y \<le> t" by (simp add: not_less)
+  thus "y < t" using tny by (simp add: less_le)
+qed
+
+lemma lin_dense_gt: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l < x \<and> x < u \<and> t < x \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> t < y)"
+proof(clarsimp)
+  fix x l u y
+  assume tU: "t \<in> U" and noU: "\<forall>t. l < t \<and> t < u \<longrightarrow> t \<notin> U" and lx: "l < x" and xu: "x<u"
+  and px: "t < x" and ly: "l<y" and yu:"y < u"
+  from tU noU ly yu have tny: "t\<noteq>y" by auto
+  {assume H: "y< t"
+    from less_trans[OF ly H] less_trans[OF px xu] have "l < t \<and> t < u" by simp
+    with tU noU have "False" by auto}
+  hence "\<not> y<t"  by auto hence "t \<le> y" by (auto simp add: not_less)
+  thus "t < y" using tny by (simp add:less_le)
+qed
+
+lemma lin_dense_le: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> x \<le> t \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> y\<le> t)"
+proof(clarsimp)
+  fix x l u y
+  assume tU: "t \<in> U" and noU: "\<forall>t. l < t \<and> t < u \<longrightarrow> t \<notin> U" and lx: "l < x" and xu: "x<u"
+  and px: "x \<le> t" and ly: "l<y" and yu:"y < u"
+  from tU noU ly yu have tny: "t\<noteq>y" by auto
+  {assume H: "t < y"
+    from less_le_trans[OF lx px] less_trans[OF H yu]
+    have "l < t \<and> t < u" by simp
+    with tU noU have "False" by auto}
+  hence "\<not> t < y"  by auto thus "y \<le> t" by (simp add: not_less)
+qed
+
+lemma lin_dense_ge: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> t \<le> x \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> t \<le> y)"
+proof(clarsimp)
+  fix x l u y
+  assume tU: "t \<in> U" and noU: "\<forall>t. l < t \<and> t < u \<longrightarrow> t \<notin> U" and lx: "l < x" and xu: "x<u"
+  and px: "t \<le> x" and ly: "l<y" and yu:"y < u"
+  from tU noU ly yu have tny: "t\<noteq>y" by auto
+  {assume H: "y< t"
+    from less_trans[OF ly H] le_less_trans[OF px xu]
+    have "l < t \<and> t < u" by simp
+    with tU noU have "False" by auto}
+  hence "\<not> y<t"  by auto thus "t \<le> y" by (simp add: not_less)
+qed
+lemma lin_dense_eq: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> x = t   \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> y= t)"  by auto
+lemma lin_dense_neq: "t \<in> U \<Longrightarrow> \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> x \<noteq> t   \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> y\<noteq> t)"  by auto
+lemma lin_dense_P: "\<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> P   \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> P)"  by auto
+
+lemma lin_dense_conj:
+  "\<lbrakk>\<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> P1 x
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> P1 y) ;
+  \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> P2 x
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> P2 y)\<rbrakk> \<Longrightarrow>
+  \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> (P1 x \<and> P2 x)
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> (P1 y \<and> P2 y))"
+  by blast
+lemma lin_dense_disj:
+  "\<lbrakk>\<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> P1 x
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> P1 y) ;
+  \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> P2 x
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> P2 y)\<rbrakk> \<Longrightarrow>
+  \<forall>x l u. (\<forall> t. l < t \<and> t< u \<longrightarrow> t \<notin> U) \<and> l< x \<and> x < u \<and> (P1 x \<or> P2 x)
+  \<longrightarrow> (\<forall> y. l < y \<and> y < u \<longrightarrow> (P1 y \<or> P2 y))"
+  by blast
+
+lemma npmibnd: "\<lbrakk>\<forall>x. \<not> MP \<and> P x \<longrightarrow> (\<exists> u\<in> U. u \<le> x); \<forall>x. \<not>PP \<and> P x \<longrightarrow> (\<exists> u\<in> U. x \<le> u)\<rbrakk>
+  \<Longrightarrow> \<forall>x. \<not> MP \<and> \<not>PP \<and> P x \<longrightarrow> (\<exists> u\<in> U. \<exists> u' \<in> U. u \<le> x \<and> x \<le> u')"
+by auto
+
+lemma finite_set_intervals:
+  assumes px: "P x" and lx: "l \<le> x" and xu: "x \<le> u" and linS: "l\<in> S"
+  and uinS: "u \<in> S" and fS:"finite S" and lS: "\<forall> x\<in> S. l \<le> x" and Su: "\<forall> x\<in> S. x \<le> u"
+  shows "\<exists> a \<in> S. \<exists> b \<in> S. (\<forall> y. a < y \<and> y < b \<longrightarrow> y \<notin> S) \<and> a \<le> x \<and> x \<le> b \<and> P x"
+proof-
+  let ?Mx = "{y. y\<in> S \<and> y \<le> x}"
+  let ?xM = "{y. y\<in> S \<and> x \<le> y}"
+  let ?a = "Max ?Mx"
+  let ?b = "Min ?xM"
+  have MxS: "?Mx \<subseteq> S" by blast
+  hence fMx: "finite ?Mx" using fS finite_subset by auto
+  from lx linS have linMx: "l \<in> ?Mx" by blast
+  hence Mxne: "?Mx \<noteq> {}" by blast
+  have xMS: "?xM \<subseteq> S" by blast
+  hence fxM: "finite ?xM" using fS finite_subset by auto
+  from xu uinS have linxM: "u \<in> ?xM" by blast
+  hence xMne: "?xM \<noteq> {}" by blast
+  have ax:"?a \<le> x" using Mxne fMx by auto
+  have xb:"x \<le> ?b" using xMne fxM by auto
+  have "?a \<in> ?Mx" using Max_in[OF fMx Mxne] by simp hence ainS: "?a \<in> S" using MxS by blast
+  have "?b \<in> ?xM" using Min_in[OF fxM xMne] by simp hence binS: "?b \<in> S" using xMS by blast
+  have noy:"\<forall> y. ?a < y \<and> y < ?b \<longrightarrow> y \<notin> S"
+  proof(clarsimp)
+    fix y   assume ay: "?a < y" and yb: "y < ?b" and yS: "y \<in> S"
+    from yS have "y\<in> ?Mx \<or> y\<in> ?xM" by (auto simp add: linear)
+    moreover {assume "y \<in> ?Mx" hence "y \<le> ?a" using Mxne fMx by auto with ay have "False" by (simp add: not_le[symmetric])}
+    moreover {assume "y \<in> ?xM" hence "?b \<le> y" using xMne fxM by auto with yb have "False" by (simp add: not_le[symmetric])}
+    ultimately show "False" by blast
+  qed
+  from ainS binS noy ax xb px show ?thesis by blast
+qed
+
+lemma finite_set_intervals2:
+  assumes px: "P x" and lx: "l \<le> x" and xu: "x \<le> u" and linS: "l\<in> S"
+  and uinS: "u \<in> S" and fS:"finite S" and lS: "\<forall> x\<in> S. l \<le> x" and Su: "\<forall> x\<in> S. x \<le> u"
+  shows "(\<exists> s\<in> S. P s) \<or> (\<exists> a \<in> S. \<exists> b \<in> S. (\<forall> y. a < y \<and> y < b \<longrightarrow> y \<notin> S) \<and> a < x \<and> x < b \<and> P x)"
+proof-
+  from finite_set_intervals[where P="P", OF px lx xu linS uinS fS lS Su]
+  obtain a and b where
+    as: "a\<in> S" and bs: "b\<in> S" and noS:"\<forall>y. a < y \<and> y < b \<longrightarrow> y \<notin> S"
+    and axb: "a \<le> x \<and> x \<le> b \<and> P x"  by auto
+  from axb have "x= a \<or> x= b \<or> (a < x \<and> x < b)" by (auto simp add: le_less)
+  thus ?thesis using px as bs noS by blast
+qed
+
+end
+
+section {* The classical QE after Langford for dense linear orders *}
+
+context dense_linear_order
+begin
+
+lemma interval_empty_iff:
+  "{y. x < y \<and> y < z} = {} \<longleftrightarrow> \<not> x < z"
+  by (auto dest: dense)
+
+lemma dlo_qe_bnds: 
+  assumes ne: "L \<noteq> {}" and neU: "U \<noteq> {}" and fL: "finite L" and fU: "finite U"
+  shows "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> U. x < y)) \<equiv> (\<forall> l \<in> L. \<forall>u \<in> U. l < u)"
+proof (simp only: atomize_eq, rule iffI)
+  assume H: "\<exists>x. (\<forall>y\<in>L. y < x) \<and> (\<forall>y\<in>U. x < y)"
+  then obtain x where xL: "\<forall>y\<in>L. y < x" and xU: "\<forall>y\<in>U. x < y" by blast
+  {fix l u assume l: "l \<in> L" and u: "u \<in> U"
+    have "l < x" using xL l by blast
+    also have "x < u" using xU u by blast
+    finally (less_trans) have "l < u" .}
+  thus "\<forall>l\<in>L. \<forall>u\<in>U. l < u" by blast
+next
+  assume H: "\<forall>l\<in>L. \<forall>u\<in>U. l < u"
+  let ?ML = "Max L"
+  let ?MU = "Min U"  
+  from fL ne have th1: "?ML \<in> L" and th1': "\<forall>l\<in>L. l \<le> ?ML" by auto
+  from fU neU have th2: "?MU \<in> U" and th2': "\<forall>u\<in>U. ?MU \<le> u" by auto
+  from th1 th2 H have "?ML < ?MU" by auto
+  with dense obtain w where th3: "?ML < w" and th4: "w < ?MU" by blast
+  from th3 th1' have "\<forall>l \<in> L. l < w" by auto
+  moreover from th4 th2' have "\<forall>u \<in> U. w < u" by auto
+  ultimately show "\<exists>x. (\<forall>y\<in>L. y < x) \<and> (\<forall>y\<in>U. x < y)" by auto
+qed
+
+lemma dlo_qe_noub: 
+  assumes ne: "L \<noteq> {}" and fL: "finite L"
+  shows "(\<exists>x. (\<forall>y \<in> L. y < x) \<and> (\<forall>y \<in> {}. x < y)) \<equiv> True"
+proof(simp add: atomize_eq)
+  from gt_ex[of "Max L"] obtain M where M: "Max L < M" by blast
+  from ne fL have "\<forall>x \<in> L. x \<le> Max L" by simp
+  with M have "\<forall>x\<in>L. x < M" by (auto intro: le_less_trans)
+  thus "\<exists>x. \<forall>y\<in>L. y < x" by blast
+qed
+
+lemma dlo_qe_nolb: 
+  assumes ne: "U \<noteq> {}" and fU: "finite U"
+  shows "(\<exists>x. (\<forall>y \<in> {}. y < x) \<and> (\<forall>y \<in> U. x < y)) \<equiv> True"
+proof(simp add: atomize_eq)
+  from lt_ex[of "Min U"] obtain M where M: "M < Min U" by blast
+  from ne fU have "\<forall>x \<in> U. Min U \<le> x" by simp
+  with M have "\<forall>x\<in>U. M < x" by (auto intro: less_le_trans)
+  thus "\<exists>x. \<forall>y\<in>U. x < y" by blast
+qed
+
+lemma exists_neq: "\<exists>(x::'a). x \<noteq> t" "\<exists>(x::'a). t \<noteq> x" 
+  using gt_ex[of t] by auto
+
+lemmas dlo_simps = order_refl less_irrefl not_less not_le exists_neq 
+  le_less neq_iff linear less_not_permute
+
+lemma axiom: "dense_linear_order (op \<le>) (op <)" by (rule dense_linear_order_axioms)
+lemma atoms:
+  shows "TERM (less :: 'a \<Rightarrow> _)"
+    and "TERM (less_eq :: 'a \<Rightarrow> _)"
+    and "TERM (op = :: 'a \<Rightarrow> _)" .
+
+declare axiom[langford qe: dlo_qe_bnds dlo_qe_nolb dlo_qe_noub gather: gather_start gather_simps atoms: atoms]
+declare dlo_simps[langfordsimp]
+
+end
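+(* A small worked sketch of the normal form used by the declarations above
+   (the concrete goal shape is an illustration, not part of the theory): on a
+   goal such as "\<exists>x. a < x \<and> x < b \<and> x < c", gather_start introduces empty
+   bound sets and gather_simps collect the literals, giving
+   "\<exists>x. (\<forall>y\<in>{a}. y < x) \<and> (\<forall>y\<in>{b, c}. x < y)",
+   which dlo_qe_bnds then reduces to "\<forall>l\<in>{a}. \<forall>u\<in>{b, c}. l < u". *)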
+
+(* FIXME: Move to HOL -- together with the conj_aci_rule in langford.ML *)
+lemma dnf:
+  "(P & (Q | R)) = ((P&Q) | (P&R))" 
+  "((Q | R) & P) = ((Q&P) | (R&P))"
+  by blast+
+
+lemmas weak_dnf_simps = simp_thms dnf
+
+lemma nnf_simps:
+    "(\<not>(P \<and> Q)) = (\<not>P \<or> \<not>Q)" "(\<not>(P \<or> Q)) = (\<not>P \<and> \<not>Q)" "(P \<longrightarrow> Q) = (\<not>P \<or> Q)"
+    "(P = Q) = ((P \<and> Q) \<or> (\<not>P \<and> \<not> Q))" "(\<not> \<not>(P)) = P"
+  by blast+
+
+lemma ex_distrib: "(\<exists>x. P x \<or> Q x) \<longleftrightarrow> ((\<exists>x. P x) \<or> (\<exists>x. Q x))" by blast
+
+lemmas dnf_simps = weak_dnf_simps nnf_simps ex_distrib
+
+use "~~/src/HOL/Tools/Qelim/langford.ML"
+method_setup dlo = {*
+  Method.ctxt_args (Method.SIMPLE_METHOD' o LangfordQE.dlo_tac)
+*} "Langford's algorithm for quantifier elimination in dense linear orders"
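+(* A minimal usage sketch for the dlo method.  Assuming a concrete type of
+   class dense_linear_order (e.g. rat or real) is visible to the langford
+   setup above, a goal of the following shape should be within its scope:
+     lemma "(\<exists>x::rat. a < x \<and> x < b) \<longleftrightarrow> a < b" by dlo
+*)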
+
+
+section {* Constructive dense linear orders yield QE for linear arithmetic over ordered fields -- see @{text "Arith_Tools.thy"} *}
+
+text {* Linear order without upper bounds *}
+
+class_locale linorder_stupid_syntax = linorder
+begin
+notation
+  less_eq  ("op \<sqsubseteq>") and
+  less_eq  ("(_/ \<sqsubseteq> _)" [51, 51] 50) and
+  less  ("op \<sqsubset>") and
+  less  ("(_/ \<sqsubset> _)"  [51, 51] 50)
+
+end
+
+class_locale linorder_no_ub = linorder_stupid_syntax +
+  assumes gt_ex: "\<exists>y. less x y"
+begin
+lemma ge_ex: "\<exists>y. x \<sqsubseteq> y" using gt_ex by auto
+
+text {* Theorems for @{text "\<exists>z. \<forall>x. z \<sqsubset> x \<longrightarrow> (P x \<longleftrightarrow> P\<^bsub>+\<infinity>\<^esub>)"} *}
+lemma pinf_conj:
+  assumes ex1: "\<exists>z1. \<forall>x. z1 \<sqsubset> x \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+  and ex2: "\<exists>z2. \<forall>x. z2 \<sqsubset> x \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
+  shows "\<exists>z. \<forall>x. z \<sqsubset>  x \<longrightarrow> ((P1 x \<and> P2 x) \<longleftrightarrow> (P1' \<and> P2'))"
+proof-
+  from ex1 ex2 obtain z1 and z2 where z1: "\<forall>x. z1 \<sqsubset> x \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+     and z2: "\<forall>x. z2 \<sqsubset> x \<longrightarrow> (P2 x \<longleftrightarrow> P2')" by blast
+  from gt_ex obtain z where z:"ord.max less_eq z1 z2 \<sqsubset> z" by blast
+  from z have zz1: "z1 \<sqsubset> z" and zz2: "z2 \<sqsubset> z" by simp_all
+  {fix x assume H: "z \<sqsubset> x"
+    from less_trans[OF zz1 H] less_trans[OF zz2 H]
+    have "(P1 x \<and> P2 x) \<longleftrightarrow> (P1' \<and> P2')"  using z1 zz1 z2 zz2 by auto
+  }
+  thus ?thesis by blast
+qed
+
+lemma pinf_disj:
+  assumes ex1: "\<exists>z1. \<forall>x. z1 \<sqsubset> x \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+  and ex2: "\<exists>z2. \<forall>x. z2 \<sqsubset> x \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
+  shows "\<exists>z. \<forall>x. z \<sqsubset>  x \<longrightarrow> ((P1 x \<or> P2 x) \<longleftrightarrow> (P1' \<or> P2'))"
+proof-
+  from ex1 ex2 obtain z1 and z2 where z1: "\<forall>x. z1 \<sqsubset> x \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+     and z2: "\<forall>x. z2 \<sqsubset> x \<longrightarrow> (P2 x \<longleftrightarrow> P2')" by blast
+  from gt_ex obtain z where z:"ord.max less_eq z1 z2 \<sqsubset> z" by blast
+  from z have zz1: "z1 \<sqsubset> z" and zz2: "z2 \<sqsubset> z" by simp_all
+  {fix x assume H: "z \<sqsubset> x"
+    from less_trans[OF zz1 H] less_trans[OF zz2 H]
+    have "(P1 x \<or> P2 x) \<longleftrightarrow> (P1' \<or> P2')"  using z1 zz1 z2 zz2 by auto
+  }
+  thus ?thesis by blast
+qed
+
+lemma pinf_ex: assumes ex:"\<exists>z. \<forall>x. z \<sqsubset> x \<longrightarrow> (P x \<longleftrightarrow> P1)" and p1: P1 shows "\<exists> x. P x"
+proof-
+  from ex obtain z where z: "\<forall>x. z \<sqsubset> x \<longrightarrow> (P x \<longleftrightarrow> P1)" by blast
+  from gt_ex obtain x where x: "z \<sqsubset> x" by blast
+  from z x p1 show ?thesis by blast
+qed
+
+end
+
+text {* Linear order without lower bounds *}
+
+class_locale linorder_no_lb = linorder_stupid_syntax +
+  assumes lt_ex: "\<exists>y. less y x"
+begin
+lemma le_ex: "\<exists>y. y \<sqsubseteq> x" using lt_ex by auto
+
+
+text {* Theorems for @{text "\<exists>z. \<forall>x. x \<sqsubset> z \<longrightarrow> (P x \<longleftrightarrow> P\<^bsub>-\<infinity>\<^esub>)"} *}
+lemma minf_conj:
+  assumes ex1: "\<exists>z1. \<forall>x. x \<sqsubset> z1 \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+  and ex2: "\<exists>z2. \<forall>x. x \<sqsubset> z2 \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
+  shows "\<exists>z. \<forall>x. x \<sqsubset>  z \<longrightarrow> ((P1 x \<and> P2 x) \<longleftrightarrow> (P1' \<and> P2'))"
+proof-
+  from ex1 ex2 obtain z1 and z2 where z1: "\<forall>x. x \<sqsubset> z1 \<longrightarrow> (P1 x \<longleftrightarrow> P1')"and z2: "\<forall>x. x \<sqsubset> z2 \<longrightarrow> (P2 x \<longleftrightarrow> P2')" by blast
+  from lt_ex obtain z where z:"z \<sqsubset> ord.min less_eq z1 z2" by blast
+  from z have zz1: "z \<sqsubset> z1" and zz2: "z \<sqsubset> z2" by simp_all
+  {fix x assume H: "x \<sqsubset> z"
+    from less_trans[OF H zz1] less_trans[OF H zz2]
+    have "(P1 x \<and> P2 x) \<longleftrightarrow> (P1' \<and> P2')"  using z1 zz1 z2 zz2 by auto
+  }
+  thus ?thesis by blast
+qed
+
+lemma minf_disj:
+  assumes ex1: "\<exists>z1. \<forall>x. x \<sqsubset> z1 \<longrightarrow> (P1 x \<longleftrightarrow> P1')"
+  and ex2: "\<exists>z2. \<forall>x. x \<sqsubset> z2 \<longrightarrow> (P2 x \<longleftrightarrow> P2')"
+  shows "\<exists>z. \<forall>x. x \<sqsubset>  z \<longrightarrow> ((P1 x \<or> P2 x) \<longleftrightarrow> (P1' \<or> P2'))"
+proof-
+  from ex1 ex2 obtain z1 and z2 where z1: "\<forall>x. x \<sqsubset> z1 \<longrightarrow> (P1 x \<longleftrightarrow> P1')"and z2: "\<forall>x. x \<sqsubset> z2 \<longrightarrow> (P2 x \<longleftrightarrow> P2')" by blast
+  from lt_ex obtain z where z:"z \<sqsubset> ord.min less_eq z1 z2" by blast
+  from z have zz1: "z \<sqsubset> z1" and zz2: "z \<sqsubset> z2" by simp_all
+  {fix x assume H: "x \<sqsubset> z"
+    from less_trans[OF H zz1] less_trans[OF H zz2]
+    have "(P1 x \<or> P2 x) \<longleftrightarrow> (P1' \<or> P2')"  using z1 zz1 z2 zz2 by auto
+  }
+  thus ?thesis by blast
+qed
+
+lemma minf_ex: assumes ex:"\<exists>z. \<forall>x. x \<sqsubset> z \<longrightarrow> (P x \<longleftrightarrow> P1)" and p1: P1 shows "\<exists> x. P x"
+proof-
+  from ex obtain z where z: "\<forall>x. x \<sqsubset> z \<longrightarrow> (P x \<longleftrightarrow> P1)" by blast
+  from lt_ex obtain x where x: "x \<sqsubset> z" by blast
+  from z x p1 show ?thesis by blast
+qed
+
+end
+
+
+class_locale constr_dense_linear_order = linorder_no_lb + linorder_no_ub +
+  fixes between
+  assumes between_less: "less x y \<Longrightarrow> less x (between x y) \<and> less (between x y) y"
+     and  between_same: "between x x = x"
+
+class_interpretation  constr_dense_linear_order < dense_linear_order 
+  apply unfold_locales
+  using gt_ex lt_ex between_less
+    by (auto, rule_tac x="between x y" in exI, simp)
+
+context  constr_dense_linear_order
+begin
+
+lemma rinf_U:
+  assumes fU: "finite U"
+  and lin_dense: "\<forall>x l u. (\<forall> t. l \<sqsubset> t \<and> t\<sqsubset> u \<longrightarrow> t \<notin> U) \<and> l\<sqsubset> x \<and> x \<sqsubset> u \<and> P x
+  \<longrightarrow> (\<forall> y. l \<sqsubset> y \<and> y \<sqsubset> u \<longrightarrow> P y )"
+  and nmpiU: "\<forall>x. \<not> MP \<and> \<not>PP \<and> P x \<longrightarrow> (\<exists> u\<in> U. \<exists> u' \<in> U. u \<sqsubseteq> x \<and> x \<sqsubseteq> u')"
+  and nmi: "\<not> MP"  and npi: "\<not> PP"  and ex: "\<exists> x.  P x"
+  shows "\<exists> u\<in> U. \<exists> u' \<in> U. P (between u u')"
+proof-
+  from ex obtain x where px: "P x" by blast
+  from px nmi npi nmpiU have "\<exists> u\<in> U. \<exists> u' \<in> U. u \<sqsubseteq> x \<and> x \<sqsubseteq> u'" by auto
+  then obtain u and u' where uU:"u\<in> U" and uU': "u' \<in> U" and ux:"u \<sqsubseteq> x" and xu':"x \<sqsubseteq> u'" by auto
+  from uU have Une: "U \<noteq> {}" by auto
+  term "linorder.Min less_eq"
+  let ?l = "linorder.Min less_eq U"
+  let ?u = "linorder.Max less_eq U"
+  have linM: "?l \<in> U" using fU Une by simp
+  have uinM: "?u \<in> U" using fU Une by simp
+  have lM: "\<forall> t\<in> U. ?l \<sqsubseteq> t" using Une fU by auto
+  have Mu: "\<forall> t\<in> U. t \<sqsubseteq> ?u" using Une fU by auto
+  have th:"?l \<sqsubseteq> u" using uU Une lM by auto
+  from order_trans[OF th ux] have lx: "?l \<sqsubseteq> x" .
+  have th: "u' \<sqsubseteq> ?u" using uU' Une Mu by simp
+  from order_trans[OF xu' th] have xu: "x \<sqsubseteq> ?u" .
+  from finite_set_intervals2[where P="P",OF px lx xu linM uinM fU lM Mu]
+  have "(\<exists> s\<in> U. P s) \<or>
+      (\<exists> t1\<in> U. \<exists> t2 \<in> U. (\<forall> y. t1 \<sqsubset> y \<and> y \<sqsubset> t2 \<longrightarrow> y \<notin> U) \<and> t1 \<sqsubset> x \<and> x \<sqsubset> t2 \<and> P x)" .
+  moreover { fix u assume um: "u\<in>U" and pu: "P u"
+    have "between u u = u" by (simp add: between_same)
+    with um pu have "P (between u u)" by simp
+    with um have ?thesis by blast}
+  moreover{
+    assume "\<exists> t1\<in> U. \<exists> t2 \<in> U. (\<forall> y. t1 \<sqsubset> y \<and> y \<sqsubset> t2 \<longrightarrow> y \<notin> U) \<and> t1 \<sqsubset> x \<and> x \<sqsubset> t2 \<and> P x"
+      then obtain t1 and t2 where t1M: "t1 \<in> U" and t2M: "t2\<in> U"
+        and noM: "\<forall> y. t1 \<sqsubset> y \<and> y \<sqsubset> t2 \<longrightarrow> y \<notin> U" and t1x: "t1 \<sqsubset> x" and xt2: "x \<sqsubset> t2" and px: "P x"
+        by blast
+      from less_trans[OF t1x xt2] have t1t2: "t1 \<sqsubset> t2" .
+      let ?u = "between t1 t2"
+      from between_less t1t2 have t1lu: "t1 \<sqsubset> ?u" and ut2: "?u \<sqsubset> t2" by auto
+      from lin_dense noM t1x xt2 px t1lu ut2 have "P ?u" by blast
+      with t1M t2M have ?thesis by blast}
+    ultimately show ?thesis by blast
+  qed
+
+theorem fr_eq:
+  assumes fU: "finite U"
+  and lin_dense: "\<forall>x l u. (\<forall> t. l \<sqsubset> t \<and> t\<sqsubset> u \<longrightarrow> t \<notin> U) \<and> l\<sqsubset> x \<and> x \<sqsubset> u \<and> P x
+   \<longrightarrow> (\<forall> y. l \<sqsubset> y \<and> y \<sqsubset> u \<longrightarrow> P y )"
+  and nmibnd: "\<forall>x. \<not> MP \<and> P x \<longrightarrow> (\<exists> u\<in> U. u \<sqsubseteq> x)"
+  and npibnd: "\<forall>x. \<not>PP \<and> P x \<longrightarrow> (\<exists> u\<in> U. x \<sqsubseteq> u)"
+  and mi: "\<exists>z. \<forall>x. x \<sqsubset> z \<longrightarrow> (P x = MP)"  and pi: "\<exists>z. \<forall>x. z \<sqsubset> x \<longrightarrow> (P x = PP)"
+  shows "(\<exists> x. P x) \<equiv> (MP \<or> PP \<or> (\<exists> u \<in> U. \<exists> u'\<in> U. P (between u u')))"
+  (is "_ \<equiv> (_ \<or> _ \<or> ?F)" is "?E \<equiv> ?D")
+proof-
+ {
+   assume px: "\<exists> x. P x"
+   have "MP \<or> PP \<or> (\<not> MP \<and> \<not> PP)" by blast
+   moreover {assume "MP \<or> PP" hence "?D" by blast}
+   moreover {assume nmi: "\<not> MP" and npi: "\<not> PP"
+     from npmibnd[OF nmibnd npibnd]
+     have nmpiU: "\<forall>x. \<not> MP \<and> \<not>PP \<and> P x \<longrightarrow> (\<exists> u\<in> U. \<exists> u' \<in> U. u \<sqsubseteq> x \<and> x \<sqsubseteq> u')" .
+     from rinf_U[OF fU lin_dense nmpiU nmi npi px] have "?D" by blast}
+   ultimately have "?D" by blast}
+ moreover
+ { assume "?D"
+   moreover {assume m:"MP" from minf_ex[OF mi m] have "?E" .}
+   moreover {assume p: "PP" from pinf_ex[OF pi p] have "?E" . }
+   moreover {assume f:"?F" hence "?E" by blast}
+   ultimately have "?E" by blast}
+ ultimately have "?E = ?D" by blast thus "?E \<equiv> ?D" by simp
+qed
+
+lemmas minf_thms = minf_conj minf_disj minf_eq minf_neq minf_lt minf_le minf_gt minf_ge minf_P
+lemmas pinf_thms = pinf_conj pinf_disj pinf_eq pinf_neq pinf_lt pinf_le pinf_gt pinf_ge pinf_P
+
+lemmas nmi_thms = nmi_conj nmi_disj nmi_eq nmi_neq nmi_lt nmi_le nmi_gt nmi_ge nmi_P
+lemmas npi_thms = npi_conj npi_disj npi_eq npi_neq npi_lt npi_le npi_gt npi_ge npi_P
+lemmas lin_dense_thms = lin_dense_conj lin_dense_disj lin_dense_eq lin_dense_neq lin_dense_lt lin_dense_le lin_dense_gt lin_dense_ge lin_dense_P
+
+lemma ferrack_axiom: "constr_dense_linear_order less_eq less between"
+  by (rule constr_dense_linear_order_axioms)
+lemma atoms:
+  shows "TERM (less :: 'a \<Rightarrow> _)"
+    and "TERM (less_eq :: 'a \<Rightarrow> _)"
+    and "TERM (op = :: 'a \<Rightarrow> _)" .
+
+declare ferrack_axiom [ferrack minf: minf_thms pinf: pinf_thms
+    nmi: nmi_thms npi: npi_thms lindense:
+    lin_dense_thms qe: fr_eq atoms: atoms]
+
+declaration {*
+let
+fun simps phi = map (Morphism.thm phi) [@{thm "not_less"}, @{thm "not_le"}]
+fun generic_whatis phi =
+ let
+  val [lt, le] = map (Morphism.term phi) [@{term "op \<sqsubset>"}, @{term "op \<sqsubseteq>"}]
+  fun h x t =
+   case term_of t of
+     Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq
+                            else Ferrante_Rackoff_Data.Nox
+   | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq
+                            else Ferrante_Rackoff_Data.Nox
+   | b$y$z => if Term.could_unify (b, lt) then
+                 if term_of x aconv y then Ferrante_Rackoff_Data.Lt
+                 else if term_of x aconv z then Ferrante_Rackoff_Data.Gt
+                 else Ferrante_Rackoff_Data.Nox
+             else if Term.could_unify (b, le) then
+                 if term_of x aconv y then Ferrante_Rackoff_Data.Le
+                 else if term_of x aconv z then Ferrante_Rackoff_Data.Ge
+                 else Ferrante_Rackoff_Data.Nox
+             else Ferrante_Rackoff_Data.Nox
+   | _ => Ferrante_Rackoff_Data.Nox
+ in h end
+ fun ss phi = HOL_ss addsimps (simps phi)
+in
+ Ferrante_Rackoff_Data.funs  @{thm "ferrack_axiom"}
+  {isolate_conv = K (K (K Thm.reflexive)), whatis = generic_whatis, simpset = ss}
+end
+*}
+
+end
+
+use "~~/src/HOL/Tools/Qelim/ferrante_rackoff.ML"
+
+method_setup ferrack = {*
+  Method.ctxt_args (Method.SIMPLE_METHOD' o FerranteRackoff.dlo_tac)
+*} "Ferrante and Rackoff's algorithm for quantifier elimination in dense linear orders"
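As a usage sketch of the new method (a hypothetical goal, assuming the ordered-field interpretation declared further below so that the method applies at type real; the ferrack-then-arith pattern mirrors the real_down2 lemma in Fundamental_Theorem_Algebra.thy later in this changeset):

  lemma "(0::real) < d ==> EX e. 0 < e & e < d"
    apply ferrack apply arith done

Here ferrack is expected to eliminate the existential quantifier, leaving a quantifier-free residue that arith can close.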
+
+subsection {* Ferrante and Rackoff algorithm over ordered fields *}
+
+lemma neg_prod_lt:"(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x < 0) == (x > 0))"
+proof-
+  assume H: "c < 0"
+  have "c*x < 0 = (0/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps)
+  also have "\<dots> = (0 < x)" by simp
+  finally show  "(c*x < 0) == (x > 0)" by simp
+qed
+
+lemma pos_prod_lt:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x < 0) == (x < 0))"
+proof-
+  assume H: "c > 0"
+  hence "c*x < 0 = (0/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps)
+  also have "\<dots> = (0 > x)" by simp
+  finally show  "(c*x < 0) == (x < 0)" by simp
+qed
+
+lemma neg_prod_sum_lt: "(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x + t< 0) == (x > (- 1/c)*t))"
+proof-
+  assume H: "c < 0"
+  have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp)
+  also have "\<dots> = (-t/c < x)" by (simp only: neg_divide_less_eq[OF H] ring_simps)
+  also have "\<dots> = ((- 1/c)*t < x)" by simp
+  finally show  "(c*x + t < 0) == (x > (- 1/c)*t)" by simp
+qed
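As a purely numerical check of this rewrite (an illustration, not part of the theory): with c = -2 and t = 6 we have (- 1/c)*t = 3, so neg_prod_sum_lt turns -2*x + 6 < 0 into x > 3, which is indeed equivalent.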
+
+lemma pos_prod_sum_lt:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x + t < 0) == (x < (- 1/c)*t))"
+proof-
+  assume H: "c > 0"
+  have "c*x + t< 0 = (c*x < -t)"  by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp)
+  also have "\<dots> = (-t/c > x)" by (simp only: pos_less_divide_eq[OF H] ring_simps)
+  also have "\<dots> = ((- 1/c)*t > x)" by simp
+  finally show  "(c*x + t < 0) == (x < (- 1/c)*t)" by simp
+qed
+
+lemma sum_lt:"((x::'a::pordered_ab_group_add) + t < 0) == (x < - t)"
+  using less_diff_eq[where a= x and b=t and c=0] by simp
+
+lemma neg_prod_le:"(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x <= 0) == (x >= 0))"
+proof-
+  assume H: "c < 0"
+  have "c*x <= 0 = (0/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps)
+  also have "\<dots> = (0 <= x)" by simp
+  finally show  "(c*x <= 0) == (x >= 0)" by simp
+qed
+
+lemma pos_prod_le:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x <= 0) == (x <= 0))"
+proof-
+  assume H: "c > 0"
+  hence "c*x <= 0 = (0/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps)
+  also have "\<dots> = (0 >= x)" by simp
+  finally show  "(c*x <= 0) == (x <= 0)" by simp
+qed
+
+lemma neg_prod_sum_le: "(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x + t <= 0) == (x >= (- 1/c)*t))"
+proof-
+  assume H: "c < 0"
+  have "c*x + t <= 0 = (c*x <= -t)"  by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp)
+  also have "\<dots> = (-t/c <= x)" by (simp only: neg_divide_le_eq[OF H] ring_simps)
+  also have "\<dots> = ((- 1/c)*t <= x)" by simp
+  finally show  "(c*x + t <= 0) == (x >= (- 1/c)*t)" by simp
+qed
+
+lemma pos_prod_sum_le:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x + t <= 0) == (x <= (- 1/c)*t))"
+proof-
+  assume H: "c > 0"
+  have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp)
+  also have "\<dots> = (-t/c >= x)" by (simp only: pos_le_divide_eq[OF H] ring_simps)
+  also have "\<dots> = ((- 1/c)*t >= x)" by simp
+  finally show  "(c*x + t <= 0) == (x <= (- 1/c)*t)" by simp
+qed
+
+lemma sum_le:"((x::'a::pordered_ab_group_add) + t <= 0) == (x <= - t)"
+  using le_diff_eq[where a= x and b=t and c=0] by simp
+
+lemma nz_prod_eq:"(c\<Colon>'a\<Colon>ordered_field) \<noteq> 0 \<Longrightarrow> ((c*x = 0) == (x = 0))" by simp
+lemma nz_prod_sum_eq: "(c\<Colon>'a\<Colon>ordered_field) \<noteq> 0 \<Longrightarrow> ((c*x + t = 0) == (x = (- 1/c)*t))"
+proof-
+  assume H: "c \<noteq> 0"
+  have "c*x + t = 0 = (c*x = -t)" by (subst eq_iff_diff_eq_0 [of "c*x" "-t"], simp)
+  also have "\<dots> = (x = -t/c)" by (simp only: nonzero_eq_divide_eq[OF H] ring_simps)
+  finally show  "(c*x + t = 0) == (x = (- 1/c)*t)" by simp
+qed
+lemma sum_eq:"((x::'a::pordered_ab_group_add) + t = 0) == (x = - t)"
+  using eq_diff_eq[where a= x and b=t and c=0] by simp
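Analogously for equations (again only an illustration): with c = 3 and t = -6, nz_prod_sum_eq rewrites 3*x + -6 = 0 to x = (- 1/3)*(-6), i.e. x = 2.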
+
+
+class_interpretation class_ordered_field_dense_linear_order: constr_dense_linear_order
+ ["op <=" "op <"
+   "\<lambda> x y. 1/2 * ((x::'a::{ordered_field,recpower,number_ring}) + y)"]
+proof (unfold_locales, dlo, dlo, auto)
+  fix x y::'a assume lt: "x < y"
+  from  less_half_sum[OF lt] show "x < (x + y) /2" by simp
+next
+  fix x y::'a assume lt: "x < y"
+  from  gt_half_sum[OF lt] show "(x + y) /2 < y" by simp
+qed
+
+declaration{*
+let
+fun earlier [] x y = false
+        | earlier (h::t) x y =
+    if h aconvc y then false else if h aconvc x then true else earlier t x y;
+
+fun dest_frac ct = case term_of ct of
+   Const (@{const_name "HOL.divide"},_) $ a $ b=>
+    Rat.rat_of_quotient (snd (HOLogic.dest_number a), snd (HOLogic.dest_number b))
+ | t => Rat.rat_of_int (snd (HOLogic.dest_number t))
+
+fun mk_frac phi cT x =
+ let val (a, b) = Rat.quotient_of_rat x
+ in if b = 1 then Numeral.mk_cnumber cT a
+    else Thm.capply
+         (Thm.capply (Drule.cterm_rule (instantiate' [SOME cT] []) @{cpat "op /"})
+                     (Numeral.mk_cnumber cT a))
+         (Numeral.mk_cnumber cT b)
+ end
+
+fun whatis x ct = case term_of ct of
+  Const(@{const_name "HOL.plus"}, _)$(Const(@{const_name "HOL.times"},_)$_$y)$_ =>
+     if y aconv term_of x then ("c*x+t",[(funpow 2 Thm.dest_arg1) ct, Thm.dest_arg ct])
+     else ("Nox",[])
+| Const(@{const_name "HOL.plus"}, _)$y$_ =>
+     if y aconv term_of x then ("x+t",[Thm.dest_arg ct])
+     else ("Nox",[])
+| Const(@{const_name "HOL.times"}, _)$_$y =>
+     if y aconv term_of x then ("c*x",[Thm.dest_arg1 ct])
+     else ("Nox",[])
+| t => if t aconv term_of x then ("x",[]) else ("Nox",[]);
+
+fun xnormalize_conv ctxt [] ct = reflexive ct
+| xnormalize_conv ctxt (vs as (x::_)) ct =
+   case term_of ct of
+   Const(@{const_name HOL.less},_)$_$Const(@{const_name "HOL.zero"},_) =>
+    (case whatis x (Thm.dest_arg1 ct) of
+    ("c*x+t",[c,t]) =>
+       let
+        val cr = dest_frac c
+        val clt = Thm.dest_fun2 ct
+        val cz = Thm.dest_arg ct
+        val neg = cr </ Rat.zero
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+               (Thm.capply @{cterm "Trueprop"}
+                  (if neg then Thm.capply (Thm.capply clt c) cz
+                    else Thm.capply (Thm.capply clt cz) c))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val th = implies_elim (instantiate' [SOME (ctyp_of_term x)] (map SOME [c,x,t])
+             (if neg then @{thm neg_prod_sum_lt} else @{thm pos_prod_sum_lt})) cth
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+                   (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+      in rth end
+    | ("x+t",[t]) =>
+       let
+        val T = ctyp_of_term x
+        val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_lt"}
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+              (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+       in  rth end
+    | ("c*x",[c]) =>
+       let
+        val cr = dest_frac c
+        val clt = Thm.dest_fun2 ct
+        val cz = Thm.dest_arg ct
+        val neg = cr </ Rat.zero
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+               (Thm.capply @{cterm "Trueprop"}
+                  (if neg then Thm.capply (Thm.capply clt c) cz
+                    else Thm.capply (Thm.capply clt cz) c))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val th = implies_elim (instantiate' [SOME (ctyp_of_term x)] (map SOME [c,x])
+             (if neg then @{thm neg_prod_lt} else @{thm pos_prod_lt})) cth
+        val rth = th
+      in rth end
+    | _ => reflexive ct)
+
+
+|  Const(@{const_name HOL.less_eq},_)$_$Const(@{const_name "HOL.zero"},_) =>
+   (case whatis x (Thm.dest_arg1 ct) of
+    ("c*x+t",[c,t]) =>
+       let
+        val T = ctyp_of_term x
+        val cr = dest_frac c
+        val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"}
+        val cz = Thm.dest_arg ct
+        val neg = cr </ Rat.zero
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+               (Thm.capply @{cterm "Trueprop"}
+                  (if neg then Thm.capply (Thm.capply clt c) cz
+                    else Thm.capply (Thm.capply clt cz) c))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val th = implies_elim (instantiate' [SOME T] (map SOME [c,x,t])
+             (if neg then @{thm neg_prod_sum_le} else @{thm pos_prod_sum_le})) cth
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+                   (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+      in rth end
+    | ("x+t",[t]) =>
+       let
+        val T = ctyp_of_term x
+        val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_le"}
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+              (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+       in  rth end
+    | ("c*x",[c]) =>
+       let
+        val T = ctyp_of_term x
+        val cr = dest_frac c
+        val clt = Drule.cterm_rule (instantiate' [SOME T] []) @{cpat "op <"}
+        val cz = Thm.dest_arg ct
+        val neg = cr </ Rat.zero
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+               (Thm.capply @{cterm "Trueprop"}
+                  (if neg then Thm.capply (Thm.capply clt c) cz
+                    else Thm.capply (Thm.capply clt cz) c))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val th = implies_elim (instantiate' [SOME (ctyp_of_term x)] (map SOME [c,x])
+             (if neg then @{thm neg_prod_le} else @{thm pos_prod_le})) cth
+        val rth = th
+      in rth end
+    | _ => reflexive ct)
+
+|  Const("op =",_)$_$Const(@{const_name "HOL.zero"},_) =>
+   (case whatis x (Thm.dest_arg1 ct) of
+    ("c*x+t",[c,t]) =>
+       let
+        val T = ctyp_of_term x
+        val cr = dest_frac c
+        val ceq = Thm.dest_fun2 ct
+        val cz = Thm.dest_arg ct
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+            (Thm.capply @{cterm "Trueprop"}
+             (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz)))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val th = implies_elim
+                 (instantiate' [SOME T] (map SOME [c,x,t]) @{thm nz_prod_sum_eq}) cth
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+                   (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+      in rth end
+    | ("x+t",[t]) =>
+       let
+        val T = ctyp_of_term x
+        val th = instantiate' [SOME T] [SOME x, SOME t] @{thm "sum_eq"}
+        val rth = Conv.fconv_rule (Conv.arg_conv (Conv.binop_conv
+              (Normalizer.semiring_normalize_ord_conv ctxt (earlier vs)))) th
+       in  rth end
+    | ("c*x",[c]) =>
+       let
+        val T = ctyp_of_term x
+        val cr = dest_frac c
+        val ceq = Thm.dest_fun2 ct
+        val cz = Thm.dest_arg ct
+        val cthp = Simplifier.rewrite (local_simpset_of ctxt)
+            (Thm.capply @{cterm "Trueprop"}
+             (Thm.capply @{cterm "Not"} (Thm.capply (Thm.capply ceq c) cz)))
+        val cth = equal_elim (symmetric cthp) TrueI
+        val rth = implies_elim
+                 (instantiate' [SOME T] (map SOME [c,x]) @{thm nz_prod_eq}) cth
+      in rth end
+    | _ => reflexive ct);
+
+local
+  val less_iff_diff_less_0 = mk_meta_eq @{thm "less_iff_diff_less_0"}
+  val le_iff_diff_le_0 = mk_meta_eq @{thm "le_iff_diff_le_0"}
+  val eq_iff_diff_eq_0 = mk_meta_eq @{thm "eq_iff_diff_eq_0"}
+in
+fun field_isolate_conv phi ctxt vs ct = case term_of ct of
+  Const(@{const_name HOL.less},_)$a$b =>
+   let val (ca,cb) = Thm.dest_binop ct
+       val T = ctyp_of_term ca
+       val th = instantiate' [SOME T] [SOME ca, SOME cb] less_iff_diff_less_0
+       val nth = Conv.fconv_rule
+         (Conv.arg_conv (Conv.arg1_conv
+              (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th
+       val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
+   in rth end
+| Const(@{const_name HOL.less_eq},_)$a$b =>
+   let val (ca,cb) = Thm.dest_binop ct
+       val T = ctyp_of_term ca
+       val th = instantiate' [SOME T] [SOME ca, SOME cb] le_iff_diff_le_0
+       val nth = Conv.fconv_rule
+         (Conv.arg_conv (Conv.arg1_conv
+              (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th
+       val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
+   in rth end
+
+| Const("op =",_)$a$b =>
+   let val (ca,cb) = Thm.dest_binop ct
+       val T = ctyp_of_term ca
+       val th = instantiate' [SOME T] [SOME ca, SOME cb] eq_iff_diff_eq_0
+       val nth = Conv.fconv_rule
+         (Conv.arg_conv (Conv.arg1_conv
+              (Normalizer.semiring_normalize_ord_conv @{context} (earlier vs)))) th
+       val rth = transitive nth (xnormalize_conv ctxt vs (Thm.rhs_of nth))
+   in rth end
+| @{term "Not"} $(Const("op =",_)$a$b) => Conv.arg_conv (field_isolate_conv phi ctxt vs) ct
+| _ => reflexive ct
+end;
+
+fun classfield_whatis phi =
+ let
+  fun h x t =
+   case term_of t of
+     Const("op =", _)$y$z => if term_of x aconv y then Ferrante_Rackoff_Data.Eq
+                            else Ferrante_Rackoff_Data.Nox
+   | @{term "Not"}$(Const("op =", _)$y$z) => if term_of x aconv y then Ferrante_Rackoff_Data.NEq
+                            else Ferrante_Rackoff_Data.Nox
+   | Const(@{const_name HOL.less},_)$y$z =>
+       if term_of x aconv y then Ferrante_Rackoff_Data.Lt
+        else if term_of x aconv z then Ferrante_Rackoff_Data.Gt
+        else Ferrante_Rackoff_Data.Nox
+   | Const (@{const_name HOL.less_eq},_)$y$z =>
+         if term_of x aconv y then Ferrante_Rackoff_Data.Le
+         else if term_of x aconv z then Ferrante_Rackoff_Data.Ge
+         else Ferrante_Rackoff_Data.Nox
+   | _ => Ferrante_Rackoff_Data.Nox
+ in h end;
+fun class_field_ss phi =
+   HOL_basic_ss addsimps ([@{thm "linorder_not_less"}, @{thm "linorder_not_le"}])
+   addsplits [@{thm "abs_split"},@{thm "split_max"}, @{thm "split_min"}]
+
+in
+Ferrante_Rackoff_Data.funs @{thm "class_ordered_field_dense_linear_order.ferrack_axiom"}
+  {isolate_conv = field_isolate_conv, whatis = classfield_whatis, simpset = class_field_ss}
+end
+*}
+
+
+end 
--- a/src/HOL/Divides.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Divides.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -639,7 +639,7 @@
 
 text {* @{term "op dvd"} is a partial order *}
 
-interpretation dvd: order ["op dvd" "\<lambda>n m \<Colon> nat. n dvd m \<and> \<not> m dvd n"]
+class_interpretation dvd: order ["op dvd" "\<lambda>n m \<Colon> nat. n dvd m \<and> \<not> m dvd n"]
   proof qed (auto intro: dvd_refl dvd_trans dvd_anti_sym)
 
 lemma dvd_diff: "[| k dvd m; k dvd n |] ==> k dvd (m-n :: nat)"
--- a/src/HOL/Finite_Set.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Finite_Set.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -750,7 +750,7 @@
 assumes "finite A" and "a \<notin> A"
 shows "fold_image times g z (insert a A) = g a * (fold_image times g z A)"
 proof -
-  interpret I: fun_left_comm ["%x y. (g x) * y"]
+  interpret I: fun_left_comm "%x y. (g x) * y"
     by unfold_locales (simp add: mult_ac)
   show ?thesis using assms by(simp add:fold_image_def I.fold_insert)
 qed
@@ -798,7 +798,7 @@
     and hyp: "\<And>x y. h (g x y) = times x (h y)"
   shows "h (fold g j w A) = fold times j (h w) A"
 proof -
-  interpret ab_semigroup_mult [g] by fact
+  class_interpret ab_semigroup_mult [g] by fact
   show ?thesis using fin hyp by (induct set: finite) simp_all
 qed
 *)
@@ -873,7 +873,7 @@
 
 subsection {* Generalized summation over a set *}
 
-interpretation comm_monoid_add: comm_monoid_mult ["0::'a::comm_monoid_add" "op +"]
+class_interpretation comm_monoid_add: comm_monoid_mult ["0::'a::comm_monoid_add" "op +"]
   proof qed (auto intro: add_assoc add_commute)
 
 definition setsum :: "('a => 'b) => 'a set => 'b::comm_monoid_add"
@@ -1760,7 +1760,7 @@
 proof (induct rule: finite_induct)
   case empty then show ?case by simp
 next
-  interpret ab_semigroup_mult ["op Un"]
+  class_interpret ab_semigroup_mult ["op Un"]
     proof qed auto
   case insert 
   then show ?case by simp
@@ -1943,7 +1943,7 @@
 assumes fold: "fold_graph times (b::'a) A y" and "b \<notin> A"
 shows "fold_graph times z (insert b A) (z * y)"
 proof -
-  interpret fun_left_comm ["op *::'a \<Rightarrow> 'a \<Rightarrow> 'a"] by (rule fun_left_comm)
+  interpret fun_left_comm "op *::'a \<Rightarrow> 'a \<Rightarrow> 'a" by (rule fun_left_comm)
 from assms show ?thesis
 proof (induct rule: fold_graph.induct)
   case emptyI thus ?case by (force simp add: fold_insert_aux mult_commute)
@@ -1983,7 +1983,7 @@
 lemma fold1_eq_fold:
 assumes "finite A" "a \<notin> A" shows "fold1 times (insert a A) = fold times a A"
 proof -
-  interpret fun_left_comm ["op *::'a \<Rightarrow> 'a \<Rightarrow> 'a"] by (rule fun_left_comm)
+  interpret fun_left_comm "op *::'a \<Rightarrow> 'a \<Rightarrow> 'a" by (rule fun_left_comm)
   from assms show ?thesis
 apply (simp add: fold1_def fold_def)
 apply (rule the_equality)
@@ -2010,7 +2010,7 @@
   assumes nonempty: "A \<noteq> {}" and A: "finite A" "x \<notin> A"
   shows "fold1 times (insert x A) = x * fold1 times A"
 proof -
-  interpret fun_left_comm ["op *::'a \<Rightarrow> 'a \<Rightarrow> 'a"] by (rule fun_left_comm)
+  interpret fun_left_comm "op *::'a \<Rightarrow> 'a \<Rightarrow> 'a" by (rule fun_left_comm)
   from nonempty obtain a A' where "A = insert a A' & a ~: A'"
     by (auto simp add: nonempty_iff)
   with A show ?thesis
@@ -2033,7 +2033,7 @@
   assumes nonempty: "A \<noteq> {}" and A: "finite A" 
   shows "fold1 times (insert x A) = x * fold1 times A"
 proof -
-  interpret fun_left_comm_idem ["op *::'a \<Rightarrow> 'a \<Rightarrow> 'a"]
+  interpret fun_left_comm_idem "op *::'a \<Rightarrow> 'a \<Rightarrow> 'a"
     by (rule fun_left_comm_idem)
   from nonempty obtain a A' where A': "A = insert a A' & a ~: A'"
     by (auto simp add: nonempty_iff)
@@ -2198,7 +2198,7 @@
   assumes "finite A" "A \<noteq> {}"
   shows "x \<le> fold1 inf A \<longleftrightarrow> (\<forall>a\<in>A. x \<le> a)"
 proof -
-  interpret ab_semigroup_idem_mult [inf]
+  class_interpret ab_semigroup_idem_mult [inf]
     by (rule ab_semigroup_idem_mult_inf)
   show ?thesis using assms by (induct rule: finite_ne_induct) simp_all
 qed
@@ -2213,7 +2213,7 @@
   proof (induct rule: finite_ne_induct)
     case singleton thus ?case by simp
   next
-    interpret ab_semigroup_idem_mult [inf]
+    class_interpret ab_semigroup_idem_mult [inf]
       by (rule ab_semigroup_idem_mult_inf)
     case (insert x F)
     from insert(5) have "a = x \<or> a \<in> F" by simp
@@ -2288,7 +2288,7 @@
     and "A \<noteq> {}"
   shows "sup x (\<Sqinter>\<^bsub>fin\<^esub>A) = \<Sqinter>\<^bsub>fin\<^esub>{sup x a|a. a \<in> A}"
 proof -
-  interpret ab_semigroup_idem_mult [inf]
+  class_interpret ab_semigroup_idem_mult [inf]
     by (rule ab_semigroup_idem_mult_inf)
   from assms show ?thesis
     by (simp add: Inf_fin_def image_def
@@ -2303,7 +2303,7 @@
   case singleton thus ?case
     by (simp add: sup_Inf1_distrib [OF B] fold1_singleton_def [OF Inf_fin_def])
 next
-  interpret ab_semigroup_idem_mult [inf]
+  class_interpret ab_semigroup_idem_mult [inf]
     by (rule ab_semigroup_idem_mult_inf)
   case (insert x A)
   have finB: "finite {sup x b |b. b \<in> B}"
@@ -2333,7 +2333,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "inf x (\<Squnion>\<^bsub>fin\<^esub>A) = \<Squnion>\<^bsub>fin\<^esub>{inf x a|a. a \<in> A}"
 proof -
-  interpret ab_semigroup_idem_mult [sup]
+  class_interpret ab_semigroup_idem_mult [sup]
     by (rule ab_semigroup_idem_mult_sup)
   from assms show ?thesis
     by (simp add: Sup_fin_def image_def hom_fold1_commute [where h="inf x", OF inf_sup_distrib1])
@@ -2357,7 +2357,7 @@
     thus ?thesis by(simp add: insert(1) B(1))
   qed
   have ne: "{inf a b |a b. a \<in> A \<and> b \<in> B} \<noteq> {}" using insert B by blast
-  interpret ab_semigroup_idem_mult [sup]
+  class_interpret ab_semigroup_idem_mult [sup]
     by (rule ab_semigroup_idem_mult_sup)
   have "inf (\<Squnion>\<^bsub>fin\<^esub>(insert x A)) (\<Squnion>\<^bsub>fin\<^esub>B) = inf (sup x (\<Squnion>\<^bsub>fin\<^esub>A)) (\<Squnion>\<^bsub>fin\<^esub>B)"
     using insert by (simp add: fold1_insert_idem_def [OF Sup_fin_def])
@@ -2386,7 +2386,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "\<Sqinter>\<^bsub>fin\<^esub>A = Inf A"
 proof -
-  interpret ab_semigroup_idem_mult [inf]
+  class_interpret ab_semigroup_idem_mult [inf]
     by (rule ab_semigroup_idem_mult_inf)
   from assms show ?thesis
   unfolding Inf_fin_def by (induct A set: finite)
@@ -2397,7 +2397,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "\<Squnion>\<^bsub>fin\<^esub>A = Sup A"
 proof -
-  interpret ab_semigroup_idem_mult [sup]
+  class_interpret ab_semigroup_idem_mult [sup]
     by (rule ab_semigroup_idem_mult_sup)
   from assms show ?thesis
   unfolding Sup_fin_def by (induct A set: finite)
@@ -2446,7 +2446,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "x < fold1 min A \<longleftrightarrow> (\<forall>a\<in>A. x < a)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis
   by (induct rule: finite_ne_induct)
@@ -2457,7 +2457,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "fold1 min A \<le> x \<longleftrightarrow> (\<exists>a\<in>A. a \<le> x)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis
   by (induct rule: finite_ne_induct)
@@ -2468,7 +2468,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "fold1 min A < x \<longleftrightarrow> (\<exists>a\<in>A. a < x)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis
   by (induct rule: finite_ne_induct)
@@ -2481,7 +2481,7 @@
 proof cases
   assume "A = B" thus ?thesis by simp
 next
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   assume "A \<noteq> B"
   have B: "B = A \<union> (B-A)" using `A \<subseteq> B` by blast
@@ -2515,7 +2515,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Min (insert x A) = min x (Min A)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis by (rule fold1_insert_idem_def [OF Min_def])
 qed
@@ -2524,7 +2524,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Max (insert x A) = max x (Max A)"
 proof -
-  interpret ab_semigroup_idem_mult [max]
+  class_interpret ab_semigroup_idem_mult [max]
     by (rule ab_semigroup_idem_mult_max)
   from assms show ?thesis by (rule fold1_insert_idem_def [OF Max_def])
 qed
@@ -2533,7 +2533,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Min A \<in> A"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms fold1_in show ?thesis by (fastsimp simp: Min_def min_def)
 qed
@@ -2542,7 +2542,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Max A \<in> A"
 proof -
-  interpret ab_semigroup_idem_mult [max]
+  class_interpret ab_semigroup_idem_mult [max]
     by (rule ab_semigroup_idem_mult_max)
   from assms fold1_in [of A] show ?thesis by (fastsimp simp: Max_def max_def)
 qed
@@ -2551,7 +2551,7 @@
   assumes "finite A" and "A \<noteq> {}" and "finite B" and "B \<noteq> {}"
   shows "Min (A \<union> B) = min (Min A) (Min B)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis
     by (simp add: Min_def fold1_Un2)
@@ -2561,7 +2561,7 @@
   assumes "finite A" and "A \<noteq> {}" and "finite B" and "B \<noteq> {}"
   shows "Max (A \<union> B) = max (Max A) (Max B)"
 proof -
-  interpret ab_semigroup_idem_mult [max]
+  class_interpret ab_semigroup_idem_mult [max]
     by (rule ab_semigroup_idem_mult_max)
   from assms show ?thesis
     by (simp add: Max_def fold1_Un2)
@@ -2572,7 +2572,7 @@
     and "finite N" and "N \<noteq> {}"
   shows "h (Min N) = Min (h ` N)"
 proof -
-  interpret ab_semigroup_idem_mult [min]
+  class_interpret ab_semigroup_idem_mult [min]
     by (rule ab_semigroup_idem_mult_min)
   from assms show ?thesis
     by (simp add: Min_def hom_fold1_commute)
@@ -2583,7 +2583,7 @@
     and "finite N" and "N \<noteq> {}"
   shows "h (Max N) = Max (h ` N)"
 proof -
-  interpret ab_semigroup_idem_mult [max]
+  class_interpret ab_semigroup_idem_mult [max]
     by (rule ab_semigroup_idem_mult_max)
   from assms show ?thesis
     by (simp add: Max_def hom_fold1_commute [of h])
@@ -2593,7 +2593,7 @@
   assumes "finite A" and "x \<in> A"
   shows "Min A \<le> x"
 proof -
-  interpret lower_semilattice ["op \<le>" "op <" min]
+  class_interpret lower_semilattice ["op \<le>" "op <" min]
     by (rule min_lattice)
   from assms show ?thesis by (simp add: Min_def fold1_belowI)
 qed
@@ -2611,7 +2611,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "x \<le> Min A \<longleftrightarrow> (\<forall>a\<in>A. x \<le> a)"
 proof -
-  interpret lower_semilattice ["op \<le>" "op <" min]
+  class_interpret lower_semilattice ["op \<le>" "op <" min]
     by (rule min_lattice)
   from assms show ?thesis by (simp add: Min_def below_fold1_iff)
 qed
@@ -2629,7 +2629,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "x < Min A \<longleftrightarrow> (\<forall>a\<in>A. x < a)"
 proof -
-  interpret lower_semilattice ["op \<le>" "op <" min]
+  class_interpret lower_semilattice ["op \<le>" "op <" min]
     by (rule min_lattice)
   from assms show ?thesis by (simp add: Min_def strict_below_fold1_iff)
 qed
@@ -2639,7 +2639,7 @@
   shows "Max A < x \<longleftrightarrow> (\<forall>a\<in>A. a < x)"
 proof -
   note Max = Max_def
-  interpret linorder ["op \<ge>" "op >"]
+  class_interpret linorder ["op \<ge>" "op >"]
     by (rule dual_linorder)
   from assms show ?thesis
     by (simp add: Max strict_below_fold1_iff [folded dual_max])
@@ -2649,7 +2649,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Min A \<le> x \<longleftrightarrow> (\<exists>a\<in>A. a \<le> x)"
 proof -
-  interpret lower_semilattice ["op \<le>" "op <" min]
+  class_interpret lower_semilattice ["op \<le>" "op <" min]
     by (rule min_lattice)
   from assms show ?thesis
     by (simp add: Min_def fold1_below_iff)
@@ -2660,7 +2660,7 @@
   shows "x \<le> Max A \<longleftrightarrow> (\<exists>a\<in>A. x \<le> a)"
 proof -
   note Max = Max_def
-  interpret linorder ["op \<ge>" "op >"]
+  class_interpret linorder ["op \<ge>" "op >"]
     by (rule dual_linorder)
   from assms show ?thesis
     by (simp add: Max fold1_below_iff [folded dual_max])
@@ -2670,7 +2670,7 @@
   assumes "finite A" and "A \<noteq> {}"
   shows "Min A < x \<longleftrightarrow> (\<exists>a\<in>A. a < x)"
 proof -
-  interpret lower_semilattice ["op \<le>" "op <" min]
+  class_interpret lower_semilattice ["op \<le>" "op <" min]
     by (rule min_lattice)
   from assms show ?thesis
     by (simp add: Min_def fold1_strict_below_iff)
@@ -2681,7 +2681,7 @@
   shows "x < Max A \<longleftrightarrow> (\<exists>a\<in>A. x < a)"
 proof -
   note Max = Max_def
-  interpret linorder ["op \<ge>" "op >"]
+  class_interpret linorder ["op \<ge>" "op >"]
     by (rule dual_linorder)
   from assms show ?thesis
     by (simp add: Max fold1_strict_below_iff [folded dual_max])
@@ -2691,7 +2691,7 @@
   assumes "M \<subseteq> N" and "M \<noteq> {}" and "finite N"
   shows "Min N \<le> Min M"
 proof -
-  interpret distrib_lattice ["op \<le>" "op <" min max]
+  class_interpret distrib_lattice ["op \<le>" "op <" min max]
     by (rule distrib_lattice_min_max)
   from assms show ?thesis by (simp add: Min_def fold1_antimono)
 qed
@@ -2701,7 +2701,7 @@
   shows "Max M \<le> Max N"
 proof -
   note Max = Max_def
-  interpret linorder ["op \<ge>" "op >"]
+  class_interpret linorder ["op \<ge>" "op >"]
     by (rule dual_linorder)
   from assms show ?thesis
     by (simp add: Max fold1_antimono [folded dual_max])
--- a/src/HOL/FrechetDeriv.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/FrechetDeriv.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -65,8 +65,8 @@
   assumes "bounded_linear g"
   shows "bounded_linear (\<lambda>x. f x + g x)"
 proof -
-  interpret f: bounded_linear [f] by fact
-  interpret g: bounded_linear [g] by fact
+  interpret f: bounded_linear f by fact
+  interpret g: bounded_linear g by fact
   show ?thesis apply (unfold_locales)
     apply (simp only: f.add g.add add_ac)
     apply (simp only: f.scaleR g.scaleR scaleR_right_distrib)
@@ -124,7 +124,7 @@
   assumes "bounded_linear f"
   shows "bounded_linear (\<lambda>x. - f x)"
 proof -
-  interpret f: bounded_linear [f] by fact
+  interpret f: bounded_linear f by fact
   show ?thesis apply (unfold_locales)
     apply (simp add: f.add)
     apply (simp add: f.scaleR)
@@ -151,7 +151,7 @@
   assumes f: "FDERIV f x :> F"
   shows "isCont f x"
 proof -
-  from f interpret F: bounded_linear ["F"] by (rule FDERIV_bounded_linear)
+  from f interpret F: bounded_linear "F" by (rule FDERIV_bounded_linear)
   have "(\<lambda>h. norm (f (x + h) - f x - F h) / norm h) -- 0 --> 0"
     by (rule FDERIV_D [OF f])
   hence "(\<lambda>h. norm (f (x + h) - f x - F h) / norm h * norm h) -- 0 --> 0"
@@ -180,8 +180,8 @@
   assumes "bounded_linear g"
   shows "bounded_linear (\<lambda>x. f (g x))"
 proof -
-  interpret f: bounded_linear [f] by fact
-  interpret g: bounded_linear [g] by fact
+  interpret f: bounded_linear f by fact
+  interpret g: bounded_linear g by fact
   show ?thesis proof (unfold_locales)
     fix x y show "f (g (x + y)) = f (g x) + f (g y)"
       by (simp only: f.add g.add)
@@ -223,8 +223,8 @@
   let ?k = "\<lambda>h. f (x + h) - f x"
   let ?Nf = "\<lambda>h. norm (?Rf h) / norm h"
   let ?Ng = "\<lambda>h. norm (?Rg (?k h)) / norm (?k h)"
-  from f interpret F: bounded_linear ["F"] by (rule FDERIV_bounded_linear)
-  from g interpret G: bounded_linear ["G"] by (rule FDERIV_bounded_linear)
+  from f interpret F!: bounded_linear "F" by (rule FDERIV_bounded_linear)
+  from g interpret G!: bounded_linear "G" by (rule FDERIV_bounded_linear)
   from F.bounded obtain kF where kF: "\<And>x. norm (F x) \<le> norm x * kF" by fast
   from G.bounded obtain kG where kG: "\<And>x. norm (G x) \<le> norm x * kG" by fast
 
@@ -375,9 +375,9 @@
     by (simp only: FDERIV_lemma)
 qed
 
-lemmas FDERIV_mult = bounded_bilinear_locale.mult.prod.FDERIV
+lemmas FDERIV_mult = mult.FDERIV
 
-lemmas FDERIV_scaleR = bounded_bilinear_locale.scaleR.prod.FDERIV
+lemmas FDERIV_scaleR = scaleR.FDERIV
 
 
 subsection {* Powers *}
@@ -409,10 +409,10 @@
 by (simp add: right_diff_distrib left_diff_distrib mult_assoc)
 
 lemmas bounded_linear_mult_const =
-  bounded_bilinear_locale.mult.prod.bounded_linear_left [THEN bounded_linear_compose]
+  mult.bounded_linear_left [THEN bounded_linear_compose]
 
 lemmas bounded_linear_const_mult =
-  bounded_bilinear_locale.mult.prod.bounded_linear_right [THEN bounded_linear_compose]
+  mult.bounded_linear_right [THEN bounded_linear_compose]
 
 lemma FDERIV_inverse:
   fixes x :: "'a::real_normed_div_algebra"
@@ -492,7 +492,7 @@
   fixes x :: "'a::real_normed_field" shows
   "FDERIV f x :> (\<lambda>h. h * D) = (\<lambda>h. (f (x + h) - f x) / h) -- 0 --> D"
  apply (unfold fderiv_def)
- apply (simp add: bounded_bilinear_locale.mult.prod.bounded_linear_left)
+ apply (simp add: mult.bounded_linear_left)
  apply (simp cong: LIM_cong add: nonzero_norm_divide [symmetric])
  apply (subst diff_divide_distrib)
  apply (subst times_divide_eq_left [symmetric])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Fundamental_Theorem_Algebra.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -0,0 +1,1327 @@
+(* Author: Amine Chaieb, TU Muenchen *)
+
+header{*Fundamental Theorem of Algebra*}
+
+theory Fundamental_Theorem_Algebra
+imports Univ_Poly Dense_Linear_Order Complex
+begin
+
+subsection {* Square root of complex numbers *}
+definition csqrt :: "complex \<Rightarrow> complex" where
+"csqrt z = (if Im z = 0 then
+            if 0 \<le> Re z then Complex (sqrt(Re z)) 0
+            else Complex 0 (sqrt(- Re z))
+           else Complex (sqrt((cmod z + Re z) /2))
+                        ((Im z / abs(Im z)) * sqrt((cmod z - Re z) /2)))"
+
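Two sample values, computed directly from the case split in this definition (for orientation only): csqrt (Complex (-4) 0) = Complex 0 2, via the negative-real branch, and csqrt (Complex 0 2) = Complex 1 1, since cmod (Complex 0 2) = 2 and both (2 + 0)/2 and (2 - 0)/2 equal 1. Squaring these gives back Complex (-4) 0 and Complex 0 2, as the lemma below asserts in general.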
+lemma csqrt[algebra]: "csqrt z ^ 2 = z"
+proof-
+  obtain x y where xy: "z = Complex x y" by (cases z)
+  {assume y0: "y = 0"
+    {assume x0: "x \<ge> 0" 
+      then have ?thesis using y0 xy real_sqrt_pow2[OF x0]
+	by (simp add: csqrt_def power2_eq_square)}
+    moreover
+    {assume "\<not> x \<ge> 0" hence x0: "- x \<ge> 0" by arith
+      then have ?thesis using y0 xy real_sqrt_pow2[OF x0] 
+	by (simp add: csqrt_def power2_eq_square) }
+    ultimately have ?thesis by blast}
+  moreover
+  {assume y0: "y\<noteq>0"
+    {fix x y
+      let ?z = "Complex x y"
+      from abs_Re_le_cmod[of ?z] have tha: "abs x \<le> cmod ?z" by auto
+      hence "cmod ?z - x \<ge> 0" "cmod ?z + x \<ge> 0" by arith+ 
+      hence "(sqrt (x * x + y * y) + x) / 2 \<ge> 0" "(sqrt (x * x + y * y) - x) / 2 \<ge> 0" by (simp_all add: power2_eq_square) }
+    note th = this
+    have sq4: "\<And>x::real. x^2 / 4 = (x / 2) ^ 2" 
+      by (simp add: power2_eq_square) 
+    from th[of x y]
+    have sq4': "sqrt (((sqrt (x * x + y * y) + x)^2 / 4)) = (sqrt (x * x + y * y) + x) / 2" "sqrt (((sqrt (x * x + y * y) - x)^2 / 4)) = (sqrt (x * x + y * y) - x) / 2" unfolding sq4 by simp_all
+    then have th1: "sqrt ((sqrt (x * x + y * y) + x) * (sqrt (x * x + y * y) + x) / 4) - sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) - x) / 4) = x"
+      unfolding power2_eq_square by simp 
+    have "sqrt 4 = sqrt (2^2)" by simp 
+    hence sqrt4: "sqrt 4 = 2" by (simp only: real_sqrt_abs)
+    have th2: "2 *(y * sqrt ((sqrt (x * x + y * y) - x) * (sqrt (x * x + y * y) + x) / 4)) / \<bar>y\<bar> = y"
+      using iffD2[OF real_sqrt_pow2_iff sum_power2_ge_zero[of x y]] y0
+      unfolding power2_eq_square 
+      by (simp add: ring_simps real_sqrt_divide sqrt4)
+     from y0 xy have ?thesis  apply (simp add: csqrt_def power2_eq_square)
+       apply (simp add: real_sqrt_sum_squares_mult_ge_zero[of x y] real_sqrt_pow2[OF th(1)[of x y], unfolded power2_eq_square] real_sqrt_pow2[OF th(2)[of x y], unfolded power2_eq_square] real_sqrt_mult[symmetric])
+      using th1 th2  ..}
+  ultimately show ?thesis by blast
+qed
+
+
+subsection{* More lemmas about the modulus of complex numbers *}
+
+lemma complex_of_real_power: "complex_of_real x ^ n = complex_of_real (x^n)"
+  by (rule of_real_power [symmetric])
+
+lemma real_down2: "(0::real) < d1 \<Longrightarrow> 0 < d2 ==> EX e. 0 < e & e < d1 & e < d2"
+  apply ferrack apply arith done
+
+text{* The triangle inequality for cmod *}
+lemma complex_mod_triangle_sub: "cmod w \<le> cmod (w + z) + norm z"
+  using complex_mod_triangle_ineq2[of "w + z" "-z"] by auto
+
+subsection{* Basic lemmas about complex polynomials *}
+
+lemma poly_bound_exists:
+  shows "\<exists>m. m > 0 \<and> (\<forall>z. cmod z <= r \<longrightarrow> cmod (poly p z) \<le> m)"
+proof(induct p)
+  case Nil thus ?case by (rule exI[where x=1], simp) 
+next
+  case (Cons c cs)
+  from Cons.hyps obtain m where m: "\<forall>z. cmod z \<le> r \<longrightarrow> cmod (poly cs z) \<le> m"
+    by blast
+  let ?k = " 1 + cmod c + \<bar>r * m\<bar>"
+  have kp: "?k > 0" using abs_ge_zero[of "r*m"] norm_ge_zero[of c] by arith
+  {fix z
+    assume H: "cmod z \<le> r"
+    from m H have th: "cmod (poly cs z) \<le> m" by blast
+    from H have rp: "r \<ge> 0" using norm_ge_zero[of z] by arith
+    have "cmod (poly (c # cs) z) \<le> cmod c + cmod (z* poly cs z)"
+      using norm_triangle_ineq[of c "z* poly cs z"] by simp
+    also have "\<dots> \<le> cmod c + r*m" using mult_mono[OF H th rp norm_ge_zero[of "poly cs z"]] by (simp add: norm_mult)
+    also have "\<dots> \<le> ?k" by simp
+    finally have "cmod (poly (c # cs) z) \<le> ?k" .}
+  with kp show ?case by blast
+qed
+
+
+text{* Offsetting the variable in a polynomial gives another of the same degree *}
+  (* FIXME : the lemma also holds in the locale --- fix it later *)
+lemma  poly_offset_lemma:
+  shows "\<exists>b q. (length q = length p) \<and> (\<forall>x. poly (b#q) (x::complex) = (a + x) * poly p x)"
+proof(induct p)
+  case Nil thus ?case by simp
+next
+  case (Cons c cs)
+  from Cons.hyps obtain b q where 
+    bq: "length q = length cs" "\<forall>x. poly (b # q) x = (a + x) * poly cs x"
+    by blast
+  let ?b = "a*c"
+  let ?q = "(b+c)#q"
+  have lg: "length ?q = length (c#cs)" using bq(1) by simp
+  {fix x
+    from bq(2)[rule_format, of x]
+    have "x*poly (b # q) x = x*((a + x) * poly cs x)" by simp
+    hence "poly (?b# ?q) x = (a + x) * poly (c # cs) x"
+      by (simp add: ring_simps)}
+  with lg  show ?case by blast 
+qed
+
+    (* FIXME : This one too*)
+lemma poly_offset: "\<exists> q. length q = length p \<and> (\<forall>x. poly q (x::complex) = poly p (a + x))"
+proof (induct p)
+  case Nil thus ?case by simp
+next
+  case (Cons c cs)
+  from Cons.hyps obtain q where q: "length q = length cs" "\<forall>x. poly q x = poly cs (a + x)" by blast
+  from poly_offset_lemma[of q a] obtain b p where 
+    bp: "length p = length q" "\<forall>x. poly (b # p) x = (a + x) * poly q x"
+    by blast
+  thus ?case using q bp by - (rule exI[where x="(c + b)#p"], simp)
+qed
+
+text{* An alternative useful formulation of completeness of the reals *}
+lemma real_sup_exists: assumes ex: "\<exists>x. P x" and bz: "\<exists>z. \<forall>x. P x \<longrightarrow> x < z"
+  shows "\<exists>(s::real). \<forall>y. (\<exists>x. P x \<and> y < x) \<longleftrightarrow> y < s"
+proof-
+  from ex bz obtain x Y where x: "P x" and Y: "\<And>x. P x \<Longrightarrow> x < Y"  by blast
+  from ex have thx:"\<exists>x. x \<in> Collect P" by blast
+  from bz have thY: "\<exists>Y. isUb UNIV (Collect P) Y" 
+    by(auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def order_le_less)
+  from reals_complete[OF thx thY] obtain L where L: "isLub UNIV (Collect P) L"
+    by blast
+  from Y[OF x] have xY: "x < Y" .
+  from L have L': "\<forall>x. P x \<longrightarrow> x \<le> L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def)  
+  from Y have Y': "\<forall>x. P x \<longrightarrow> x \<le> Y" 
+    apply (clarsimp, atomize (full)) by auto 
+  from L Y' have "L \<le> Y" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def)
+  {fix y
+    {fix z assume z: "P z" "y < z"
+      from L' z have "y < L" by auto }
+    moreover
+    {assume yL: "y < L" "\<forall>z. P z \<longrightarrow> \<not> y < z"
+      hence nox: "\<forall>z. P z \<longrightarrow> y \<ge> z" by auto
+      from nox L have "y \<ge> L" by (auto simp add: isUb_def isLub_def setge_def setle_def leastP_def Ball_def) 
+      with yL(1) have False  by arith}
+    ultimately have "(\<exists>x. P x \<and> y < x) \<longleftrightarrow> y < L" by blast}
+  thus ?thesis by blast
+qed
+
+
+subsection{* Some theorems about sequences *}
+text{* Given a binary function @{text "f:: nat \<Rightarrow> 'a \<Rightarrow> 'a"} and a start value, the function defined from them by primitive recursion is uniquely determined *}
+
+lemma num_Axiom: "EX! g. g 0 = e \<and> (\<forall>n. g (Suc n) = f n (g n))"
+  unfolding Ex1_def
+  apply (rule_tac x="nat_rec e f" in exI)
+  apply (rule conjI)+
+apply (rule def_nat_rec_0, simp)
+apply (rule allI, rule def_nat_rec_Suc, simp)
+apply (rule allI, rule impI, rule ext)
+apply (erule conjE)
+apply (induct_tac x)
+apply (simp add: nat_rec_0)
+apply (erule_tac x="n" in allE)
+apply (simp)
+done
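For instance (an illustration only), taking e = 1 and f n x = Suc n * x in num_Axiom yields the factorial function as the unique g: g 0 = 1 and g (Suc n) = Suc n * g n.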
+
+ text{* An equivalent formulation of monotonicity -- not used here, but might be useful *}
+lemma mono_Suc: "mono f = (\<forall>n. (f n :: 'a :: order) \<le> f (Suc n))"
+unfolding mono_def
+proof auto
+  fix A B :: nat
+  assume H: "\<forall>n. f n \<le> f (Suc n)" "A \<le> B"
+  hence "\<exists>k. B = A + k" apply -  apply (thin_tac "\<forall>n. f n \<le> f (Suc n)") 
+    by presburger
+  then obtain k where k: "B = A + k" by blast
+  {fix a k
+    have "f a \<le> f (a + k)"
+    proof (induct k)
+      case 0 thus ?case by simp
+    next
+      case (Suc k)
+      from Suc.hyps H(1)[rule_format, of "a + k"] show ?case by simp
+    qed}
+  with k show "f A \<le> f B" by blast
+qed
+
+text{* For any sequence, there is a monotonic subsequence *}
+lemma seq_monosub: "\<exists>f. subseq f \<and> monoseq (\<lambda> n. (s (f n)))"
+proof-
+  {assume H: "\<forall>n. \<exists>p >n. \<forall> m\<ge>p. s m \<le> s p"
+    let ?P = "\<lambda> p n. p > n \<and> (\<forall>m \<ge> p. s m \<le> s p)"
+    from num_Axiom[of "SOME p. ?P p 0" "\<lambda>p n. SOME p. ?P p n"]
+    obtain f where f: "f 0 = (SOME p. ?P p 0)" "\<forall>n. f (Suc n) = (SOME p. ?P p (f n))" by blast
+    have "?P (f 0) 0"  unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p 0"]
+      using H apply - 
+      apply (erule allE[where x=0], erule exE, rule_tac x="p" in exI) 
+      unfolding order_le_less by blast 
+    hence f0: "f 0 > 0" "\<forall>m \<ge> f 0. s m \<le> s (f 0)" by blast+
+    {fix n
+      have "?P (f (Suc n)) (f n)" 
+	unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
+	using H apply - 
+      apply (erule allE[where x="f n"], erule exE, rule_tac x="p" in exI) 
+      unfolding order_le_less by blast 
+    hence "f (Suc n) > f n" "\<forall>m \<ge> f (Suc n). s m \<le> s (f (Suc n))" by blast+}
+  note fSuc = this
+    {fix p q assume pq: "p \<ge> f q"
+      have "s p \<le> s(f(q))"  using f0(2)[rule_format, of p] pq fSuc
+	by (cases q, simp_all) }
+    note pqth = this
+    {fix q
+      have "f (Suc q) > f q" apply (induct q) 
+	using f0(1) fSuc(1)[of 0] apply simp by (rule fSuc(1))}
+    note fss = this
+    from fss have th1: "subseq f" unfolding subseq_Suc_iff ..
+    {fix a b 
+      have "f a \<le> f (a + b)"
+      proof(induct b)
+	case 0 thus ?case by simp
+      next
+	case (Suc b)
+	from fSuc(1)[of "a + b"] Suc.hyps show ?case by simp
+      qed}
+    note fmon0 = this
+    have "monoseq (\<lambda>n. s (f n))" 
+    proof-
+      {fix n
+	have "s (f n) \<ge> s (f (Suc n))" 
+	proof(cases n)
+	  case 0
+	  assume n0: "n = 0"
+	  from fSuc(1)[of 0] have th0: "f 0 \<le> f (Suc 0)" by simp
+	  from f0(2)[rule_format, OF th0] show ?thesis  using n0 by simp
+	next
+	  case (Suc m)
+	  assume m: "n = Suc m"
+	  from fSuc(1)[of n] m have th0: "f (Suc m) \<le> f (Suc (Suc m))" by simp
+	  from m fSuc(2)[rule_format, OF th0] show ?thesis by simp 
+	qed}
+      thus "monoseq (\<lambda>n. s (f n))" unfolding monoseq_Suc by blast 
+    qed
+    with th1 have ?thesis by blast}
+  moreover
+  {fix N assume N: "\<forall>p >N. \<exists> m\<ge>p. s m > s p"
+    {fix p assume p: "p \<ge> Suc N" 
+      hence pN: "p > N" by arith with N obtain m where m: "m \<ge> p" "s m > s p" by blast
+      have "m \<noteq> p" using m(2) by auto 
+      with m have "\<exists>m>p. s p < s m" by - (rule exI[where x=m], auto)}
+    note th0 = this
+    let ?P = "\<lambda>m x. m > x \<and> s x < s m"
+    from num_Axiom[of "SOME x. ?P x (Suc N)" "\<lambda>m x. SOME y. ?P y x"]
+    obtain f where f: "f 0 = (SOME x. ?P x (Suc N))" 
+      "\<forall>n. f (Suc n) = (SOME m. ?P m (f n))" by blast
+    have "?P (f 0) (Suc N)"  unfolding f(1) some_eq_ex[of "\<lambda>p. ?P p (Suc N)"]
+      using N apply - 
+      apply (erule allE[where x="Suc N"], clarsimp)
+      apply (rule_tac x="m" in exI)
+      apply auto
+      apply (subgoal_tac "Suc N \<noteq> m")
+      apply simp
+      apply (rule ccontr, simp)
+      done
+    hence f0: "f 0 > Suc N" "s (Suc N) < s (f 0)" by blast+
+    {fix n
+      have "f n > N \<and> ?P (f (Suc n)) (f n)"
+	unfolding f(2)[rule_format, of n] some_eq_ex[of "\<lambda>p. ?P p (f n)"]
+      proof (induct n)
+	case 0 thus ?case
+	  using f0 N apply auto 
+	  apply (erule allE[where x="f 0"], clarsimp) 
+	  apply (rule_tac x="m" in exI, simp)
+	  by (subgoal_tac "f 0 \<noteq> m", auto)
+      next
+	case (Suc n)
+	from Suc.hyps have Nfn: "N < f n" by blast
+	from Suc.hyps obtain m where m: "m > f n" "s (f n) < s m" by blast
+	with Nfn have mN: "m > N" by arith
+	note key = Suc.hyps[unfolded some_eq_ex[of "\<lambda>p. ?P p (f n)", symmetric] f(2)[rule_format, of n, symmetric]]
+	
+	from key have th0: "f (Suc n) > N" by simp
+	from N[rule_format, OF th0]
+	obtain m' where m': "m' \<ge> f (Suc n)" "s (f (Suc n)) < s m'" by blast
+	have "m' \<noteq> f (Suc (n))" apply (rule ccontr) using m'(2) by auto
+	hence "m' > f (Suc n)" using m'(1) by simp
+	with key m'(2) show ?case by auto
+      qed}
+    note fSuc = this
+    {fix n
+      have "f n \<ge> Suc N \<and> f(Suc n) > f n \<and> s(f n) < s(f(Suc n))" using fSuc[of n] by auto 
+      hence "f n \<ge> Suc N" "f(Suc n) > f n" "s(f n) < s(f(Suc n))" by blast+}
+    note thf = this
+    have sqf: "subseq f" unfolding subseq_Suc_iff using thf by simp
+    have "monoseq (\<lambda>n. s (f n))"  unfolding monoseq_Suc using thf
+      apply -
+      apply (rule disjI1)
+      apply auto
+      apply (rule order_less_imp_le)
+      apply blast
+      done
+    then have ?thesis  using sqf by blast}
+  ultimately show ?thesis unfolding linorder_not_less[symmetric] by blast
+qed
+
+lemma seq_suble: assumes sf: "subseq f" shows "n \<le> f n"
+proof(induct n)
+  case 0 thus ?case by simp
+next
+  case (Suc n)
+  from sf[unfolded subseq_Suc_iff, rule_format, of n] Suc.hyps
+  have "n < f (Suc n)" by arith 
+  thus ?case by arith
+qed
+
+subsection {* Fundamental theorem of algebra *}
+lemma  unimodular_reduce_norm:
+  assumes md: "cmod z = 1"
+  shows "cmod (z + 1) < 1 \<or> cmod (z - 1) < 1 \<or> cmod (z + ii) < 1 \<or> cmod (z - ii) < 1"
+proof-
+  obtain x y where z: "z = Complex x y " by (cases z, auto)
+  from md z have xy: "x^2 + y^2 = 1" by (simp add: cmod_def)
+  {assume C: "cmod (z + 1) \<ge> 1" "cmod (z - 1) \<ge> 1" "cmod (z + ii) \<ge> 1" "cmod (z - ii) \<ge> 1"
+    from C z xy have "2*x \<le> 1" "2*x \<ge> -1" "2*y \<le> 1" "2*y \<ge> -1"
+      by (simp_all add: cmod_def power2_eq_square ring_simps)
+    hence "abs (2*x) \<le> 1" "abs (2*y) \<le> 1" by simp_all
+    hence "(abs (2 * x))^2 <= 1^2" "(abs (2 * y)) ^2 <= 1^2"
+      by - (rule power_mono, simp, simp)+
+    hence th0: "4*x^2 \<le> 1" "4*y^2 \<le> 1" 
+      by (simp_all  add: power2_abs power_mult_distrib)
+    from add_mono[OF th0] xy have False by simp }
+  thus ?thesis unfolding linorder_not_le[symmetric] by blast
+qed
+
+text{* Hence we can always reduce the modulus of @{text "1 + b z^n"} below 1 when @{text b} and @{text n} are nonzero *}
+lemma reduce_poly_simple:
+ assumes b: "b \<noteq> 0" and n: "n\<noteq>0"
+  shows "\<exists>z. cmod (1 + b * z^n) < 1"
+using n
+proof(induct n rule: nat_less_induct)
+  fix n
+  assume IH: "\<forall>m<n. m \<noteq> 0 \<longrightarrow> (\<exists>z. cmod (1 + b * z ^ m) < 1)" and n: "n \<noteq> 0"
+  let ?P = "\<lambda>z n. cmod (1 + b * z ^ n) < 1"
+  {assume e: "even n"
+    hence "\<exists>m. n = 2*m" by presburger
+    then obtain m where m: "n = 2*m" by blast
+    from n m have "m\<noteq>0" "m < n" by presburger+
+    with IH[rule_format, of m] obtain z where z: "?P z m" by blast
+    from z have "?P (csqrt z) n" by (simp add: m power_mult csqrt)
+    hence "\<exists>z. ?P z n" ..}
+  moreover
+  {assume o: "odd n"
+    from b have b': "b^2 \<noteq> 0" unfolding power2_eq_square by simp
+    have "Im (inverse b) * (Im (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) +
+    Re (inverse b) * (Re (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) = 
+    ((Re (inverse b))^2 + (Im (inverse b))^2) * \<bar>Im b * Im b + Re b * Re b\<bar>" by algebra
+    also have "\<dots> = cmod (inverse b) ^2 * cmod b ^ 2" 
+      apply (simp add: cmod_def) using realpow_two_le_add_order[of "Re b" "Im b"]
+      by (simp add: power2_eq_square)
+    finally 
+    have th0: "Im (inverse b) * (Im (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) +
+    Re (inverse b) * (Re (inverse b) * \<bar>Im b * Im b + Re b * Re b\<bar>) =
+    1" 
+      apply (simp add: power2_eq_square norm_mult[symmetric] norm_inverse[symmetric])
+      using right_inverse[OF b']
+      by (simp add: power2_eq_square[symmetric] power_inverse[symmetric] ring_simps)
+    have th0: "cmod (complex_of_real (cmod b) / b) = 1"
+      apply (simp add: complex_Re_mult cmod_def power2_eq_square Re_complex_of_real Im_complex_of_real divide_inverse ring_simps )
+      by (simp add: real_sqrt_mult[symmetric] th0)        
+    from o have "\<exists>m. n = Suc (2*m)" by presburger+
+    then obtain m where m: "n = Suc (2*m)" by blast
+    from unimodular_reduce_norm[OF th0] o
+    have "\<exists>v. cmod (complex_of_real (cmod b) / b + v^n) < 1"
+      apply (cases "cmod (complex_of_real (cmod b) / b + 1) < 1", rule_tac x="1" in exI, simp)
+      apply (cases "cmod (complex_of_real (cmod b) / b - 1) < 1", rule_tac x="-1" in exI, simp add: diff_def)
+      apply (cases "cmod (complex_of_real (cmod b) / b + ii) < 1")
+      apply (cases "even m", rule_tac x="ii" in exI, simp add: m power_mult)
+      apply (rule_tac x="- ii" in exI, simp add: m power_mult)
+      apply (cases "even m", rule_tac x="- ii" in exI, simp add: m power_mult diff_def)
+      apply (rule_tac x="ii" in exI, simp add: m power_mult diff_def)
+      done
+    then obtain v where v: "cmod (complex_of_real (cmod b) / b + v^n) < 1" by blast
+    let ?w = "v / complex_of_real (root n (cmod b))"
+    from odd_real_root_pow[OF o, of "cmod b"]
+    have th1: "?w ^ n = v^n / complex_of_real (cmod b)" 
+      by (simp add: power_divide complex_of_real_power)
+    have th2:"cmod (complex_of_real (cmod b) / b) = 1" using b by (simp add: norm_divide)
+    hence th3: "cmod (complex_of_real (cmod b) / b) \<ge> 0" by simp
+    have th4: "cmod (complex_of_real (cmod b) / b) *
+   cmod (1 + b * (v ^ n / complex_of_real (cmod b)))
+   < cmod (complex_of_real (cmod b) / b) * 1"
+      apply (simp only: norm_mult[symmetric] right_distrib)
+      using b v by (simp add: th2)
+
+    from mult_less_imp_less_left[OF th4 th3]
+    have "?P ?w n" unfolding th1 . 
+    hence "\<exists>z. ?P z n" .. }
+  ultimately show "\<exists>z. ?P z n" by blast
+qed
+
+
+text{* Bolzano-Weierstrass type property for a closed disc in the complex plane. *}
+
+lemma metric_bound_lemma: "cmod (x - y) <= \<bar>Re x - Re y\<bar> + \<bar>Im x - Im y\<bar>"
+  using real_sqrt_sum_squares_triangle_ineq[of "Re x - Re y" 0 0 "Im x - Im y" ]
+  unfolding cmod_def by simp
+
+lemma bolzano_weierstrass_complex_disc:
+  assumes r: "\<forall>n. cmod (s n) \<le> r"
+  shows "\<exists>f z. subseq f \<and> (\<forall>e >0. \<exists>N. \<forall>n \<ge> N. cmod (s (f n) - z) < e)"
+proof-
+  from seq_monosub[of "Re o s"] 
+  obtain f g where f: "subseq f" "monoseq (\<lambda>n. Re (s (f n)))" 
+    unfolding o_def by blast
+  from seq_monosub[of "Im o s o f"] 
+  obtain g where g: "subseq g" "monoseq (\<lambda>n. Im (s(f(g n))))" unfolding o_def by blast  
+  let ?h = "f o g"
+  from r[rule_format, of 0] have rp: "r \<ge> 0" using norm_ge_zero[of "s 0"] by arith 
+  have th:"\<forall>n. r + 1 \<ge> \<bar> Re (s n)\<bar>" 
+  proof
+    fix n
+    from abs_Re_le_cmod[of "s n"] r[rule_format, of n]  show "\<bar>Re (s n)\<bar> \<le> r + 1" by arith
+  qed
+  have conv1: "convergent (\<lambda>n. Re (s ( f n)))"
+    apply (rule Bseq_monoseq_convergent)
+    apply (simp add: Bseq_def)
+    apply (rule exI[where x= "r + 1"])
+    using th rp apply simp
+    using f(2) .
+  have th:"\<forall>n. r + 1 \<ge> \<bar> Im (s n)\<bar>" 
+  proof
+    fix n
+    from abs_Im_le_cmod[of "s n"] r[rule_format, of n]  show "\<bar>Im (s n)\<bar> \<le> r + 1" by arith
+  qed
+
+  have conv2: "convergent (\<lambda>n. Im (s (f (g n))))"
+    apply (rule Bseq_monoseq_convergent)
+    apply (simp add: Bseq_def)
+    apply (rule exI[where x= "r + 1"])
+    using th rp apply simp
+    using g(2) .
+
+  from conv1[unfolded convergent_def] obtain x where "LIMSEQ (\<lambda>n. Re (s (f n))) x" 
+    by blast 
+  hence  x: "\<forall>r>0. \<exists>n0. \<forall>n\<ge>n0. \<bar> Re (s (f n)) - x \<bar> < r" 
+    unfolding LIMSEQ_def real_norm_def .
+
+  from conv2[unfolded convergent_def] obtain y where "LIMSEQ (\<lambda>n. Im (s (f (g n)))) y" 
+    by blast 
+  hence  y: "\<forall>r>0. \<exists>n0. \<forall>n\<ge>n0. \<bar> Im (s (f (g n))) - y \<bar> < r" 
+    unfolding LIMSEQ_def real_norm_def .
+  let ?w = "Complex x y"
+  from f(1) g(1) have hs: "subseq ?h" unfolding subseq_def by auto 
+  {fix e assume ep: "e > (0::real)"
+    hence e2: "e/2 > 0" by simp
+    from x[rule_format, OF e2] y[rule_format, OF e2]
+    obtain N1 N2 where N1: "\<forall>n\<ge>N1. \<bar>Re (s (f n)) - x\<bar> < e / 2" and N2: "\<forall>n\<ge>N2. \<bar>Im (s (f (g n))) - y\<bar> < e / 2" by blast
+    {fix n assume nN12: "n \<ge> N1 + N2"
+      hence nN1: "g n \<ge> N1" and nN2: "n \<ge> N2" using seq_suble[OF g(1), of n] by arith+
+      from add_strict_mono[OF N1[rule_format, OF nN1] N2[rule_format, OF nN2]]
+      have "cmod (s (?h n) - ?w) < e" 
+	using metric_bound_lemma[of "s (f (g n))" ?w] by simp }
+    hence "\<exists>N. \<forall>n\<ge>N. cmod (s (?h n) - ?w) < e" by blast }
+  with hs show ?thesis  by blast  
+qed
+
+text{* Polynomials are continuous. *}
+
+lemma poly_cont:
+  assumes ep: "e > 0" 
+  shows "\<exists>d >0. \<forall>w. 0 < cmod (w - z) \<and> cmod (w - z) < d \<longrightarrow> cmod (poly p w - poly p z) < e"
+proof-
+  from poly_offset[of p z] obtain q where q: "length q = length p" "\<And>x. poly q x = poly p (z + x)" by blast
+  {fix w
+    note q(2)[of "w - z", simplified]}
+  note th = this
+  show ?thesis unfolding th[symmetric]
+  proof(induct q)
+    case Nil thus ?case  using ep by auto
+  next
+    case (Cons c cs)
+    from poly_bound_exists[of 1 "cs"] 
+    obtain m where m: "m > 0" "\<And>z. cmod z \<le> 1 \<Longrightarrow> cmod (poly cs z) \<le> m" by blast
+    from ep m(1) have em0: "e/m > 0" by (simp add: field_simps)
+    have one0: "1 > (0::real)"  by arith
+    from real_lbound_gt_zero[OF one0 em0] 
+    obtain d where d: "d >0" "d < 1" "d < e / m" by blast
+    from d(1,3) m(1) have dm: "d*m > 0" "d*m < e" 
+      by (simp_all add: field_simps real_mult_order)
+    show ?case 
+      proof(rule ex_forward[OF real_lbound_gt_zero[OF one0 em0]], clarsimp simp add: norm_mult)
+	fix d w
+	assume H: "d > 0" "d < 1" "d < e/m" "w\<noteq>z" "cmod (w-z) < d"
+	hence d1: "cmod (w-z) \<le> 1" "d \<ge> 0" by simp_all
+	from H(3) m(1) have dme: "d*m < e" by (simp add: field_simps)
+	from H have th: "cmod (w-z) \<le> d" by simp 
+	from mult_mono[OF th m(2)[OF d1(1)] d1(2) norm_ge_zero] dme
+	show "cmod (w - z) * cmod (poly cs (w - z)) < e" by simp
+      qed  
+    qed
+qed
+
+text{* Hence a polynomial attains its minimum on a closed disc
+  in the complex plane. *}
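+(* The proof combines the Bolzano-Weierstrass property of the closed disc
+   (bolzano_weierstrass_complex_disc) with continuity of poly (poly_cont):
+   a sequence of near-minimisers has a convergent subsequence, and the modulus
+   of poly p at its limit attains the infimum. *)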
+lemma  poly_minimum_modulus_disc:
+  "\<exists>z. \<forall>w. cmod w \<le> r \<longrightarrow> cmod (poly p z) \<le> cmod (poly p w)"
+proof-
+  {assume "\<not> r \<ge> 0" hence ?thesis unfolding linorder_not_le
+      apply -
+      apply (rule exI[where x=0]) 
+      apply auto
+      apply (subgoal_tac "cmod w < 0")
+      apply simp
+      apply arith
+      done }
+  moreover
+  {assume rp: "r \<ge> 0"
+    from rp have "cmod 0 \<le> r \<and> cmod (poly p 0) = - (- cmod (poly p 0))" by simp 
+    hence mth1: "\<exists>x z. cmod z \<le> r \<and> cmod (poly p z) = - x"  by blast
+    {fix x z
+      assume H: "cmod z \<le> r" "cmod (poly p z) = - x" "\<not>x < 1"
+      hence "- x < 0 " by arith
+      with H(2) norm_ge_zero[of "poly p z"]  have False by simp }
+    then have mth2: "\<exists>z. \<forall>x. (\<exists>z. cmod z \<le> r \<and> cmod (poly p z) = - x) \<longrightarrow> x < z" by blast
+    from real_sup_exists[OF mth1 mth2] obtain s where 
+      s: "\<forall>y. (\<exists>x. (\<exists>z. cmod z \<le> r \<and> cmod (poly p z) = - x) \<and> y < x) \<longleftrightarrow>(y < s)" by blast
+    let ?m = "-s"
+    {fix y
+      from s[rule_format, of "-y"] have 
+    "(\<exists>z x. cmod z \<le> r \<and> -(- cmod (poly p z)) < y) \<longleftrightarrow> ?m < y" 
+	unfolding minus_less_iff[of y ] equation_minus_iff by blast }
+    note s1 = this[unfolded minus_minus]
+    from s1[of ?m] have s1m: "\<And>z x. cmod z \<le> r \<Longrightarrow> cmod (poly p z) \<ge> ?m" 
+      by auto
+    {fix n::nat
+      from s1[rule_format, of "?m + 1/real (Suc n)"] 
+      have "\<exists>z. cmod z \<le> r \<and> cmod (poly p z) < - s + 1 / real (Suc n)"
+	by simp}
+    hence th: "\<forall>n. \<exists>z. cmod z \<le> r \<and> cmod (poly p z) < - s + 1 / real (Suc n)" ..
+    from choice[OF th] obtain g where 
+      g: "\<forall>n. cmod (g n) \<le> r" "\<forall>n. cmod (poly p (g n)) <?m+1 /real(Suc n)" 
+      by blast
+    from bolzano_weierstrass_complex_disc[OF g(1)] 
+    obtain f z where fz: "subseq f" "\<forall>e>0. \<exists>N. \<forall>n\<ge>N. cmod (g (f n) - z) < e"
+      by blast    
+    {fix w 
+      assume wr: "cmod w \<le> r"
+      let ?e = "\<bar>cmod (poly p z) - ?m\<bar>"
+      {assume e: "?e > 0"
+	hence e2: "?e/2 > 0" by simp
+	from poly_cont[OF e2, of z p] obtain d where
+	  d: "d>0" "\<forall>w. 0<cmod (w - z)\<and> cmod(w - z) < d \<longrightarrow> cmod(poly p w - poly p z) < ?e/2" by blast
+	{fix w assume w: "cmod (w - z) < d"
+	  have "cmod(poly p w - poly p z) < ?e / 2"
+	    using d(2)[rule_format, of w] w e by (cases "w=z", simp_all)}
+	note th1 = this
+	
+	from fz(2)[rule_format, OF d(1)] obtain N1 where 
+	  N1: "\<forall>n\<ge>N1. cmod (g (f n) - z) < d" by blast
+	from reals_Archimedean2[of "2/?e"] obtain N2::nat where
+	  N2: "2/?e < real N2" by blast
+	have th2: "cmod(poly p (g(f(N1 + N2))) - poly p z) < ?e/2"
+	  using N1[rule_format, of "N1 + N2"] th1 by simp
+	{fix a b e2 m :: real
+	have "a < e2 \<Longrightarrow> abs(b - m) < e2 \<Longrightarrow> 2 * e2 <= abs(b - m) + a
+          ==> False" by arith}
+      note th0 = this
+      have ath: 
+	"\<And>m x e. m <= x \<Longrightarrow>  x < m + e ==> abs(x - m::real) < e" by arith
+      from s1m[OF g(1)[rule_format]]
+      have th31: "?m \<le> cmod(poly p (g (f (N1 + N2))))" .
+      from seq_suble[OF fz(1), of "N1+N2"]
+      have th00: "real (Suc (N1+N2)) \<le> real (Suc (f (N1+N2)))" by simp
+      have th000: "0 \<le> (1::real)" "(1::real) \<le> 1" "real (Suc (N1+N2)) > 0"  
+	using N2 by auto
+      from frac_le[OF th000 th00] have th00: "?m +1 / real (Suc (f (N1 + N2))) \<le> ?m + 1 / real (Suc (N1 + N2))" by simp
+      from g(2)[rule_format, of "f (N1 + N2)"]
+      have th01:"cmod (poly p (g (f (N1 + N2)))) < - s + 1 / real (Suc (f (N1 + N2)))" .
+      from order_less_le_trans[OF th01 th00]
+      have th32: "cmod(poly p (g (f (N1 + N2)))) < ?m + (1/ real(Suc (N1 + N2)))" .
+      from N2 have "2/?e < real (Suc (N1 + N2))" by arith
+      with e2 less_imp_inverse_less[of "2/?e" "real (Suc (N1 + N2))"]
+      have "?e/2 > 1/ real (Suc (N1 + N2))" by (simp add: inverse_eq_divide)
+      with ath[OF th31 th32]
+      have thc1:"\<bar>cmod(poly p (g (f (N1 + N2)))) - ?m\<bar>< ?e/2" by arith  
+      have ath2: "\<And>(a::real) b c m. \<bar>a - b\<bar> <= c ==> \<bar>b - m\<bar> <= \<bar>a - m\<bar> + c" 
+	by arith
+      have th22: "\<bar>cmod (poly p (g (f (N1 + N2)))) - cmod (poly p z)\<bar>
+\<le> cmod (poly p (g (f (N1 + N2))) - poly p z)" 
+	by (simp add: norm_triangle_ineq3)
+      from ath2[OF th22, of ?m]
+      have thc2: "2*(?e/2) \<le> \<bar>cmod(poly p (g (f (N1 + N2)))) - ?m\<bar> + cmod (poly p (g (f (N1 + N2))) - poly p z)" by simp
+      from th0[OF th2 thc1 thc2] have False .}
+      hence "?e = 0" by auto
+      then have "cmod (poly p z) = ?m" by simp  
+      with s1m[OF wr]
+      have "cmod (poly p z) \<le> cmod (poly p w)" by simp }
+    hence ?thesis by blast}
+  ultimately show ?thesis by blast
+qed
+
+lemma "(rcis (sqrt (abs r)) (a/2)) ^ 2 = rcis (abs r) a"
+  unfolding power2_eq_square
+  apply (simp add: rcis_mult)
+  apply (simp add: power2_eq_square[symmetric])
+  done
+
+lemma cispi: "cis pi = -1" 
+  unfolding cis_def
+  by simp
+
+lemma "(rcis (sqrt (abs r)) ((pi + a)/2)) ^ 2 = rcis (- abs r) a"
+  unfolding power2_eq_square
+  apply (simp add: rcis_mult add_divide_distrib)
+  apply (simp add: power2_eq_square[symmetric] rcis_def cispi cis_mult[symmetric])
+  done
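+(* Taken together, the two computations above give an explicit square root of
+   rcis r a in polar form for any real r, whether r is nonnegative or not. *)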
+
+text {* A non-constant polynomial in z goes to infinity as z does. *}
+
+instance complex::idom_char_0 by (intro_classes)
+instance complex :: recpower_idom_char_0 by intro_classes
+
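+(* poly_infinity: if p contains a nonzero coefficient, then for every bound d
+   there is a radius r such that cmod (poly (a#p) z) >= d whenever cmod z >= r. *)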
+lemma poly_infinity:
+  assumes ex: "list_ex (\<lambda>c. c \<noteq> 0) p"
+  shows "\<exists>r. \<forall>z. r \<le> cmod z \<longrightarrow> d \<le> cmod (poly (a#p) z)"
+using ex
+proof(induct p arbitrary: a d)
+  case (Cons c cs a d) 
+  {assume H: "list_ex (\<lambda>c. c\<noteq>0) cs"
+    with Cons.hyps obtain r where r: "\<forall>z. r \<le> cmod z \<longrightarrow> d + cmod a \<le> cmod (poly (c # cs) z)" by blast
+    let ?r = "1 + \<bar>r\<bar>"
+    {fix z assume h: "1 + \<bar>r\<bar> \<le> cmod z"
+      have r0: "r \<le> cmod z" using h by arith
+      from r[rule_format, OF r0]
+      have th0: "d + cmod a \<le> 1 * cmod(poly (c#cs) z)" by arith
+      from h have z1: "cmod z \<ge> 1" by arith
+      from order_trans[OF th0 mult_right_mono[OF z1 norm_ge_zero[of "poly (c#cs) z"]]]
+      have th1: "d \<le> cmod(z * poly (c#cs) z) - cmod a"
+	unfolding norm_mult by (simp add: ring_simps)
+      from complex_mod_triangle_sub[of "z * poly (c#cs) z" a]
+      have th2: "cmod(z * poly (c#cs) z) - cmod a \<le> cmod (poly (a#c#cs) z)" 
+	by (simp add: diff_le_eq ring_simps) 
+      from th1 th2 have "d \<le> cmod (poly (a#c#cs) z)"  by arith}
+    hence ?case by blast}
+  moreover
+  {assume cs0: "\<not> (list_ex (\<lambda>c. c \<noteq> 0) cs)"
+    with Cons.prems have c0: "c \<noteq> 0" by simp
+    from cs0 have cs0': "list_all (\<lambda>c. c = 0) cs" 
+      by (auto simp add: list_all_iff list_ex_iff)
+    {fix z
+      assume h: "(\<bar>d\<bar> + cmod a) / cmod c \<le> cmod z"
+      from c0 have "cmod c > 0" by simp
+      from h c0 have th0: "\<bar>d\<bar> + cmod a \<le> cmod (z*c)" 
+	by (simp add: field_simps norm_mult)
+      have ath: "\<And>mzh mazh ma. mzh <= mazh + ma ==> abs(d) + ma <= mzh ==> d <= mazh" by arith
+      from complex_mod_triangle_sub[of "z*c" a ]
+      have th1: "cmod (z * c) \<le> cmod (a + z * c) + cmod a"
+	by (simp add: ring_simps)
+      from ath[OF th1 th0] have "d \<le> cmod (poly (a # c # cs) z)" 
+	using poly_0[OF cs0'] by simp}
+    then have ?case  by blast}
+  ultimately show ?case by blast
+qed simp
+
+text {* Hence a polynomial's modulus attains its minimum somewhere. *}
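+(* The proof combines poly_infinity (outside a sufficiently large disc the
+   modulus exceeds its value at 0) with poly_minimum_modulus_disc (the minimum
+   over that disc is attained). *)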
+lemma poly_minimum_modulus:
+  "\<exists>z.\<forall>w. cmod (poly p z) \<le> cmod (poly p w)"
+proof(induct p)
+  case (Cons c cs) 
+  {assume cs0: "list_ex (\<lambda>c. c \<noteq> 0) cs"
+    from poly_infinity[OF cs0, of "cmod (poly (c#cs) 0)" c]
+    obtain r where r: "\<And>z. r \<le> cmod z \<Longrightarrow> cmod (poly (c # cs) 0) \<le> cmod (poly (c # cs) z)" by blast
+    have ath: "\<And>z r. r \<le> cmod z \<or> cmod z \<le> \<bar>r\<bar>" by arith
+    from poly_minimum_modulus_disc[of "\<bar>r\<bar>" "c#cs"] 
+    obtain v where v: "\<And>w. cmod w \<le> \<bar>r\<bar> \<Longrightarrow> cmod (poly (c # cs) v) \<le> cmod (poly (c # cs) w)" by blast
+    {fix z assume z: "r \<le> cmod z"
+      from v[of 0] r[OF z] 
+      have "cmod (poly (c # cs) v) \<le> cmod (poly (c # cs) z)"
+	by simp }
+    note v0 = this
+    from v0 v ath[of r] have ?case by blast}
+  moreover
+  {assume cs0: "\<not> (list_ex (\<lambda>c. c\<noteq>0) cs)"
+    hence th:"list_all (\<lambda>c. c = 0) cs" by (simp add: list_all_iff list_ex_iff)
+    from poly_0[OF th] Cons.hyps have ?case by simp}
+  ultimately show ?case by blast
+qed simp
+
+text{* Constant function (non-syntactic characterization). *}
+definition "constant f = (\<forall>x y. f x = f y)"
+
+lemma nonconstant_length: "\<not> (constant (poly p)) \<Longrightarrow> length p \<ge> 2"
+  unfolding constant_def
+  apply (induct p, auto)
+  apply (unfold not_less[symmetric])
+  apply simp
+  apply (rule ccontr)
+  apply auto
+  done
+ 
+lemma poly_replicate_append:
+  "poly ((replicate n 0)@p) (x::'a::{recpower, comm_ring}) = x^n * poly p x"
+  by(induct n, auto simp add: power_Suc ring_simps)
+
+text {* Decomposition of a polynomial, skipping zero coefficients
+  after the first.  *}
+
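+(* For example, poly [0, 0, a, b] z = z^2 * poly [a, b] z, so [0, 0, a, b]
+   decomposes with k = 2 and a # q = [a, b], provided a is nonzero. *)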
+lemma poly_decompose_lemma:
+ assumes nz: "\<not>(\<forall>z. z\<noteq>0 \<longrightarrow> poly p z = (0::'a::{recpower,idom}))"
+  shows "\<exists>k a q. a\<noteq>0 \<and> Suc (length q + k) = length p \<and> 
+                 (\<forall>z. poly p z = z^k * poly (a#q) z)"
+using nz
+proof(induct p)
+  case Nil thus ?case by simp
+next
+  case (Cons c cs)
+  {assume c0: "c = 0"
+    
+    from Cons.hyps Cons.prems c0 have ?case apply auto
+      apply (rule_tac x="k+1" in exI)
+      apply (rule_tac x="a" in exI, clarsimp)
+      apply (rule_tac x="q" in exI)
+      by (auto simp add: power_Suc)}
+  moreover
+  {assume c0: "c\<noteq>0"
+    hence ?case apply-
+      apply (rule exI[where x=0])
+      apply (rule exI[where x=c], clarsimp)
+      apply (rule exI[where x=cs])
+      apply auto
+      done}
+  ultimately show ?case by blast
+qed
+
+lemma poly_decompose:
+  assumes nc: "~constant(poly p)"
+  shows "\<exists>k a q. a\<noteq>(0::'a::{recpower,idom}) \<and> k\<noteq>0 \<and>
+               length q + k + 1 = length p \<and> 
+              (\<forall>z. poly p z = poly p 0 + z^k * poly (a#q) z)"
+using nc 
+proof(induct p)
+  case Nil thus ?case by (simp add: constant_def)
+next
+  case (Cons c cs)
+  {assume C:"\<forall>z. z \<noteq> 0 \<longrightarrow> poly cs z = 0"
+    {fix x y
+      from C have "poly (c#cs) x = poly (c#cs) y" by (cases "x=0", auto)}
+    with Cons.prems have False by (auto simp add: constant_def)}
+  hence th: "\<not> (\<forall>z. z \<noteq> 0 \<longrightarrow> poly cs z = 0)" ..
+  from poly_decompose_lemma[OF th] 
+  show ?case 
+    apply clarsimp    
+    apply (rule_tac x="k+1" in exI)
+    apply (rule_tac x="a" in exI)
+    apply simp
+    apply (rule_tac x="q" in exI)
+    apply (auto simp add: power_Suc)
+    done
+qed
+
+text{* Fundamental theorem of algebra. *}
+
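+(* Proof sketch: strong induction on the length of p.  Choose c minimising
+   cmod (poly p c) (poly_minimum_modulus).  If poly p c were nonzero, shift by c
+   and divide by the constant term to obtain r with poly r 0 = 1, decompose
+   poly r z = 1 + z^k * poly (a#s) z (poly_decompose), and produce a point w
+   with cmod (poly r w) < 1, either by reduce_poly_simple or via the induction
+   hypothesis applied to 1 + a * z^k together with a scaling argument.  This
+   contradicts the minimality of c, so poly p c = 0. *)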
+lemma fundamental_theorem_of_algebra:
+  assumes nc: "~constant(poly p)"
+  shows "\<exists>z::complex. poly p z = 0"
+using nc
+proof(induct n\<equiv> "length p" arbitrary: p rule: nat_less_induct)
+  fix n fix p :: "complex list"
+  let ?p = "poly p"
+  assume H: "\<forall>m<n. \<forall>p. \<not> constant (poly p) \<longrightarrow> m = length p \<longrightarrow> (\<exists>(z::complex). poly p z = 0)" and nc: "\<not> constant ?p" and n: "n = length p"
+  let ?ths = "\<exists>z. ?p z = 0"
+
+  from nonconstant_length[OF nc] have n2: "n\<ge> 2" by (simp add: n)
+  from poly_minimum_modulus obtain c where 
+    c: "\<forall>w. cmod (?p c) \<le> cmod (?p w)" by blast
+  {assume pc: "?p c = 0" hence ?ths by blast}
+  moreover
+  {assume pc0: "?p c \<noteq> 0"
+    from poly_offset[of p c] obtain q where
+      q: "length q = length p" "\<forall>x. poly q x = ?p (c+x)" by blast
+    {assume h: "constant (poly q)"
+      from q(2) have th: "\<forall>x. poly q (x - c) = ?p x" by auto
+      {fix x y
+	from th have "?p x = poly q (x - c)" by auto 
+	also have "\<dots> = poly q (y - c)" 
+	  using h unfolding constant_def by blast
+	also have "\<dots> = ?p y" using th by auto
+	finally have "?p x = ?p y" .}
+      with nc have False unfolding constant_def by blast }
+    hence qnc: "\<not> constant (poly q)" by blast
+    from q(2) have pqc0: "?p c = poly q 0" by simp
+    from c pqc0 have cq0: "\<forall>w. cmod (poly q 0) \<le> cmod (?p w)" by simp 
+    let ?a0 = "poly q 0"
+    from pc0 pqc0 have a00: "?a0 \<noteq> 0" by simp 
+    from a00 
+    have qr: "\<forall>z. poly q z = poly (map (op * (inverse ?a0)) q) z * ?a0"
+      by (simp add: poly_cmult_map)
+    let ?r = "map (op * (inverse ?a0)) q"
+    have lgqr: "length q = length ?r" by simp 
+    {assume h: "\<And>x y. poly ?r x = poly ?r y"
+      {fix x y
+	from qr[rule_format, of x] 
+	have "poly q x = poly ?r x * ?a0" by auto
+	also have "\<dots> = poly ?r y * ?a0" using h by simp
+	also have "\<dots> = poly q y" using qr[rule_format, of y] by simp
+	finally have "poly q x = poly q y" .} 
+      with qnc have False unfolding constant_def by blast}
+    hence rnc: "\<not> constant (poly ?r)" unfolding constant_def by blast
+    from qr[rule_format, of 0] a00  have r01: "poly ?r 0 = 1" by auto
+    {fix w 
+      have "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w / ?a0) < 1"
+	using qr[rule_format, of w] a00 by simp
+      also have "\<dots> \<longleftrightarrow> cmod (poly q w) < cmod ?a0"
+	using a00 unfolding norm_divide by (simp add: field_simps)
+      finally have "cmod (poly ?r w) < 1 \<longleftrightarrow> cmod (poly q w) < cmod ?a0" .}
+    note mrmq_eq = this
+    from poly_decompose[OF rnc] obtain k a s where 
+      kas: "a\<noteq>0" "k\<noteq>0" "length s + k + 1 = length ?r" 
+      "\<forall>z. poly ?r z = poly ?r 0 + z^k* poly (a#s) z" by blast
+    {assume "k + 1 = n"
+      with kas(3) lgqr[symmetric] q(1) n[symmetric] have s0:"s=[]" by auto
+      {fix w
+	have "cmod (poly ?r w) = cmod (1 + a * w ^ k)" 
+	  using kas(4)[rule_format, of w] s0 r01 by (simp add: ring_simps)}
+      note hth = this [symmetric]
+	from reduce_poly_simple[OF kas(1,2)] 
+      have "\<exists>w. cmod (poly ?r w) < 1" unfolding hth by blast}
+    moreover
+    {assume kn: "k+1 \<noteq> n"
+      from kn kas(3) q(1) n[symmetric] have k1n: "k + 1 < n" by simp
+      have th01: "\<not> constant (poly (1#((replicate (k - 1) 0)@[a])))" 
+	unfolding constant_def poly_Nil poly_Cons poly_replicate_append
+	using kas(1) apply simp 
+	by (rule exI[where x=0], rule exI[where x=1], simp)
+      from kas(2) have th02: "k+1 = length (1#((replicate (k - 1) 0)@[a]))" 
+	by simp
+      from H[rule_format, OF k1n th01 th02]
+      obtain w where w: "1 + w^k * a = 0"
+	unfolding poly_Nil poly_Cons poly_replicate_append
+	using kas(2) by (auto simp add: power_Suc[symmetric, of _ "k - Suc 0"] 
+	  mult_assoc[of _ _ a, symmetric])
+      from poly_bound_exists[of "cmod w" s] obtain m where 
+	m: "m > 0" "\<forall>z. cmod z \<le> cmod w \<longrightarrow> cmod (poly s z) \<le> m" by blast
+      have w0: "w\<noteq>0" using kas(2) w by (auto simp add: power_0_left)
+      from w have "(1 + w ^ k * a) - 1 = 0 - 1" by simp
+      then have wm1: "w^k * a = - 1" by simp
+      have inv0: "0 < inverse (cmod w ^ (k + 1) * m)" 
+	using norm_ge_zero[of w] w0 m(1)
+	  by (simp add: inverse_eq_divide zero_less_mult_iff)
+      with real_down2[OF zero_less_one] obtain t where
+	t: "t > 0" "t < 1" "t < inverse (cmod w ^ (k + 1) * m)" by blast
+      let ?ct = "complex_of_real t"
+      let ?w = "?ct * w"
+      have "1 + ?w^k * (a + ?w * poly s ?w) = 1 + ?ct^k * (w^k * a) + ?w^k * ?w * poly s ?w" using kas(1) by (simp add: ring_simps power_mult_distrib)
+      also have "\<dots> = complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w"
+	unfolding wm1 by (simp)
+      finally have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) = cmod (complex_of_real (1 - t^k) + ?w^k * ?w * poly s ?w)" 
+	apply -
+	apply (rule cong[OF refl[of cmod]])
+	apply assumption
+	done
+      with norm_triangle_ineq[of "complex_of_real (1 - t^k)" "?w^k * ?w * poly s ?w"] 
+      have th11: "cmod (1 + ?w^k * (a + ?w * poly s ?w)) \<le> \<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w)" unfolding norm_of_real by simp 
+      have ath: "\<And>x (t::real). 0\<le> x \<Longrightarrow> x < t \<Longrightarrow> t\<le>1 \<Longrightarrow> \<bar>1 - t\<bar> + x < 1" by arith
+      have "t *cmod w \<le> 1 * cmod w" apply (rule mult_mono) using t(1,2) by auto
+      then have tw: "cmod ?w \<le> cmod w" using t(1) by (simp add: norm_mult) 
+      from t inv0 have "t* (cmod w ^ (k + 1) * m) < 1"
+	by (simp add: inverse_eq_divide field_simps)
+      with zero_less_power[OF t(1), of k] 
+      have th30: "t^k * (t* (cmod w ^ (k + 1) * m)) < t^k * 1" 
+	apply - apply (rule mult_strict_left_mono) by simp_all
+      have "cmod (?w^k * ?w * poly s ?w) = t^k * (t* (cmod w ^ (k+1) * cmod (poly s ?w)))"  using w0 t(1)
+	by (simp add: ring_simps power_mult_distrib norm_of_real norm_power norm_mult)
+      then have "cmod (?w^k * ?w * poly s ?w) \<le> t^k * (t* (cmod w ^ (k + 1) * m))"
+	using t(1,2) m(2)[rule_format, OF tw] w0
+	apply (simp only: )
+	apply auto
+	apply (rule mult_mono, simp_all add: norm_ge_zero)+
+	apply (simp add: zero_le_mult_iff zero_le_power)
+	done
+      with th30 have th120: "cmod (?w^k * ?w * poly s ?w) < t^k" by simp 
+      from power_strict_mono[OF t(2), of k] t(1) kas(2) have th121: "t^k \<le> 1" 
+	by auto
+      from ath[OF norm_ge_zero[of "?w^k * ?w * poly s ?w"] th120 th121]
+      have th12: "\<bar>1 - t^k\<bar> + cmod (?w^k * ?w * poly s ?w) < 1" . 
+      from th11 th12
+      have "cmod (1 + ?w^k * (a + ?w * poly s ?w)) < 1"  by arith 
+      then have "cmod (poly ?r ?w) < 1" 
+	unfolding kas(4)[rule_format, of ?w] r01 by simp 
+      then have "\<exists>w. cmod (poly ?r w) < 1" by blast}
+    ultimately have cr0_contr: "\<exists>w. cmod (poly ?r w) < 1" by blast
+    from cr0_contr cq0 q(2)
+    have ?ths unfolding mrmq_eq not_less[symmetric] by auto}
+  ultimately show ?ths by blast
+qed
+
+text {* Alternative version with a syntactic notion of constant polynomial. *}
+
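+(* Here "constant" is the syntactic condition p = a # l with a nonzero and all
+   elements of l zero; every polynomial not of this form has a complex root. *)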
+lemma fundamental_theorem_of_algebra_alt:
+  assumes nc: "~(\<exists>a l. a\<noteq> 0 \<and> list_all(\<lambda>b. b = 0) l \<and> p = a#l)"
+  shows "\<exists>z. poly p z = (0::complex)"
+using nc
+proof(induct p)
+  case (Cons c cs)
+  {assume "c=0" hence ?case by auto}
+  moreover
+  {assume c0: "c\<noteq>0"
+    {assume nc: "constant (poly (c#cs))"
+      from nc[unfolded constant_def, rule_format, of 0] 
+      have "\<forall>w. w \<noteq> 0 \<longrightarrow> poly cs w = 0" by auto 
+      hence "list_all (\<lambda>c. c=0) cs"
+	proof(induct cs)
+	  case (Cons d ds)
+	  {assume "d=0" hence ?case using Cons.prems Cons.hyps by simp}
+	  moreover
+	  {assume d0: "d\<noteq>0"
+	    from poly_bound_exists[of 1 ds] obtain m where 
+	      m: "m > 0" "\<forall>z. cmod z \<le> 1 \<longrightarrow> cmod (poly ds z) \<le> m" by blast
+	    have dm: "cmod d / m > 0" using d0 m(1) by (simp add: field_simps)
+	    from real_down2[OF dm zero_less_one] obtain x where 
+	      x: "x > 0" "x < cmod d / m" "x < 1" by blast
+	    let ?x = "complex_of_real x"
+	    from x have cx: "?x \<noteq> 0"  "cmod ?x \<le> 1" by simp_all
+	    from Cons.prems[rule_format, OF cx(1)]
+	    have cth: "cmod (?x*poly ds ?x) = cmod d" by (simp add: eq_diff_eq[symmetric])
+	    from m(2)[rule_format, OF cx(2)] x(1)
+	    have th0: "cmod (?x*poly ds ?x) \<le> x*m"
+	      by (simp add: norm_mult)
+	    from x(2) m(1) have "x*m < cmod d" by (simp add: field_simps)
+	    with th0 have "cmod (?x*poly ds ?x) \<noteq> cmod d" by auto
+	    with cth  have ?case by blast}
+	  ultimately show ?case by blast 
+	qed simp}
+      then have nc: "\<not> constant (poly (c#cs))" using Cons.prems c0 
+	by blast
+      from fundamental_theorem_of_algebra[OF nc] have ?case .}
+  ultimately show ?case by blast  
+qed simp
+
+subsection{* Nullstellensatz, degrees and divisibility of polynomials *}
+
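+(* Informally, nullstellensatz_univariate below states: every root of p is a
+   root of q iff p divides q ^ degree p, or p and q both denote the zero
+   polynomial. *)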
+lemma nullstellensatz_lemma:
+  fixes p :: "complex list"
+  assumes "\<forall>x. poly p x = 0 \<longrightarrow> poly q x = 0"
+  and "degree p = n" and "n \<noteq> 0"
+  shows "p divides (pexp q n)"
+using prems
+proof(induct n arbitrary: p q rule: nat_less_induct)
+  fix n::nat fix p q :: "complex list"
+  assume IH: "\<forall>m<n. \<forall>p q.
+                 (\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longrightarrow>
+                 degree p = m \<longrightarrow> m \<noteq> 0 \<longrightarrow> p divides (q %^ m)"
+    and pq0: "\<forall>x. poly p x = 0 \<longrightarrow> poly q x = 0" 
+    and dpn: "degree p = n" and n0: "n \<noteq> 0"
+  let ?ths = "p divides (q %^ n)"
+  {fix a assume a: "poly p a = 0"
+    {assume p0: "poly p = poly []" 
+      hence ?ths unfolding divides_def  using pq0 n0
+	apply - apply (rule exI[where x="[]"], rule ext)
+	by (auto simp add: poly_mult poly_exp)}
+    moreover
+    {assume p0: "poly p \<noteq> poly []" 
+      and oa: "order  a p \<noteq> 0"
+      from p0 have pne: "p \<noteq> []" by auto
+      let ?op = "order a p"
+      from p0 have ap: "([- a, 1] %^ ?op) divides p" 
+	"\<not> pexp [- a, 1] (Suc ?op) divides p" using order by blast+ 
+      note oop = order_degree[OF p0, unfolded dpn]
+      {assume q0: "q = []"
+	hence ?ths using n0 unfolding divides_def 
+	  apply simp
+	  apply (rule exI[where x="[]"], rule ext)
+	  by (simp add: divides_def poly_exp poly_mult)}
+      moreover
+      {assume q0: "q\<noteq>[]"
+	from pq0[rule_format, OF a, unfolded poly_linear_divides] q0
+	obtain r where r: "q = pmult [- a, 1] r" by blast
+	from ap[unfolded divides_def] obtain s where
+	  s: "poly p = poly (pmult (pexp [- a, 1] ?op) s)" by blast
+	have s0: "poly s \<noteq> poly []"
+	  using s p0 by (simp add: poly_entire)
+	hence pns0: "poly (pnormalize s) \<noteq> poly []" and sne: "s\<noteq>[]" by auto
+	{assume ds0: "degree s = 0"
+	  from ds0 pns0 have "\<exists>k. pnormalize s = [k]" unfolding degree_def 
+	    by (cases "pnormalize s", auto)
+	  then obtain k where kpn: "pnormalize s = [k]" by blast
+	  from pns0[unfolded poly_zero] kpn have k: "k \<noteq>0" "poly s = poly [k]"
+	    using poly_normalize[of s] by simp_all
+	  let ?w = "pmult (pmult [1/k] (pexp [-a,1] (n - ?op))) (pexp r n)"
+	  from k r s oop have "poly (pexp q n) = poly (pmult p ?w)"
+	    by - (rule ext, simp add: poly_mult poly_exp poly_cmult poly_add power_add[symmetric] ring_simps power_mult_distrib[symmetric])
+	  hence ?ths unfolding divides_def by blast}
+	moreover
+	{assume ds0: "degree s \<noteq> 0"
+	  from ds0 s0 dpn degree_unique[OF s, unfolded linear_pow_mul_degree] oa
+	    have dsn: "degree s < n" by auto 
+	    {fix x assume h: "poly s x = 0"
+	      {assume xa: "x = a"
+		from h[unfolded xa poly_linear_divides] sne obtain u where
+		  u: "s = pmult [- a, 1] u" by blast
+		have "poly p = poly (pmult (pexp [- a, 1] (Suc ?op)) u)"
+		  unfolding s u
+		  apply (rule ext)
+		  by (simp add: ring_simps power_mult_distrib[symmetric] poly_mult poly_cmult poly_add poly_exp)
+		with ap(2)[unfolded divides_def] have False by blast}
+	      note xa = this
+	      from h s have "poly p x = 0" by (simp add: poly_mult)
+	      with pq0 have "poly q x = 0" by blast
+	      with r xa have "poly r x = 0"
+		by (auto simp add: poly_mult poly_add poly_cmult eq_diff_eq[symmetric])}
+	    note impth = this
+	    from IH[rule_format, OF dsn, of s r] impth ds0
+	    have "s divides (pexp r (degree s))" by blast
+	    then obtain u where u: "poly (pexp r (degree s)) = poly (pmult s u)"
+	      unfolding divides_def by blast
+	    hence u': "\<And>x. poly s x * poly u x = poly r x ^ degree s"
+	      by (simp add: poly_mult[symmetric] poly_exp[symmetric])
+	    let ?w = "pmult (pmult u (pexp [-a,1] (n - ?op))) (pexp r (n - degree s))"
+	    from u' s r oop[of a] dsn have "poly (pexp q n) = poly (pmult p ?w)"
+	      apply - apply (rule ext)
+	      apply (simp only:  power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult ring_simps)
+	      
+	      apply (simp add:  power_mult_distrib power_add[symmetric] poly_add poly_mult poly_exp poly_cmult mult_assoc[symmetric])
+	      done
+	    hence ?ths unfolding divides_def by blast}
+      ultimately have ?ths by blast }
+      ultimately have ?ths by blast}
+    ultimately have ?ths using a order_root by blast}
+  moreover
+  {assume exa: "\<not> (\<exists>a. poly p a = 0)"
+    from fundamental_theorem_of_algebra_alt[of p] exa obtain c cs where
+      ccs: "c\<noteq>0" "list_all (\<lambda>c. c = 0) cs" "p = c#cs" by blast
+    
+    from poly_0[OF ccs(2)] ccs(3) 
+    have pp: "\<And>x. poly p x =  c" by simp
+    let ?w = "pmult [1/c] (pexp q n)"
+    from pp ccs(1) 
+    have "poly (pexp q n) = poly (pmult p ?w) "
+      apply - apply (rule ext)
+      unfolding poly_mult_assoc[symmetric] by (simp add: poly_mult)
+    hence ?ths unfolding divides_def by blast}
+  ultimately show ?ths by blast
+qed
+
+lemma nullstellensatz_univariate:
+  "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> 
+    p divides (q %^ (degree p)) \<or> (poly p = poly [] \<and> poly q = poly [])"
+proof-
+  {assume pe: "poly p = poly []"
+    hence eq: "(\<forall>x. poly p x = (0::complex) \<longrightarrow> poly q x = 0) \<longleftrightarrow> poly q = poly []"
+      apply auto
+      by (rule ext, simp)
+    {assume "p divides (pexp q (degree p))"
+      then obtain r where r: "poly (pexp q (degree p)) = poly (pmult p r)" 
+	unfolding divides_def by blast
+      from cong[OF r refl] pe degree_unique[OF pe]
+      have False by (simp add: poly_mult degree_def)}
+    with eq pe have ?thesis by blast}
+  moreover
+  {assume pe: "poly p \<noteq> poly []"
+    have p0: "poly [0] = poly []" by (rule ext, simp)
+    {assume dp: "degree p = 0"
+      then obtain k where "pnormalize p = [k]" using pe poly_normalize[of p]
+	unfolding degree_def by (cases "pnormalize p", auto)
+      hence k: "pnormalize p = [k]" "poly p = poly [k]" "k\<noteq>0"
+	using pe poly_normalize[of p] by (auto simp add: p0)
+      hence th1: "\<forall>x. poly p x \<noteq> 0" by simp
+      from k(2,3) dp have "poly (pexp q (degree p)) = poly (pmult p [1/k]) "
+	by - (rule ext, simp add: poly_mult poly_exp)
+      hence th2: "p divides (pexp q (degree p))" unfolding divides_def by blast
+      from th1 th2 pe have ?thesis by blast}
+    moreover
+    {assume dp: "degree p \<noteq> 0"
+      then obtain n where n: "degree p = Suc n " by (cases "degree p", auto)
+      {assume "p divides (pexp q (Suc n))"
+	then obtain u where u: "poly (pexp q (Suc n)) = poly (pmult p u)"
+	  unfolding divides_def by blast
+	hence u' :"\<And>x. poly (pexp q (Suc n)) x = poly (pmult p u) x" by simp_all
+	{fix x assume h: "poly p x = 0" "poly q x \<noteq> 0"
+	  hence "poly (pexp q (Suc n)) x \<noteq> 0" by (simp only: poly_exp) simp	  
+	  hence False using u' h(1) by (simp only: poly_mult poly_exp) simp}}
+	with n nullstellensatz_lemma[of p q "degree p"] dp 
+	have ?thesis by auto}
+    ultimately have ?thesis by blast}
+  ultimately show ?thesis by blast
+qed
+
+text{* Useful lemma: over a characteristic-zero integral domain, a polynomial
+  function is constant iff the polynomial has degree 0. *}
+
+lemma (in idom_char_0) constant_degree: "constant (poly p) \<longleftrightarrow> degree p = 0" (is "?lhs = ?rhs")
+proof
+  assume l: ?lhs
+  from l[unfolded constant_def, rule_format, of _ "zero"]
+  have th: "poly p = poly [poly p 0]" apply - by (rule ext, simp)
+  from degree_unique[OF th] show ?rhs by (simp add: degree_def)
+next
+  assume r: ?rhs
+  from r have "pnormalize p = [] \<or> (\<exists>k. pnormalize p = [k])"
+    unfolding degree_def by (cases "pnormalize p", auto)
+  then show ?lhs unfolding constant_def poly_normalize[of p, symmetric]
+    by (auto simp del: poly_normalize)
+qed
+
+(* It would be nicer to prove this without using algebraic closure...        *)
+
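+(* divides_degree_lemma: multiplication by q cannot decrease the degree of p
+   unless the product denotes the zero polynomial; divides_degree then bounds
+   the degree of any divisor of q by degree q. *)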
+lemma divides_degree_lemma: assumes dpn: "degree (p::complex list) = n"
+  shows "n \<le> degree (p *** q) \<or> poly (p *** q) = poly []"
+  using dpn
+proof(induct n arbitrary: p q)
+  case 0 thus ?case by simp
+next
+  case (Suc n p q)
+  from Suc.prems fundamental_theorem_of_algebra[of p] constant_degree[of p]
+  obtain a where a: "poly p a = 0" by auto
+  then obtain r where r: "p = pmult [-a, 1] r" unfolding poly_linear_divides
+    using Suc.prems by (auto simp add: degree_def)
+  {assume h: "poly (pmult r q) = poly []"
+    hence "poly (pmult p q) = poly []" using r
+      apply - apply (rule ext)  by (auto simp add: poly_entire poly_mult poly_add poly_cmult) hence ?case by blast}
+  moreover
+  {assume h: "poly (pmult r q) \<noteq> poly []" 
+    hence r0: "poly r \<noteq> poly []" and q0: "poly q \<noteq> poly []"
+      by (auto simp add: poly_entire)
+    have eq: "poly (pmult p q) = poly (pmult [-a, 1] (pmult r q))"
+      apply - apply (rule ext)
+      by (simp add: r poly_mult poly_add poly_cmult ring_simps)
+    from linear_mul_degree[OF h, of "- a"]
+    have dqe: "degree (pmult p q) = degree (pmult r q) + 1"
+      unfolding degree_unique[OF eq] .
+    from linear_mul_degree[OF r0, of "- a", unfolded r[symmetric]] r Suc.prems 
+    have dr: "degree r = n" by auto
+    from  Suc.hyps[OF dr, of q] have "Suc n \<le> degree (pmult p q)"
+      unfolding dqe using h by (auto simp del: poly.simps) 
+    hence ?case by blast}
+  ultimately show ?case by blast
+qed
+
+lemma divides_degree: assumes pq: "p divides (q:: complex list)"
+  shows "degree p \<le> degree q \<or> poly q = poly []"
+using pq  divides_degree_lemma[OF refl, of p]
+apply (auto simp add: divides_def poly_entire)
+apply atomize
+apply (erule_tac x="qa" in allE, auto)
+apply (subgoal_tac "degree q = degree (p *** qa)", simp)
+apply (rule degree_unique, simp)
+done
+
+(* Arithmetic operations on multivariate polynomials.                        *)
+
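+(* The following meta-equalities put atomic terms into polynomial form
+   (0 = poly [] x, c = poly [c] x, x = poly [0,1] x), so that they can be used
+   as conversion rules by proof tools. *)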
+lemma mpoly_base_conv: 
+  "(0::complex) \<equiv> poly [] x" "c \<equiv> poly [c] x" "x \<equiv> poly [0,1] x" by simp_all
+
+lemma mpoly_norm_conv: 
+  "poly [0] (x::complex) \<equiv> poly [] x" "poly [poly [] y] x \<equiv> poly [] x" by simp_all
+
+lemma mpoly_sub_conv: 
+  "poly p (x::complex) - poly q x \<equiv> poly p x + -1 * poly q x"
+  by (simp add: diff_def)
+
+lemma poly_pad_rule: "poly p x = 0 ==> poly (0#p) x = (0::complex)" by simp
+
+lemma poly_cancel_eq_conv: "p = (0::complex) \<Longrightarrow> a \<noteq> 0 \<Longrightarrow> (q = 0) \<equiv> (a * q - b * p = 0)" apply (atomize (full)) by auto
+
+lemma resolve_eq_raw:  "poly [] x \<equiv> 0" "poly [c] x \<equiv> (c::complex)" by auto
+lemma  resolve_eq_then: "(P \<Longrightarrow> (Q \<equiv> Q1)) \<Longrightarrow> (\<not>P \<Longrightarrow> (Q \<equiv> Q2))
+  \<Longrightarrow> Q \<equiv> P \<and> Q1 \<or> \<not>P\<and> Q2" apply (atomize (full)) by blast 
+lemma expand_ex_beta_conv: "list_ex P [c] \<equiv> P c" by simp
+
+lemma poly_divides_pad_rule: 
+  fixes p q :: "complex list"
+  assumes pq: "p divides q"
+  shows "p divides ((0::complex)#q)"
+proof-
+  from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast
+  hence "poly (0#q) = poly (p *** ([0,1] *** r))" 
+    by - (rule ext, simp add: poly_mult poly_cmult poly_add)
+  thus ?thesis unfolding divides_def by blast
+qed
+
+lemma poly_divides_pad_const_rule: 
+  fixes p q :: "complex list"
+  assumes pq: "p divides q"
+  shows "p divides (a %* q)"
+proof-
+  from pq obtain r where r: "poly q = poly (p *** r)" unfolding divides_def by blast
+  hence "poly (a %* q) = poly (p *** (a %* r))" 
+    by - (rule ext, simp add: poly_mult poly_cmult poly_add)
+  thus ?thesis unfolding divides_def by blast
+qed
+
+
+lemma poly_divides_conv0:  
+  fixes p :: "complex list"
+  assumes lgpq: "length q < length p" and lq:"last p \<noteq> 0"
+  shows "p divides q \<equiv> (\<not> (list_ex (\<lambda>c. c \<noteq> 0) q))" (is "?lhs \<equiv> ?rhs")
+proof-
+  {assume r: ?rhs 
+    hence eq: "poly q = poly []" unfolding poly_zero 
+      by (simp add: list_all_iff list_ex_iff)
+    hence "poly q = poly (p *** [])" by - (rule ext, simp add: poly_mult)
+    hence ?lhs unfolding divides_def  by blast}
+  moreover
+  {assume l: ?lhs
+    have ath: "\<And>lq lp dq::nat. lq < lp ==> lq \<noteq> 0 \<Longrightarrow> dq <= lq - 1 ==> dq < lp - 1"
+      by arith
+    {assume q0: "length q = 0"
+      hence "q = []" by simp
+      hence ?rhs by simp}
+    moreover
+    {assume lgq0: "length q \<noteq> 0"
+      from pnormalize_length[of q] have dql: "degree q \<le> length q - 1" 
+	unfolding degree_def by simp
+      from ath[OF lgpq lgq0 dql, unfolded pnormal_degree[OF lq, symmetric]] divides_degree[OF l] have "poly q = poly []" by auto
+      hence ?rhs unfolding poly_zero by (simp add: list_all_iff list_ex_iff)}
+    ultimately have ?rhs by blast }
+  ultimately show "?lhs \<equiv> ?rhs" by - (atomize (full), blast) 
+qed
+
+lemma poly_divides_conv1: 
+  assumes a0: "a\<noteq> (0::complex)" and pp': "(p::complex list) divides p'"
+  and qrp': "\<And>x. a * poly q x - poly p' x \<equiv> poly r x"
+  shows "p divides q \<equiv> p divides (r::complex list)" (is "?lhs \<equiv> ?rhs")
+proof-
+  {
+  from pp' obtain t where t: "poly p' = poly (p *** t)" 
+    unfolding divides_def by blast
+  {assume l: ?lhs
+    then obtain u where u: "poly q = poly (p *** u)" unfolding divides_def by blast
+     have "poly r = poly (p *** ((a %* u) +++ (-- t)))"
+       using u qrp' t
+       by - (rule ext, 
+	 simp add: poly_add poly_mult poly_cmult poly_minus ring_simps)
+     then have ?rhs unfolding divides_def by blast}
+  moreover
+  {assume r: ?rhs
+    then obtain u where u: "poly r = poly (p *** u)" unfolding divides_def by blast
+    from u t qrp' a0 have "poly q = poly (p *** ((1/a) %* (u +++ t)))"
+      by - (rule ext, atomize (full), simp add: poly_mult poly_add poly_cmult field_simps)
+    hence ?lhs  unfolding divides_def by blast}
+  ultimately have "?lhs = ?rhs" by blast }
+thus "?lhs \<equiv> ?rhs"  by - (atomize(full), blast) 
+qed
+
+lemma basic_cqe_conv1:
+  "(\<exists>x. poly p x = 0 \<and> poly [] x \<noteq> 0) \<equiv> False"
+  "(\<exists>x. poly [] x \<noteq> 0) \<equiv> False"
+  "(\<exists>x. poly [c] x \<noteq> 0) \<equiv> c\<noteq>0"
+  "(\<exists>x. poly [] x = 0) \<equiv> True"
+  "(\<exists>x. poly [c] x = 0) \<equiv> c = 0" by simp_all
+
+lemma basic_cqe_conv2: 
+  assumes l:"last (a#b#p) \<noteq> 0" 
+  shows "(\<exists>x. poly (a#b#p) x = (0::complex)) \<equiv> True"
+proof-
+  {fix h t
+    assume h: "h\<noteq>0" "list_all (\<lambda>c. c=(0::complex)) t"  "a#b#p = h#t"
+    hence "list_all (\<lambda>c. c= 0) (b#p)" by simp
+    moreover have "last (b#p) \<in> set (b#p)" by simp
+    ultimately have "last (b#p) = 0" by (simp add: list_all_iff)
+    with l have False by simp}
+  hence th: "\<not> (\<exists> h t. h\<noteq>0 \<and> list_all (\<lambda>c. c=0) t \<and> a#b#p = h#t)"
+    by blast
+  from fundamental_theorem_of_algebra_alt[OF th] 
+  show "(\<exists>x. poly (a#b#p) x = (0::complex)) \<equiv> True" by auto
+qed
+
+lemma  basic_cqe_conv_2b: "(\<exists>x. poly p x \<noteq> (0::complex)) \<equiv> (list_ex (\<lambda>c. c \<noteq> 0) p)"
+proof-
+  have "\<not> (list_ex (\<lambda>c. c \<noteq> 0) p) \<longleftrightarrow> poly p = poly []" 
+    by (simp add: poly_zero list_all_iff list_ex_iff)
+  also have "\<dots> \<longleftrightarrow> (\<not> (\<exists>x. poly p x \<noteq> 0))" by (auto intro: ext)
+  finally show "(\<exists>x. poly p x \<noteq> (0::complex)) \<equiv> (list_ex (\<lambda>c. c \<noteq> 0) p)"
+    by - (atomize (full), blast)
+qed
+
+lemma basic_cqe_conv3:
+  fixes p q :: "complex list"
+  assumes l: "last (a#p) \<noteq> 0" 
+  shows "(\<exists>x. poly (a#p) x =0 \<and> poly q x \<noteq> 0) \<equiv> \<not> ((a#p) divides (q %^ (length p)))"
+proof-
+  note np = pnormalize_eq[OF l]
+  {assume "poly (a#p) = poly []" hence False using l
+      unfolding poly_zero apply (auto simp add: list_all_iff del: last.simps)
+      apply (cases p, simp_all) done}
+  then have p0: "poly (a#p) \<noteq> poly []"  by blast
+  from np have dp:"degree (a#p) = length p" by (simp add: degree_def)
+  from nullstellensatz_univariate[of "a#p" q] p0 dp
+  show "(\<exists>x. poly (a#p) x =0 \<and> poly q x \<noteq> 0) \<equiv> \<not> ((a#p) divides (q %^ (length p)))"
+    by - (atomize (full), auto)
+qed
+
+lemma basic_cqe_conv4:
+  fixes p q :: "complex list"
+  assumes h: "\<And>x. poly (q %^ n) x \<equiv> poly r x"
+  shows "p divides (q %^ n) \<equiv> p divides r"
+proof-
+  from h have "poly (q %^ n) = poly r" by (auto intro: ext)  
+  thus "p divides (q %^ n) \<equiv> p divides r" unfolding divides_def by simp
+qed
+
+lemma pmult_Cons_Cons: "((a::complex)#b#p) *** q = (a %*q) +++ (0#((b#p) *** q))"
+  by simp
+
+lemma elim_neg_conv: "- z \<equiv> (-1) * (z::complex)" by simp
+lemma eqT_intr: "PROP P \<Longrightarrow> (True \<Longrightarrow> PROP P )" "PROP P \<Longrightarrow> True" by blast+
+lemma negate_negate_rule: "Trueprop P \<equiv> \<not> P \<equiv> False" by (atomize (full), auto)
+lemma last_simps: "last [x] = x" "last (x#y#ys) = last (y#ys)" by simp_all
+lemma length_simps: "length [] = 0" "length (x#y#xs) = length xs + 2" "length [x] = 1" by simp_all
+
+lemma complex_entire: "(z::complex) \<noteq> 0 \<and> w \<noteq> 0 \<equiv> z*w \<noteq> 0" by simp
+lemma resolve_eq_ne: "(P \<equiv> True) \<equiv> (\<not>P \<equiv> False)" "(P \<equiv> False) \<equiv> (\<not>P \<equiv> True)" 
+  by (atomize (full)) simp_all
+lemma cqe_conv1: "poly [] x = 0 \<longleftrightarrow> True"  by simp
+lemma cqe_conv2: "(p \<Longrightarrow> (q \<equiv> r)) \<equiv> ((p \<and> q) \<equiv> (p \<and> r))"  (is "?l \<equiv> ?r")
+proof
+  assume "p \<Longrightarrow> q \<equiv> r" thus "p \<and> q \<equiv> p \<and> r" apply - apply (atomize (full)) by blast
+next
+  assume "p \<and> q \<equiv> p \<and> r" "p"
+  thus "q \<equiv> r" apply - apply (atomize (full)) apply blast done
+qed
+lemma poly_const_conv: "poly [c] (x::complex) = y \<longleftrightarrow> c = y" by simp
+
+end
\ No newline at end of file
--- a/src/HOL/Groebner_Basis.thy	Mon Dec 29 11:04:27 2008 -0800
+++ b/src/HOL/Groebner_Basis.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -1,5 +1,4 @@
 (*  Title:      HOL/Groebner_Basis.thy
-    ID:         $Id$
     Author:     Amine Chaieb, TU Muenchen
 *)
 
@@ -164,8 +163,8 @@
 
 end
 
-interpretation class_semiring: gb_semiring
-    ["op +" "op *" "op ^" "0::'a::{comm_semiring_1, recpower}" "1"]
+interpretation class_semiring!: gb_semiring
+    "op +" "op *" "op ^" "0::'a::{comm_semiring_1, recpower}" "1"
   proof qed (auto simp add: ring_simps power_Suc)
 
 lemmas nat_arith =
@@ -243,8 +242,8 @@
 end
 
 
-interpretation class_ring: gb_ring ["op +" "op *" "op ^"
-    "0::'a::{comm_semiring_1,recpower,number_ring}" 1 "op -" "uminus"]
+interpretation class_ring!: gb_ring "op +" "op *" "op ^"
+    "0::'a::{comm_semiring_1,recpower,number_ring}" 1 "op -" "uminus"
   proof qed simp_all
 
 
@@ -344,8 +343,8 @@
   thus "b = 0" by blast
 qed
 
-interpretation class_ringb: ringb
-  ["op +" "op *" "op ^" "0::'a::{idom,recpower,number_ring}" "1" "op -" "uminus"]
+interpretation class_ringb!: ringb
+  "op +" "op *" "op ^" "0::'a::{idom,recpower,number_ring}" "1" "op -" "uminus"
 proof(unfold_locales, simp add: ring_simps power_Suc, auto)
   fix w x y z ::"'a::{idom,recpower,number_ring}"
   assume p: "w * y + x * z = w * z + x * y" and ynz: "y \<noteq> z"
@@ -360,8 +359,8 @@
 
 declaration {* normalizer_funs @{thm class_ringb.ringb_axioms'} *}
 
-interpretation natgb: semiringb
-  ["op +" "op *" "op ^" "0::nat" "1"]
+interpretation natgb!: semiringb
+  "op +" "op *" "op ^" "0::nat" "1"
 proof (unfold_locales, simp add: ring_simps power_Suc)
   fix w x y z ::"nat"
   { assume p: "w * y + x * z = w * z + x * y" and ynz: "y \<noteq> z"
@@ -465,8 +464,8 @@
 
 subsection{* Groebner Bases for fields *}
 
-interpretation class_fieldgb:
-  fieldgb["op +" "op *" "op ^" "0::'a::{field,recpower,number_ring}" "1" "op -" "uminus" "op /" "inverse"] apply (unfold_locales) by (simp_all add: divide_inverse)
+interpretation class_fieldgb!:
+  fieldgb "op +" "op *" "op ^" "0::'a::{field,recpower,number_ring}" "1" "op -" "uminus" "op /" "inverse" apply (unfold_locales) by (simp_all add: divide_inverse)
 
 lemma divide_Numeral1: "(x::'a::{field,number_ring}) / Numeral1 = x" by simp
 lemma divide_Numeral0: "(x::'a::{field,number_ring, division_by_zero}) / Numeral0 = 0"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/HahnBanach/Bounds.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -0,0 +1,82 @@
+(*  Title:      HOL/HahnBanach/Bounds.thy
+    Author:     Gertrud Bauer, TU Munich
+*)
+
+header {* Bounds *}
+
+theory Bounds
+imports Main ContNotDenum
+begin
+
+locale lub =
+  fixes A and x
+  assumes least [intro?]: "(\<And>a. a \<in> A \<Longrightarrow> a \<le> b) \<Longrightarrow> x \<le> b"
+    and upper [intro?]: "a \<in> A \<Longrightarrow> a \<le> x"
+
+lemmas [elim?] = lub.least lub.upper
+
+definition
+  the_lub :: "'a::order set \<Rightarrow> 'a" where
+  "the_lub A = The (lub A)"
+
+notation (xsymbols)
+  the_lub  ("\<Squnion>_" [90] 90)
+
+lemma the_lub_equality [elim?]:
+  assumes "lub A x"
+  shows "\<Squnion>A = (x::'a::order)"
+proof -
+  interpret lub A x by fact
+  show ?thesis
+  proof (unfold the_lub_def)
+    from `lub A x` show "The (lub A) = x"
+    proof
+      fix x' assume lub': "lub A x'"
+      show "x' = x"
+      proof (rule order_antisym)
+	from lub' show "x' \<le> x"
+	proof
+          fix a assume "a \<in> A"
+          then show "a \<le> x" ..
+	qed
+	show "x \<le> x'"
+	proof
+          fix a assume "a \<in> A"
+          with lub' show "a \<le> x'" ..
+	qed
+      qed
+    qed
+  qed
+qed
+
+lemma the_lubI_ex:
+  assumes ex: "\<exists>x. lub A x"
+  shows "lub A (\<Squnion>A)"
+proof -
+  from ex obtain x where x: "lub A x" ..
+  also from x have [symmetric]: "\<Squnion>A = x" ..
+  finally show ?thesis .
+qed
+
+lemma lub_compat: "lub A x = isLub UNIV A x"
+proof -
+  have "isUb UNIV A = (\<lambda>x. A *<= x \<and> x \<in> UNIV)"
+    by (rule ext) (simp only: isUb_def)
+  then show ?thesis
+    by (simp only: lub_def isLub_def leastP_def setge_def setle_def) blast
+qed
+
+lemma real_complete:
+  fixes A :: "real set"
+  assumes nonempty: "\<exists>a. a \<in> A"
+    and ex_upper: "\<exists>y. \<forall>a \<in> A. a \<le> y"
+  shows "\<exists>x. lub A x"
+proof -
+  from ex_upper have "\<exists>y. isUb UNIV A y"
+    unfolding isUb_def setle_def by blast
+  with nonempty have "\<exists>x. isLub UNIV A x"
+    by (rule reals_complete)
+  then show ?thesis by (simp only: lub_compat)
+qed
+
+end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/HahnBanach/FunctionNorm.thy	Mon Jan 05 07:54:16 2009 -0800
@@ -0,0 +1,278 @@
+(*  Title:      HOL/HahnBanach/FunctionNorm.thy
+    Author:     Gertrud Bauer, TU Munich
+*)
+
+header {* The norm of a function *}
+
+theory FunctionNorm
+imports NormedSpace FunctionOrder
+begin
+
+subsection {* Continuous linear forms *}
+
+text {*
+  A linear form @{text f} on a normed vector space @{text "(V, \<parallel>\<cdot>\<parallel>)"}
+  is \emph{continuous} iff it is bounded, i.e.
+  \begin{center}
+  @{text "\<exists>c \<in> R. \<forall>x \<in> V. \<bar>f x\<bar> \<le> c \<cdot> \<parallel>x\<parallel>"}
+  \end{center}
+  In our application no functions other than linear forms are
+  considered, so we can define continuous linear forms as bounded
+  linear forms:
+*}
+
+locale continuous = var_V + norm_syntax + linearform +
+  assumes bounded: "\<exists>c. \<forall>x \<in> V. \<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>"
+
+declare continuous.intro [intro?] continuous_axioms.intro [intro?]
+
+lemma continuousI [intro]:
+  fixes norm :: "_ \<Rightarrow> real"  ("\<parallel>_\<parallel>")
+  assumes "linearform V f"
+  assumes r: "\<And>x. x \<in> V \<Longrightarrow> \<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>"
+  shows "continuous V norm f"
+proof
+  show "linearform V f" by fact
+  from r have "\<exists>c. \<forall>x\<in>V. \<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>" by blast
+  then show "continuous_axioms V norm f" ..
+qed
+
+
+subsection {* The norm of a linear form *}
+
+text {*
+  The least real number @{text c} for which
+  \begin{center}
+  @{text "\<forall>x \<in> V. \<bar>f x\<bar> \<le> c \<cdot> \<parallel>x\<parallel>"}
+  \end{center}
+  holds is called the \emph{norm} of @{text f}.
+
+  For non-trivial vector spaces @{text "V \<noteq> {0}"} the norm can be
+  defined as
+  \begin{center}
+  @{text "\<parallel>f\<parallel> = \<sup>x \<noteq> 0. \<bar>f x\<bar> / \<parallel>x\<parallel>"}
+  \end{center}
+
+  For the case @{text "V = {0}"} the supremum would be taken from an
+  empty set. Since @{text \<real>} is unbounded, there would be no supremum.
+  To avoid this situation it must be guaranteed that there is an
+  element in this set. This element must be @{text "{} \<ge> 0"} so that
+  @{text fn_norm} has the norm properties. Furthermore it must not
+  change the norm in all other cases, so it must be @{text 0},
+  as all other elements are @{text "{} \<ge> 0"}.
+
+  Thus we define the set @{text B} where the supremum is taken from as
+  follows:
+  \begin{center}
+  @{text "{0} \<union> {\<bar>f x\<bar> / \<parallel>x\<parallel>. x \<noteq> 0 \<and> x \<in> F}"}
+  \end{center}
+
+  @{text fn_norm} is equal to the supremum of @{text B}, if the
+  supremum exists (otherwise it is undefined).
+*}
+
+locale fn_norm = norm_syntax +
+  fixes B defines "B V f \<equiv> {0} \<union> {\<bar>f x\<bar> / \<parallel>x\<parallel> | x. x \<noteq> 0 \<and> x \<in> V}"
+  fixes fn_norm ("\<parallel>_\<parallel>\<hyphen>_" [0, 1000] 999)
+  defines "\<parallel>f\<parallel>\<hyphen>V \<equiv> \<Squnion>(B V f)"
+
+locale normed_vectorspace_with_fn_norm = normed_vectorspace + fn_norm
+
+lemma (in fn_norm) B_not_empty [intro]: "0 \<in> B V f"
+  by (simp add: B_def)
+
+text {*
+  The following lemma states that every continuous linear form on a
+  normed space @{text "(V, \<parallel>\<cdot>\<parallel>)"} has a function norm.
+*}
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_works:
+  assumes "continuous V norm f"
+  shows "lub (B V f) (\<parallel>f\<parallel>\<hyphen>V)"
+proof -
+  interpret continuous V norm f by fact
+  txt {* The existence of the supremum is shown using the
+    completeness of the reals. Completeness means that every
+    non-empty bounded set of reals has a supremum. *}
+  have "\<exists>a. lub (B V f) a"
+  proof (rule real_complete)
+    txt {* First we have to show that @{text B} is non-empty: *}
+    have "0 \<in> B V f" ..
+    then show "\<exists>x. x \<in> B V f" ..
+
+    txt {* Then we have to show that @{text B} is bounded: *}
+    show "\<exists>c. \<forall>y \<in> B V f. y \<le> c"
+    proof -
+      txt {* We know that @{text f} is bounded by some value @{text c}. *}
+      from bounded obtain c where c: "\<forall>x \<in> V. \<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>" ..
+
+      txt {* To prove the thesis, we have to show that there is some
+        @{text b} such that @{text "y \<le> b"} for all @{text "y \<in>
+        B"}. Due to the definition of @{text B} there are two cases. *}
+
+      def b \<equiv> "max c 0"
+      have "\<forall>y \<in> B V f. y \<le> b"
+      proof
+        fix y assume y: "y \<in> B V f"
+        show "y \<le> b"
+        proof cases
+          assume "y = 0"
+          then show ?thesis unfolding b_def by arith
+        next
+          txt {* The second case is @{text "y = \<bar>f x\<bar> / \<parallel>x\<parallel>"} for some
+            @{text "x \<in> V"} with @{text "x \<noteq> 0"}. *}
+          assume "y \<noteq> 0"
+          with y obtain x where y_rep: "y = \<bar>f x\<bar> * inverse \<parallel>x\<parallel>"
+              and x: "x \<in> V" and neq: "x \<noteq> 0"
+            by (auto simp add: B_def real_divide_def)
+          from x neq have gt: "0 < \<parallel>x\<parallel>" ..
+
+          txt {* The thesis follows by a short calculation using the
+            fact that @{text f} is bounded. *}
+
+          note y_rep
+          also have "\<bar>f x\<bar> * inverse \<parallel>x\<parallel> \<le> (c * \<parallel>x\<parallel>) * inverse \<parallel>x\<parallel>"
+          proof (rule mult_right_mono)
+            from c x show "\<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>" ..
+            from gt have "0 < inverse \<parallel>x\<parallel>" 
+              by (rule positive_imp_inverse_positive)
+            then show "0 \<le> inverse \<parallel>x\<parallel>" by (rule order_less_imp_le)
+          qed
+          also have "\<dots> = c * (\<parallel>x\<parallel> * inverse \<parallel>x\<parallel>)"
+            by (rule real_mult_assoc)
+          also
+          from gt have "\<parallel>x\<parallel> \<noteq> 0" by simp
+          then have "\<parallel>x\<parallel> * inverse \<parallel>x\<parallel> = 1" by simp 
+          also have "c * 1 \<le> b" by (simp add: b_def le_maxI1)
+          finally show "y \<le> b" .
+        qed
+      qed
+      then show ?thesis ..
+    qed
+  qed
+  then show ?thesis unfolding fn_norm_def by (rule the_lubI_ex)
+qed
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_ub [iff?]:
+  assumes "continuous V norm f"
+  assumes b: "b \<in> B V f"
+  shows "b \<le> \<parallel>f\<parallel>\<hyphen>V"
+proof -
+  interpret continuous V norm f by fact
+  have "lub (B V f) (\<parallel>f\<parallel>\<hyphen>V)"
+    using `continuous V norm f` by (rule fn_norm_works)
+  from this and b show ?thesis ..
+qed
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_leastB:
+  assumes "continuous V norm f"
+  assumes b: "\<And>b. b \<in> B V f \<Longrightarrow> b \<le> y"
+  shows "\<parallel>f\<parallel>\<hyphen>V \<le> y"
+proof -
+  interpret continuous V norm f by fact
+  have "lub (B V f) (\<parallel>f\<parallel>\<hyphen>V)"
+    using `continuous V norm f` by (rule fn_norm_works)
+  from this and b show ?thesis ..
+qed
+
+text {* The norm of a continuous function is always @{text "\<ge> 0"}. *}
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_ge_zero [iff]:
+  assumes "continuous V norm f"
+  shows "0 \<le> \<parallel>f\<parallel>\<hyphen>V"
+proof -
+  interpret continuous V norm f by fact
+  txt {* The function norm is defined as the supremum of @{text B}.
+    So it is @{text "\<ge> 0"} if all elements in @{text B} are @{text "\<ge>
+    0"}, provided the supremum exists and @{text B} is not empty. *}
+  have "lub (B V f) (\<parallel>f\<parallel>\<hyphen>V)"
+    using `continuous V norm f` by (rule fn_norm_works)
+  moreover have "0 \<in> B V f" ..
+  ultimately show ?thesis ..
+qed
+
+text {*
+  \medskip The fundamental property of function norms is:
+  \begin{center}
+  @{text "\<bar>f x\<bar> \<le> \<parallel>f\<parallel> \<cdot> \<parallel>x\<parallel>"}
+  \end{center}
+*}
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_le_cong:
+  assumes "continuous V norm f" "linearform V f"
+  assumes x: "x \<in> V"
+  shows "\<bar>f x\<bar> \<le> \<parallel>f\<parallel>\<hyphen>V * \<parallel>x\<parallel>"
+proof -
+  interpret continuous V norm f by fact
+  interpret linearform V f by fact
+  show ?thesis
+  proof cases
+    assume "x = 0"
+    then have "\<bar>f x\<bar> = \<bar>f 0\<bar>" by simp
+    also have "f 0 = 0" by rule unfold_locales
+    also have "\<bar>\<dots>\<bar> = 0" by simp
+    also have a: "0 \<le> \<parallel>f\<parallel>\<hyphen>V"
+      using `continuous V norm f` by (rule fn_norm_ge_zero)
+    from x have "0 \<le> norm x" ..
+    with a have "0 \<le> \<parallel>f\<parallel>\<hyphen>V * \<parallel>x\<parallel>" by (simp add: zero_le_mult_iff)
+    finally show "\<bar>f x\<bar> \<le> \<parallel>f\<parallel>\<hyphen>V * \<parallel>x\<parallel>" .
+  next
+    assume "x \<noteq> 0"
+    with x have neq: "\<parallel>x\<parallel> \<noteq> 0" by simp
+    then have "\<bar>f x\<bar> = (\<bar>f x\<bar> * inverse \<parallel>x\<parallel>) * \<parallel>x\<parallel>" by simp
+    also have "\<dots> \<le>  \<parallel>f\<parallel>\<hyphen>V * \<parallel>x\<parallel>"
+    proof (rule mult_right_mono)
+      from x show "0 \<le> \<parallel>x\<parallel>" ..
+      from x and neq have "\<bar>f x\<bar> * inverse \<parallel>x\<parallel> \<in> B V f"
+	by (auto simp add: B_def real_divide_def)
+      with `continuous V norm f` show "\<bar>f x\<bar> * inverse \<parallel>x\<parallel> \<le> \<parallel>f\<parallel>\<hyphen>V"
+	by (rule fn_norm_ub)
+    qed
+    finally show ?thesis .
+  qed
+qed
+
+text {*
+  \medskip The function norm is the least positive real number for
+  which the following inequality holds:
+  \begin{center}
+    @{text "\<bar>f x\<bar> \<le> c \<cdot> \<parallel>x\<parallel>"}
+  \end{center}
+*}
+
+lemma (in normed_vectorspace_with_fn_norm) fn_norm_least [intro?]:
+  assumes "continuous V norm f"
+  assumes ineq: "\<forall>x \<in> V. \<bar>f x\<bar> \<le> c * \<parallel>x\<parallel>" and ge: "0 \<le> c"
+  shows "\<parallel>f\<parallel>\<hyphen>V \<le> c"