--- a/Admin/isatest/settings/at-poly Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/at-poly Tue Feb 09 17:06:05 2010 +0100
@@ -24,3 +24,4 @@
ISABELLE_USEDIR_OPTIONS="-i true -d pdf -v true -t true"
+init_component /home/isabelle/contrib_devel/kodkodi
--- a/Admin/isatest/settings/mac-poly Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/mac-poly Tue Feb 09 17:06:05 2010 +0100
@@ -24,3 +24,4 @@
ISABELLE_USEDIR_OPTIONS="-i true -d pdf -v true -g false"
+init_component /home/isabelle/contrib_devel/kodkodi
--- a/Admin/isatest/settings/mac-poly-M4 Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/mac-poly-M4 Tue Feb 09 17:06:05 2010 +0100
@@ -25,3 +25,4 @@
ISABELLE_USEDIR_OPTIONS="-i false -d false -t true -M 4 -q 2"
+init_component /home/isabelle/contrib_devel/kodkodi
--- a/Admin/isatest/settings/mac-poly-M8 Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/mac-poly-M8 Tue Feb 09 17:06:05 2010 +0100
@@ -25,3 +25,5 @@
ISABELLE_USEDIR_OPTIONS="-i false -d false -t true -M 8 -q 2"
+init_component /home/isabelle/contrib_devel/kodkodi
+
--- a/Admin/isatest/settings/mac-poly64-M4 Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/mac-poly64-M4 Tue Feb 09 17:06:05 2010 +0100
@@ -4,7 +4,7 @@
ML_SYSTEM="polyml-5.3.0"
ML_PLATFORM="x86_64-darwin"
ML_HOME="$POLYML_HOME/$ML_PLATFORM"
- ML_OPTIONS="--mutable 12000 --immutable 4000"
+ ML_OPTIONS="--mutable 4000 --immutable 4000"
ISABELLE_HOME_USER=~/isabelle-mac-poly64-M4
--- a/Admin/isatest/settings/mac-poly64-M8 Tue Feb 09 16:07:51 2010 +0100
+++ b/Admin/isatest/settings/mac-poly64-M8 Tue Feb 09 17:06:05 2010 +0100
@@ -4,7 +4,7 @@
ML_SYSTEM="polyml-5.3.0"
ML_PLATFORM="x86_64-darwin"
ML_HOME="$POLYML_HOME/$ML_PLATFORM"
- ML_OPTIONS="--mutable 12000 --immutable 4000"
+ ML_OPTIONS="--mutable 4000 --immutable 4000"
ISABELLE_HOME_USER=~/isabelle-mac-poly64-M8
--- a/NEWS Tue Feb 09 16:07:51 2010 +0100
+++ b/NEWS Tue Feb 09 17:06:05 2010 +0100
@@ -12,10 +12,72 @@
*** HOL ***
-* new theory Algebras.thy contains generic algebraic structures and
+* New set of rules "ac_simps" provides combined assoc / commute rewrites
+for all interpretations of the appropriate generic locales.
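+For example, a hypothetical invocation "simp add: ac_simps" may be used
+to normalize a goal modulo associativity and commutativity of the
+operations covered by the corresponding locale interpretations.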
+
+* Renamed theory "OrderedGroup" to "Groups" and split theory "Ring_and_Field"
+into theories "Rings" and "Fields", to provide more appropriate and
+consistent names suitable as name prefixes within the HOL theories.
+INCOMPATIBILITY.
+
+* More consistent naming of type classes involving orderings (and lattices):
+
+ lower_semilattice ~> semilattice_inf
+ upper_semilattice ~> semilattice_sup
+
+ dense_linear_order ~> dense_linorder
+
+ pordered_ab_group_add ~> ordered_ab_group_add
+ pordered_ab_group_add_abs ~> ordered_ab_group_add_abs
+ pordered_ab_semigroup_add ~> ordered_ab_semigroup_add
+ pordered_ab_semigroup_add_imp_le ~> ordered_ab_semigroup_add_imp_le
+ pordered_cancel_ab_semigroup_add ~> ordered_cancel_ab_semigroup_add
+ pordered_cancel_comm_semiring ~> ordered_cancel_comm_semiring
+ pordered_cancel_semiring ~> ordered_cancel_semiring
+ pordered_comm_monoid_add ~> ordered_comm_monoid_add
+ pordered_comm_ring ~> ordered_comm_ring
+ pordered_comm_semiring ~> ordered_comm_semiring
+ pordered_ring ~> ordered_ring
+ pordered_ring_abs ~> ordered_ring_abs
+ pordered_semiring ~> ordered_semiring
+
+ ordered_ab_group_add ~> linordered_ab_group_add
+ ordered_ab_semigroup_add ~> linordered_ab_semigroup_add
+ ordered_cancel_ab_semigroup_add ~> linordered_cancel_ab_semigroup_add
+ ordered_comm_semiring_strict ~> linordered_comm_semiring_strict
+ ordered_field ~> linordered_field
+ ordered_field_no_lb ~> linordered_field_no_lb
+ ordered_field_no_ub ~> linordered_field_no_ub
+ ordered_field_dense_linear_order ~> dense_linordered_field
+ ordered_idom ~> linordered_idom
+ ordered_ring ~> linordered_ring
+ ordered_ring_le_cancel_factor ~> linordered_ring_le_cancel_factor
+ ordered_ring_less_cancel_factor ~> linordered_ring_less_cancel_factor
+ ordered_ring_strict ~> linordered_ring_strict
+ ordered_semidom ~> linordered_semidom
+ ordered_semiring ~> linordered_semiring
+ ordered_semiring_1 ~> linordered_semiring_1
+ ordered_semiring_1_strict ~> linordered_semiring_1_strict
+ ordered_semiring_strict ~> linordered_semiring_strict
+
+ The following slightly odd type classes have been moved to
+ a separate theory Library/Lattice_Algebras.thy:
+
+ lordered_ab_group_add ~> lattice_ab_group_add
+ lordered_ab_group_add_abs ~> lattice_ab_group_add_abs
+ lordered_ab_group_add_meet ~> semilattice_inf_ab_group_add
+ lordered_ab_group_add_join ~> semilattice_sup_ab_group_add
+ lordered_ring ~> lattice_ring
+
+INCOMPATIBILITY.
+
+* Index syntax for structures must be imported explicitly from library
+theory Structure_Syntax. INCOMPATIBILITY.
+
+* New theory Algebras contains generic algebraic structures and
generic algebraic operations. INCOMPATIBILITY: constants zero, one,
-plus, minus, uminus, times, inverse, divide, abs, sgn, less_eq and less
-have been moved from HOL.thy to Algebras.thy.
+plus, minus, uminus, times, inverse, divide, abs, sgn, less_eq and
+less have been moved from HOL.thy to Algebras.thy.
* HOLogic.strip_psplit: types are returned in syntactic order, similar
to other strip and tuple operations. INCOMPATIBILITY.
@@ -24,24 +86,24 @@
replaced by new-style primrec, especially in theory List. The corresponding
constants now have authentic syntax. INCOMPATIBILITY.
-* Reorganized theory Multiset.thy: less duplication, less historical
+* Reorganized theory Multiset: less duplication, less historical
organization of sections, conversion from association lists to
multisets, rudimentary code generation. Use insert_DiffM2 [symmetric]
instead of elem_imp_eq_diff_union, if needed. INCOMPATIBILITY.
-* Reorganized theory Sum_Type.thy; Inl and Inr now have
-authentic syntax. INCOMPATIBILITY.
+* Reorganized theory Sum_Type; Inl and Inr now have authentic syntax.
+INCOMPATIBILITY.
* Code generation: ML and OCaml code is decorated with signatures.
-* Complete_Lattice.thy: lemmas top_def and bot_def have been replaced
-by the more convenient lemmas Inf_empty and Sup_empty. Dropped lemmas
-Inf_insert_simp and Sup_insert_simp, which are subsumed by Inf_insert
-and Sup_insert. Lemmas Inf_UNIV and Sup_UNIV replace former Inf_Univ
-and Sup_Univ. Lemmas inf_top_right and sup_bot_right subsume inf_top
-and sup_bot respectively. INCOMPATIBILITY.
-
-* Finite_Set.thy and List.thy: some lemmas have been generalized from
+* Theory Complete_Lattice: lemmas top_def and bot_def have been
+replaced by the more convenient lemmas Inf_empty and Sup_empty.
+Dropped lemmas Inf_insert_simp and Sup_insert_simp, which are subsumed
+by Inf_insert and Sup_insert. Lemmas Inf_UNIV and Sup_UNIV replace
+former Inf_Univ and Sup_Univ. Lemmas inf_top_right and sup_bot_right
+subsume inf_top and sup_bot respectively. INCOMPATIBILITY.
+
+* Theory Finite_Set and List: some lemmas have been generalized from
sets to lattices:
fun_left_comm_idem_inter ~> fun_left_comm_idem_inf
@@ -55,10 +117,15 @@
INTER_fold_inter ~> INFI_fold_inf
UNION_fold_union ~> SUPR_fold_sup
-* Added transpose to List.thy.
+* Theory List: added transpose.
+
*** ML ***
+* Renamed old-style Drule.standard to Drule.export_without_context, to
+emphasize that this is in no way a standard operation.
+INCOMPATIBILITY.
+
* Curried take and drop in library.ML; negative length is interpreted
as infinity (as in chop). INCOMPATIBILITY.
--- a/doc-src/IsarImplementation/Thy/Integration.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Integration.thy Tue Feb 09 17:06:05 2010 +0100
@@ -79,8 +79,9 @@
\item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
toplevel state.
- \item @{ML Toplevel.theory_of}~@{text "state"} selects the theory of
- a theory or proof (!), otherwise raises @{ML Toplevel.UNDEF}.
+ \item @{ML Toplevel.theory_of}~@{text "state"} selects the
+ background theory of @{text "state"}; it raises @{ML Toplevel.UNDEF}
+ for an empty toplevel state.
\item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
state if available, otherwise raises @{ML Toplevel.UNDEF}.
@@ -114,16 +115,16 @@
The operational part is represented as the sequential union of a
list of partial functions, which are tried in turn until the first
one succeeds. This acts like an outer case-expression for various
- alternative state transitions. For example, \isakeyword{qed} acts
+ alternative state transitions. For example, \isakeyword{qed} works
differently for local proofs vs.\ the global ending of the main
proof.
Toplevel transitions are composed via transition transformers.
Internally, Isar commands are put together from an empty transition
- extended by name and source position (and optional source text). It
- is then left to the individual command parser to turn the given
- concrete syntax into a suitable transition transformer that adjoins
- actual operations on a theory or proof state etc.
+ extended by name and source position. It is then left to the
+ individual command parser to turn the given concrete syntax into a
+ suitable transition transformer that adjoins actual operations on a
+ theory or proof state etc.
*}
text %mlref {*
@@ -188,7 +189,7 @@
the toplevel itself, and only make sense in interactive mode. Under
normal circumstances, the user encounters these only implicitly as
part of the protocol between the Isabelle/Isar system and a
- user-interface such as ProofGeneral.
+ user-interface such as Proof~General.
\begin{description}
@@ -323,9 +324,7 @@
associated with each theory, by declaring these dependencies in the
theory header as @{text \<USES>}, and loading them consecutively
within the theory context. The system keeps track of incoming {\ML}
- sources and associates them with the current theory. The file
- @{text A}\verb,.ML, is loaded after a theory has been concluded, in
- order to support legacy proof {\ML} proof scripts.
+ sources and associates them with the current theory.
The basic internal actions of the theory database are @{text
"update"}, @{text "outdate"}, and @{text "remove"}:
@@ -386,12 +385,15 @@
\item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
up-to-date wrt.\ the external file store, reloading outdated
- ancestors as required.
+ ancestors as required. In batch mode, the simultaneous @{ML
+ use_thys} should be used exclusively.
\item @{ML use_thys} is similar to @{ML use_thy}, but handles
several theories simultaneously. Thus it acts like processing the
import header of a theory, without performing the merge of the
- result, though.
+ result. By loading a whole sub-graph of theories like that, the
+ system can exploit the intrinsic parallelism and speed up
+ loading.
\item @{ML ThyInfo.touch_thy}~@{text A} performs an @{text outdate} action
on theory @{text A} and all descendants.
@@ -400,7 +402,7 @@
descendants from the theory database.
\item @{ML ThyInfo.begin_theory} is the basic operation behind a
- @{text \<THEORY>} header declaration. This is {\ML} functions is
+ @{text \<THEORY>} header declaration. This {\ML} function is
normally not invoked directly.
\item @{ML ThyInfo.end_theory} concludes the loading of a theory
--- a/doc-src/IsarImplementation/Thy/Local_Theory.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Local_Theory.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2,7 +2,7 @@
imports Base
begin
-chapter {* Local theory specifications *}
+chapter {* Local theory specifications \label{ch:local-theory} *}
text {*
A \emph{local theory} combines aspects of both theory and proof
--- a/doc-src/IsarImplementation/Thy/Logic.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Logic.thy Tue Feb 09 17:06:05 2010 +0100
@@ -24,7 +24,9 @@
schematic polymorphism, which is strictly speaking outside the
logic.\footnote{This is the deeper logical reason, why the theory
context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
- of the core calculus.}
+ of the core calculus: type constructors, term constants, and facts
+ (proof constants) may involve arbitrary type schemes, but the type
+ of a locally fixed term parameter is also fixed!}
*}
@@ -41,18 +43,17 @@
internally. The resulting relation is an ordering: reflexive,
transitive, and antisymmetric.
- A \emph{sort} is a list of type classes written as @{text "s =
- {c\<^isub>1, \<dots>, c\<^isub>m}"}, which represents symbolic
- intersection. Notationally, the curly braces are omitted for
- singleton intersections, i.e.\ any class @{text "c"} may be read as
- a sort @{text "{c}"}. The ordering on type classes is extended to
- sorts according to the meaning of intersections: @{text
- "{c\<^isub>1, \<dots> c\<^isub>m} \<subseteq> {d\<^isub>1, \<dots>, d\<^isub>n}"} iff
- @{text "\<forall>j. \<exists>i. c\<^isub>i \<subseteq> d\<^isub>j"}. The empty intersection
- @{text "{}"} refers to the universal sort, which is the largest
- element wrt.\ the sort order. The intersections of all (finitely
- many) classes declared in the current theory are the minimal
- elements wrt.\ the sort order.
+ A \emph{sort} is a list of type classes written as @{text "s = {c\<^isub>1,
+ \<dots>, c\<^isub>m}"}; it represents symbolic intersection. Notationally, the
+ curly braces are omitted for singleton intersections, i.e.\ any
+ class @{text "c"} may be read as a sort @{text "{c}"}. The ordering
+ on type classes is extended to sorts according to the meaning of
+ intersections: @{text "{c\<^isub>1, \<dots> c\<^isub>m} \<subseteq> {d\<^isub>1, \<dots>, d\<^isub>n}"} iff @{text
+ "\<forall>j. \<exists>i. c\<^isub>i \<subseteq> d\<^isub>j"}. The empty intersection @{text "{}"} refers to
+ the universal sort, which is the largest element wrt.\ the sort
+ order. Thus @{text "{}"} represents the ``full sort'', not the
+ empty one! The intersection of all (finitely many) classes declared
+ in the current theory is the least element wrt.\ the sort ordering.
\medskip A \emph{fixed type variable} is a pair of a basic name
(starting with a @{text "'"} character) and a sort constraint, e.g.\
@@ -62,10 +63,10 @@
printed as @{text "?\<alpha>\<^isub>s"}.
Note that \emph{all} syntactic components contribute to the identity
- of type variables, including the sort constraint. The core logic
- handles type variables with the same name but different sorts as
- different, although some outer layers of the system make it hard to
- produce anything like this.
+ of type variables: basic name, index, and sort constraint. The core
+ logic handles type variables with the same name but different sorts
+ as different, although the type-inference layer (which is outside
+ the core) rejects anything like that.
A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
on types declared in the theory. Type constructor application is
@@ -77,8 +78,8 @@
right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
\<beta>)fun"}.
- A \emph{type} is defined inductively over type variables and type
- constructors as follows: @{text "\<tau> = \<alpha>\<^isub>s | ?\<alpha>\<^isub>s |
+ The logical category \emph{type} is defined inductively over type
+ variables and type constructors as follows: @{text "\<tau> = \<alpha>\<^isub>s | ?\<alpha>\<^isub>s |
(\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
A \emph{type abbreviation} is a syntactic definition @{text
@@ -116,9 +117,9 @@
text %mlref {*
\begin{mldecls}
- @{index_ML_type class} \\
- @{index_ML_type sort} \\
- @{index_ML_type arity} \\
+ @{index_ML_type class: string} \\
+ @{index_ML_type sort: "class list"} \\
+ @{index_ML_type arity: "string * sort list * sort"} \\
@{index_ML_type typ} \\
@{index_ML map_atyps: "(typ -> typ) -> typ -> typ"} \\
@{index_ML fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
@@ -136,15 +137,15 @@
\begin{description}
- \item @{ML_type class} represents type classes; this is an alias for
- @{ML_type string}.
+ \item @{ML_type class} represents type classes.
- \item @{ML_type sort} represents sorts; this is an alias for
- @{ML_type "class list"}.
+ \item @{ML_type sort} represents sorts, i.e.\ finite intersections
+ of classes. The empty list @{ML "[]: sort"} refers to the empty
+ class intersection, i.e.\ the ``full sort''.
- \item @{ML_type arity} represents type arities; this is an alias for
- triples of the form @{text "(\<kappa>, \<^vec>s, s)"} for @{text "\<kappa> ::
- (\<^vec>s)s"} described above.
+ \item @{ML_type arity} represents type arities. A triple @{text
+ "(\<kappa>, \<^vec>s, s) : arity"} represents @{text "\<kappa> :: (\<^vec>s)s"} as
+ described above.
\item @{ML_type typ} represents types; this is a datatype with
constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
@@ -193,15 +194,13 @@
with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
or \cite{paulson-ml2}), with the types being determined by the
corresponding binders. In contrast, free variables and constants
- are have an explicit name and type in each occurrence.
+ have an explicit name and type in each occurrence.
\medskip A \emph{bound variable} is a natural number @{text "b"},
which accounts for the number of intermediate binders between the
variable occurrence in the body and its binding position. For
- example, the de-Bruijn term @{text
- "\<lambda>\<^bsub>nat\<^esub>. \<lambda>\<^bsub>nat\<^esub>. 1 + 0"} would
- correspond to @{text
- "\<lambda>x\<^bsub>nat\<^esub>. \<lambda>y\<^bsub>nat\<^esub>. x + y"} in a named
+ example, the de-Bruijn term @{text "\<lambda>\<^bsub>bool\<^esub>. \<lambda>\<^bsub>bool\<^esub>. 1 \<and> 0"} would
+ correspond to @{text "\<lambda>x\<^bsub>bool\<^esub>. \<lambda>y\<^bsub>bool\<^esub>. x \<and> y"} in a named
representation. Note that a bound variable may be represented by
different de-Bruijn indices at different occurrences, depending on
the nesting of abstractions.
@@ -213,27 +212,27 @@
without any loose variables.
A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
- @{text "(x, \<tau>)"} which is usually printed @{text "x\<^isub>\<tau>"}. A
+ @{text "(x, \<tau>)"} which is usually printed @{text "x\<^isub>\<tau>"} here. A
\emph{schematic variable} is a pair of an indexname and a type,
- e.g.\ @{text "((x, 0), \<tau>)"} which is usually printed as @{text
+ e.g.\ @{text "((x, 0), \<tau>)"} which is likewise printed as @{text
"?x\<^isub>\<tau>"}.
\medskip A \emph{constant} is a pair of a basic name and a type,
- e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text
- "c\<^isub>\<tau>"}. Constants are declared in the context as polymorphic
- families @{text "c :: \<sigma>"}, meaning that all substitution instances
- @{text "c\<^isub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
+ e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text "c\<^isub>\<tau>"}
+ here. Constants are declared in the context as polymorphic families
+ @{text "c :: \<sigma>"}, meaning that all substitution instances @{text
+ "c\<^isub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
- The vector of \emph{type arguments} of constant @{text "c\<^isub>\<tau>"}
- wrt.\ the declaration @{text "c :: \<sigma>"} is defined as the codomain of
- the matcher @{text "\<vartheta> = {?\<alpha>\<^isub>1 \<mapsto> \<tau>\<^isub>1, \<dots>,
- ?\<alpha>\<^isub>n \<mapsto> \<tau>\<^isub>n}"} presented in canonical order @{text
- "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n)"}. Within a given theory context,
- there is a one-to-one correspondence between any constant @{text
- "c\<^isub>\<tau>"} and the application @{text "c(\<tau>\<^isub>1, \<dots>,
- \<tau>\<^isub>n)"} of its type arguments. For example, with @{text "plus
- :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow>
- nat\<^esub>"} corresponds to @{text "plus(nat)"}.
+ The vector of \emph{type arguments} of constant @{text "c\<^isub>\<tau>"} wrt.\
+ the declaration @{text "c :: \<sigma>"} is defined as the codomain of the
+ matcher @{text "\<vartheta> = {?\<alpha>\<^isub>1 \<mapsto> \<tau>\<^isub>1, \<dots>, ?\<alpha>\<^isub>n \<mapsto> \<tau>\<^isub>n}"} presented in
+ canonical order @{text "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n)"}, corresponding to the
+ left-to-right occurrences of the @{text "\<alpha>\<^isub>i"} in @{text "\<sigma>"}.
+ Within a given theory context, there is a one-to-one correspondence
+ between any constant @{text "c\<^isub>\<tau>"} and the application @{text "c(\<tau>\<^isub>1,
+ \<dots>, \<tau>\<^isub>n)"} of its type arguments. For example, with @{text "plus :: \<alpha>
+ \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow> nat\<^esub>"} corresponds to
+ @{text "plus(nat)"}.
Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
for type variables in @{text "\<sigma>"}. These are observed by
@@ -242,14 +241,13 @@
polymorphic constants that the user-level type-checker would reject
due to violation of type class restrictions.
- \medskip An \emph{atomic} term is either a variable or constant. A
- \emph{term} is defined inductively over atomic terms, with
- abstraction and application as follows: @{text "t = b | x\<^isub>\<tau> |
- ?x\<^isub>\<tau> | c\<^isub>\<tau> | \<lambda>\<^isub>\<tau>. t | t\<^isub>1 t\<^isub>2"}.
- Parsing and printing takes care of converting between an external
- representation with named bound variables. Subsequently, we shall
- use the latter notation instead of internal de-Bruijn
- representation.
+ \medskip An \emph{atomic} term is either a variable or constant.
+ The logical category \emph{term} is defined inductively over atomic
+ terms, with abstraction and application as follows: @{text "t = b |
+ x\<^isub>\<tau> | ?x\<^isub>\<tau> | c\<^isub>\<tau> | \<lambda>\<^isub>\<tau>. t | t\<^isub>1 t\<^isub>2"}. Parsing and printing takes care of
+ converting between an external representation with named bound
+ variables. Subsequently, we shall use the latter notation instead
+ of internal de-Bruijn representation.
The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
term according to the structure of atomic terms, abstractions, and
@@ -271,18 +269,17 @@
The identity of atomic terms consists both of the name and the type
component. This means that different variables @{text
- "x\<^bsub>\<tau>\<^isub>1\<^esub>"} and @{text
- "x\<^bsub>\<tau>\<^isub>2\<^esub>"} may become the same after type
- instantiation. Some outer layers of the system make it hard to
- produce variables of the same name, but different types. In
- contrast, mixed instances of polymorphic constants occur frequently.
+ "x\<^bsub>\<tau>\<^isub>1\<^esub>"} and @{text "x\<^bsub>\<tau>\<^isub>2\<^esub>"} may become the same after
+ type instantiation. Type-inference rejects variables of the same
+ name, but different types. In contrast, mixed instances of
+ polymorphic constants occur routinely.
\medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
is the set of type variables occurring in @{text "t"}, but not in
- @{text "\<sigma>"}. This means that the term implicitly depends on type
- arguments that are not accounted in the result type, i.e.\ there are
- different type instances @{text "t\<vartheta> :: \<sigma>"} and @{text
- "t\<vartheta>' :: \<sigma>"} with the same type. This slightly
+ its type @{text "\<sigma>"}. This means that the term implicitly depends
+ on type arguments that are not accounted in the result type, i.e.\
+ there are different type instances @{text "t\<vartheta> :: \<sigma>"} and
+ @{text "t\<vartheta>' :: \<sigma>"} with the same type. This slightly
pathological situation notoriously demands additional care.
\medskip A \emph{term abbreviation} is a syntactic definition @{text
@@ -431,14 +428,14 @@
\infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
\]
\[
- \infer[@{text "(\<And>_intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}{@{text "\<Gamma> \<turnstile> b[x]"} & @{text "x \<notin> \<Gamma>"}}
+ \infer[@{text "(\<And>\<dash>intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}{@{text "\<Gamma> \<turnstile> b[x]"} & @{text "x \<notin> \<Gamma>"}}
\qquad
- \infer[@{text "(\<And>_elim)"}]{@{text "\<Gamma> \<turnstile> b[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}
+ \infer[@{text "(\<And>\<dash>elim)"}]{@{text "\<Gamma> \<turnstile> b[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}
\]
\[
- \infer[@{text "(\<Longrightarrow>_intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \infer[@{text "(\<Longrightarrow>\<dash>intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
\qquad
- \infer[@{text "(\<Longrightarrow>_elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+ \infer[@{text "(\<Longrightarrow>\<dash>elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
\]
\caption{Primitive inferences of Pure}\label{fig:prim-rules}
\end{center}
@@ -467,21 +464,21 @@
terms, and @{text "\<And>/\<Longrightarrow>"} for proofs (cf.\
\cite{Berghofer-Nipkow:2000:TPHOL}).
- Observe that locally fixed parameters (as in @{text "\<And>_intro"}) need
- not be recorded in the hypotheses, because the simple syntactic
- types of Pure are always inhabitable. ``Assumptions'' @{text "x ::
- \<tau>"} for type-membership are only present as long as some @{text
- "x\<^isub>\<tau>"} occurs in the statement body.\footnote{This is the key
- difference to ``@{text "\<lambda>HOL"}'' in the PTS framework
- \cite{Barendregt-Geuvers:2001}, where hypotheses @{text "x : A"} are
- treated uniformly for propositions and types.}
+ Observe that locally fixed parameters (as in @{text
+ "\<And>\<dash>intro"}) need not be recorded in the hypotheses, because
+ the simple syntactic types of Pure are always inhabitable.
+ ``Assumptions'' @{text "x :: \<tau>"} for type-membership are only
+ present as long as some @{text "x\<^isub>\<tau>"} occurs in the statement
+ body.\footnote{This is the key difference to ``@{text "\<lambda>HOL"}'' in
+ the PTS framework \cite{Barendregt-Geuvers:2001}, where hypotheses
+ @{text "x : A"} are treated uniformly for propositions and types.}
\medskip The axiomatization of a theory is implicitly closed by
forming all instances of type and term variables: @{text "\<turnstile>
A\<vartheta>"} holds for any substitution instance of an axiom
@{text "\<turnstile> A"}. By pushing substitutions through derivations
inductively, we also get admissible @{text "generalize"} and @{text
- "instance"} rules as shown in \figref{fig:subst-rules}.
+ "instantiate"} rules as shown in \figref{fig:subst-rules}.
\begin{figure}[htb]
\begin{center}
@@ -588,11 +585,11 @@
Every @{ML thm} value contains a sliding back-reference to the
enclosing theory, cf.\ \secref{sec:context-theory}.
- \item @{ML proofs} determines the detail of proof recording within
+ \item @{ML proofs} specifies the detail of proof recording within
@{ML_type thm} values: @{ML 0} records only the names of oracles,
@{ML 1} records oracle names and propositions, @{ML 2} additionally
records full proof terms. Officially named theorems that contribute
- to a result are always recorded.
+ to a result are recorded in any case.
\item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
@@ -644,8 +641,8 @@
\begin{figure}[htb]
\begin{center}
\begin{tabular}{ll}
- @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&"}) \\
- @{text "\<turnstile> A & B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
+ @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&&&"}) \\
+ @{text "\<turnstile> A &&& B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
@{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
@{text "#A \<equiv> A"} \\[1ex]
@{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
@@ -657,13 +654,15 @@
\end{center}
\end{figure}
- Derived conjunction rules include introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &
- B"}, and destructions @{text "A & B \<Longrightarrow> A"} and @{text "A & B \<Longrightarrow> B"}.
- Conjunction allows to treat simultaneous assumptions and conclusions
- uniformly. For example, multiple claims are intermediately
- represented as explicit conjunction, but this is refined into
- separate sub-goals before the user continues the proof; the final
- result is projected into a list of theorems (cf.\
+ The introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &&& B"}, and eliminations
+ (projections) @{text "A &&& B \<Longrightarrow> A"} and @{text "A &&& B \<Longrightarrow> B"} are
+ available as derived rules. Conjunction makes it possible to treat
+ simultaneous assumptions and conclusions uniformly, e.g.\ consider
+ @{text "A \<Longrightarrow> B \<Longrightarrow> C &&& D"}. In particular, the goal mechanism
+ represents multiple claims as explicit conjunction internally, but
+ this is refined (via backwards introduction) into separate sub-goals
+ before the user commences the proof; the final result is projected
+ into a list of theorems using eliminations (cf.\
\secref{sec:tactical-goals}).
The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
@@ -681,7 +680,7 @@
language of types into that of terms. There is specific notation
@{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau>
itself\<^esub>"}.
- Although being devoid of any particular meaning, the @{text
+ Although devoid of any particular meaning, the term @{text
"TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
language. In particular, @{text "TYPE(\<alpha>)"} may be used as formal
argument in primitive definitions, in order to circumvent hidden
@@ -703,11 +702,11 @@
\begin{description}
- \item @{ML Conjunction.intr} derives @{text "A & B"} from @{text
+ \item @{ML Conjunction.intr} derives @{text "A &&& B"} from @{text
"A"} and @{text "B"}.
\item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
- from @{text "A & B"}.
+ from @{text "A &&& B"}.
\item @{ML Drule.mk_term} derives @{text "TERM t"}.
@@ -784,7 +783,8 @@
"(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B"} or mathematical induction @{text "P 0 \<Longrightarrow> (\<And>n. P n
\<Longrightarrow> P (Suc n)) \<Longrightarrow> P n"}. Even deeper nesting occurs in well-founded
induction @{text "(\<And>x. (\<And>y. y \<prec> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P x"}, but this
- already marks the limit of rule complexity seen in practice.
+ already marks the limit of rule complexity that is usually seen in
+ practice.
\medskip Regular user-level inferences in Isabelle/Pure always
maintain the following canonical form of results:
@@ -890,11 +890,10 @@
\begin{description}
- \item @{text "rule\<^sub>1 RS rule\<^sub>2"} resolves @{text
- "rule\<^sub>1"} with @{text "rule\<^sub>2"} according to the
- @{inference resolution} principle explained above. Note that the
- corresponding attribute in the Isar language is called @{attribute
- THEN}.
+ \item @{text "rule\<^sub>1 RS rule\<^sub>2"} resolves @{text "rule\<^sub>1"} with @{text
+ "rule\<^sub>2"} according to the @{inference resolution} principle
+ explained above. Note that the corresponding rule attribute in the
+ Isar language is called @{attribute THEN}.
\item @{text "rule OF rules"} resolves a list of rules with the
first rule, addressing its premises @{text "1, \<dots>, length rules"}
--- a/doc-src/IsarImplementation/Thy/Prelim.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Prelim.thy Tue Feb 09 17:06:05 2010 +0100
@@ -73,41 +73,47 @@
subsection {* Theory context \label{sec:context-theory} *}
-text {*
- A \emph{theory} is a data container with explicit name and unique
- identifier. Theories are related by a (nominal) sub-theory
+text {* A \emph{theory} is a data container with explicit name and
+ unique identifier. Theories are related by a (nominal) sub-theory
relation, which corresponds to the dependency graph of the original
construction; each theory is derived from a certain sub-graph of
- ancestor theories.
-
- The @{text "merge"} operation produces the least upper bound of two
- theories, which actually degenerates into absorption of one theory
- into the other (due to the nominal sub-theory relation).
+ ancestor theories. To this end, the system maintains a set of
+ symbolic ``identification stamps'' within each theory.
- The @{text "begin"} operation starts a new theory by importing
- several parent theories and entering a special @{text "draft"} mode,
- which is sustained until the final @{text "end"} operation. A draft
- theory acts like a linear type, where updates invalidate earlier
- versions. An invalidated draft is called ``stale''.
+ In order to avoid the full-scale overhead of explicit sub-theory
+ identification of arbitrary intermediate stages, a theory is
+ switched into @{text "draft"} mode under certain circumstances. A
+ draft theory acts like a linear type, where updates invalidate
+ earlier versions. An invalidated draft is called \emph{stale}.
- The @{text "checkpoint"} operation produces an intermediate stepping
- stone that will survive the next update: both the original and the
- changed theory remain valid and are related by the sub-theory
- relation. Checkpointing essentially recovers purely functional
- theory values, at the expense of some extra internal bookkeeping.
+ The @{text "checkpoint"} operation produces a safe stepping stone
+ that will survive the next update without becoming stale: both the
+ old and the new theory remain valid and are related by the
+ sub-theory relation. Checkpointing essentially recovers purely
+ functional theory values, at the expense of some extra internal
+ bookkeeping.
The @{text "copy"} operation produces an auxiliary version that has
the same data content, but is unrelated to the original: updates of
the copy do not affect the original, neither does the sub-theory
relation hold.
+ The @{text "merge"} operation produces the least upper bound of two
+ theories, which actually degenerates into absorption of one theory
+ into the other (according to the nominal sub-theory relation).
+
+ The @{text "begin"} operation starts a new theory by importing
+ several parent theories and entering a special mode of nameless
+ incremental updates, until the final @{text "end"} operation is
+ performed.
+
\medskip The example in \figref{fig:ex-theory} below shows a theory
graph derived from @{text "Pure"}, with theory @{text "Length"}
importing @{text "Nat"} and @{text "List"}. The body of @{text
"Length"} consists of a sequence of updates, working mostly on
- drafts. Intermediate checkpoints may occur as well, due to the
- history mechanism provided by the Isar top-level, cf.\
- \secref{sec:isar-toplevel}.
+ drafts internally, while transaction boundaries of Isar top-level
+ commands (\secref{sec:isar-toplevel}) are guaranteed to be safe
+ checkpoints.
\begin{figure}[htb]
\begin{center}
@@ -147,9 +153,10 @@
\begin{mldecls}
@{index_ML_type theory} \\
@{index_ML Theory.subthy: "theory * theory -> bool"} \\
- @{index_ML Theory.merge: "theory * theory -> theory"} \\
@{index_ML Theory.checkpoint: "theory -> theory"} \\
@{index_ML Theory.copy: "theory -> theory"} \\
+ @{index_ML Theory.merge: "theory * theory -> theory"} \\
+ @{index_ML Theory.begin_theory: "string -> theory list -> theory"} \\
\end{mldecls}
\begin{mldecls}
@{index_ML_type theory_ref} \\
@@ -160,25 +167,32 @@
\begin{description}
\item @{ML_type theory} represents theory contexts. This is
- essentially a linear type! Most operations destroy the original
- version, which then becomes ``stale''.
+ essentially a linear type, with explicit runtime checking! Most
+ internal theory operations destroy the original version, which then
+ becomes ``stale''.
- \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
- compares theories according to the inherent graph structure of the
- construction. This sub-theory relation is a nominal approximation
- of inclusion (@{text "\<subseteq>"}) of the corresponding content.
-
- \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
- absorbs one theory into the other. This fails for unrelated
- theories!
+ \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} compares theories
+ according to the intrinsic graph structure of the construction.
+ This sub-theory relation is a nominal approximation of inclusion
+ (@{text "\<subseteq>"}) of the corresponding content (according to the
+ semantics of the ML modules that implement the data).
\item @{ML "Theory.checkpoint"}~@{text "thy"} produces a safe
- stepping stone in the linear development of @{text "thy"}. The next
- update will result in two related, valid theories.
+ stepping stone in the linear development of @{text "thy"}. This
+ changes the old theory, but the next update will result in two
+ related, valid theories.
\item @{ML "Theory.copy"}~@{text "thy"} produces a variant of @{text
- "thy"} with the same data. The result is not related to the
- original; the original is unchanged.
+ "thy"} with the same data. The copy is not related to the original,
+ but the original is unchanged.
+
+ \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"} absorbs one theory
+ into the other, without changing @{text "thy\<^sub>1"} or @{text "thy\<^sub>2"}.
+ This version of ad-hoc theory merge fails for unrelated theories!
+
+ \item @{ML "Theory.begin_theory"}~@{text "name parents"} constructs
+ a new theory based on the given parents. This {\ML} function is
+ normally not invoked directly.
\item @{ML_type theory_ref} represents a sliding reference to an
always valid theory; updates on the original are propagated
@@ -198,31 +212,32 @@
subsection {* Proof context \label{sec:context-proof} *}
-text {*
- A proof context is a container for pure data with a back-reference
- to the theory it belongs to. The @{text "init"} operation creates a
- proof context from a given theory. Modifications to draft theories
- are propagated to the proof context as usual, but there is also an
- explicit @{text "transfer"} operation to force resynchronization
- with more substantial updates to the underlying theory. The actual
- context data does not require any special bookkeeping, thanks to the
- lack of destructive features.
+text {* A proof context is a container for pure data with a
+ back-reference to the theory it belongs to. The @{text "init"}
+ operation creates a proof context from a given theory.
+ Modifications to draft theories are propagated to the proof context
+ as usual, but there is also an explicit @{text "transfer"} operation
+ to force resynchronization with more substantial updates to the
+ underlying theory.
- Entities derived in a proof context need to record inherent logical
+ Entities derived in a proof context need to record logical
requirements explicitly, since there is no separate context
- identification as for theories. For example, hypotheses used in
- primitive derivations (cf.\ \secref{sec:thms}) are recorded
- separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to make double
- sure. Results could still leak into an alien proof context due to
- programming errors, but Isabelle/Isar includes some extra validity
- checks in critical positions, notably at the end of a sub-proof.
+ identification or symbolic inclusion as for theories. For example,
+ hypotheses used in primitive derivations (cf.\ \secref{sec:thms})
+ are recorded separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to
+ make double sure. Results could still leak into an alien proof
+ context due to programming errors, but Isabelle/Isar includes some
+ extra validity checks in critical positions, notably at the end of a
+ sub-proof.
Proof contexts may be manipulated arbitrarily, although the common
discipline is to follow block structure as a mental model: a given
context is extended consecutively, and results are exported back
- into the original context. Note that the Isar proof states model
+ into the original context. Note that an Isar proof state models
block-structured reasoning explicitly, using a stack of proof
- contexts internally.
+ contexts internally. For various technical reasons, the background
+ theory of an Isar proof state must not be changed while the proof is
+ still under construction!
*}
text %mlref {*
@@ -267,7 +282,8 @@
Moreover, there are total operations @{text "theory_of"} and @{text
"proof_of"} to convert a generic context into either kind: a theory
can always be selected from the sum, while a proof context might
- have to be constructed by an ad-hoc @{text "init"} operation.
+ have to be constructed by an ad-hoc @{text "init"} operation, which
+ incurs a small runtime overhead.
*}
text %mlref {*
@@ -319,6 +335,16 @@
\emph{any} theory that does not declare actual data content; @{text
"extend"} is acts like a unitary version of @{text "merge"}.
+ Implementing @{text "merge"} can be tricky. The general idea is
+ that @{text "merge (data\<^sub>1, data\<^sub>2)"} inserts those parts of @{text
+ "data\<^sub>2"} into @{text "data\<^sub>1"} that are not yet present, while
+ keeping the general order of things. The @{ML Library.merge}
+ function on plain lists may serve as a canonical template.
+
+ Note in particular that shared parts of the data must not be
+ duplicated by naive concatenation, or a theory graph shaped like a
+ chain of diamonds would cause an exponential blowup!
+
\paragraph{Proof context data} declarations need to implement the
following SML signature:
@@ -330,15 +356,18 @@
\medskip
\noindent The @{text "init"} operation is supposed to produce a pure
- value from the given background theory.
+ value from the given background theory and should be somehow
+ ``immediate''. Whenever a proof context is initialized, which
+ happens frequently, the system invokes the @{text "init"}
+ operation of \emph{all} theory data slots ever declared.
\paragraph{Generic data} provides a hybrid interface for both theory
and proof data. The @{text "init"} operation for proof contexts is
predefined to select the current data value from the background
theory.
- \bigskip A data declaration of type @{text "T"} results in the
- following interface:
+ \bigskip Any of these data declarations over type @{text "T"} results
+ in an ML structure with the following signature:
\medskip
\begin{tabular}{ll}
@@ -348,12 +377,12 @@
\end{tabular}
\medskip
- \noindent These other operations provide access for the particular
- kind of context (theory, proof, or generic context). Note that this
- is a safe interface: there is no other way to access the
- corresponding data slot of a context. By keeping these operations
- private, a component may maintain abstract values authentically,
- without other components interfering.
+ \noindent These other operations provide exclusive access for the
+ particular kind of context (theory, proof, or generic context).
+ This interface fully observes the ML discipline for types and
+ scopes: there is no other way to access the corresponding data slot
+ of a context. By keeping these operations private, an Isabelle/ML
+ module may maintain abstract values authentically.
*}
text %mlref {*
@@ -379,32 +408,127 @@
\end{description}
*}
+text %mlex {*
+ The following artificial example demonstrates theory
+ data: we maintain a set of terms that are supposed to be wellformed
+ wrt.\ the enclosing theory. The public interface is as follows:
+*}
+
+ML {*
+ signature WELLFORMED_TERMS =
+ sig
+ val get: theory -> term list
+ val add: term -> theory -> theory
+ end;
+*}
+
+text {* \noindent The implementation uses private theory data
+ internally, and only exposes an operation that involves explicit
+ argument checking wrt.\ the given theory. *}
+
+ML {*
+ structure Wellformed_Terms: WELLFORMED_TERMS =
+ struct
+
+ structure Terms = Theory_Data
+ (
+ type T = term OrdList.T;
+ val empty = [];
+ val extend = I;
+ fun merge (ts1, ts2) =
+ OrdList.union TermOrd.fast_term_ord ts1 ts2;
+ )
+
+ val get = Terms.get;
+
+ fun add raw_t thy =
+ let val t = Sign.cert_term thy raw_t
+ in Terms.map (OrdList.insert TermOrd.fast_term_ord t) thy end;
+
+ end;
+*}
+
+text {* We use @{ML_type "term OrdList.T"} for reasonably efficient
+ representation of a set of terms: all operations are linear in the
+ number of stored elements. Here we assume that our users do not
+ care about the declaration order, since that data structure forces
+ its own arrangement of elements.
+
+ Observe how the @{verbatim merge} operation joins the data slots of
+ the two constituents: @{ML OrdList.union} prevents duplication of
+ common data from different branches, thus avoiding the danger of
+ exponential blowup. (Plain list append etc.\ must never be used for
+ theory data merges.)
+
+ \medskip Our intended invariant is achieved as follows:
+ \begin{enumerate}
+
+ \item @{ML Wellformed_Terms.add} only admits terms that have passed
+ the @{ML Sign.cert_term} check of the given theory at that point.
+
+ \item Wellformedness in the sense of @{ML Sign.cert_term} is
+ monotonic wrt.\ the sub-theory relation. So our data can move
+ upwards in the hierarchy (via extension or merges), and maintain
+ wellformedness without further checks.
+
+ \end{enumerate}
+
+ Note that all basic operations of the inference kernel (which
+ includes @{ML Sign.cert_term}) observe this monotonicity principle,
+ but other user-space tools don't. For example, fully-featured
+ type-inference via @{ML Syntax.check_term} (cf.\
+ \secref{sec:term-check}) is not necessarily monotonic wrt.\ the
+ background theory, since constraints of term constants can be
+ strengthened by later declarations.
+
+ In most cases, user-space context data does not have to take such
+ invariants too seriously. The situation is different in the
+ implementation of the inference kernel itself, which uses the very
+ same data mechanisms for types, constants, axioms etc.
+*}
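+
+text {* \noindent A minimal usage sketch (the term @{text "f x"} is an
+  arbitrary example of a wellformed term of the current theory): *}
+
+setup {* Wellformed_Terms.add @{term "f x"} *}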
+
section {* Names \label{sec:names} *}
-text {*
- In principle, a name is just a string, but there are various
- convention for encoding additional structure. For example, ``@{text
- "Foo.bar.baz"}'' is considered as a qualified name consisting of
- three basic name components. The individual constituents of a name
- may have further substructure, e.g.\ the string
- ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.
+text {* In principle, a name is just a string, but there are various
+ conventions for representing additional structure. For example,
+ ``@{text "Foo.bar.baz"}'' is considered as a long name consisting of
+ qualifier @{text "Foo.bar"} and base name @{text "baz"}. The
+ individual constituents of a name may have further substructure,
+ e.g.\ the string ``\verb,\,\verb,<alpha>,'' encodes as a single
+ symbol.
+
+ \medskip Subsequently, we shall introduce specific categories of
+ names. Roughly speaking these correspond to logical entities as
+ follows:
+ \begin{itemize}
+
+ \item Basic names (\secref{sec:basic-name}): free and bound
+ variables.
+
+ \item Indexed names (\secref{sec:indexname}): schematic variables.
+
+ \item Long names (\secref{sec:long-name}): constants of any kind
+ (type constructors, term constants, other concepts defined in user
+ space). Such entities are typically managed via name spaces
+ (\secref{sec:name-space}).
+
+ \end{itemize}
*}
subsection {* Strings of symbols *}
-text {*
- A \emph{symbol} constitutes the smallest textual unit in Isabelle
- --- raw characters are normally not encountered at all. Isabelle
- strings consist of a sequence of symbols, represented as a packed
- string or a list of strings. Each symbol is in itself a small
- string, which has either one of the following forms:
+text {* A \emph{symbol} constitutes the smallest textual unit in
+ Isabelle --- raw ML characters are normally not encountered at all!
+ Isabelle strings consist of a sequence of symbols, represented as a
+ packed string or an exploded list of strings. Each symbol is in
+ itself a small string, which has either one of the following forms:
\begin{enumerate}
- \item a single ASCII character ``@{text "c"}'', for example
- ``\verb,a,'',
+ \item a single ASCII character ``@{text "c"}'' or raw byte in the
+ range of 128\dots 255, for example ``\verb,a,'',
\item a regular symbol ``\verb,\,\verb,<,@{text "ident"}\verb,>,'',
for example ``\verb,\,\verb,<alpha>,'',
@@ -413,7 +537,7 @@
for example ``\verb,\,\verb,<^bold>,'',
\item a raw symbol ``\verb,\,\verb,<^raw:,@{text text}\verb,>,''
- where @{text text} constists of printable characters excluding
+ where @{text text} consists of printable characters excluding
``\verb,.,'' and ``\verb,>,'', for example
``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
@@ -432,22 +556,31 @@
may occur within regular Isabelle identifiers.
Since the character set underlying Isabelle symbols is 7-bit ASCII
- and 8-bit characters are passed through transparently, Isabelle may
- also process Unicode/UCS data in UTF-8 encoding. Unicode provides
- its own collection of mathematical symbols, but there is no built-in
- link to the standard collection of Isabelle.
+ and 8-bit characters are passed through transparently, Isabelle can
+ also process Unicode/UCS data in UTF-8 encoding.\footnote{When
+ counting precise source positions internally, bytes in the range of
+ 128\dots 191 are ignored. In UTF-8 encoding, this interval covers
+ the additional trailer bytes, so Isabelle happens to count Unicode
+ characters here, not bytes in memory. In ISO-Latin encoding, the
+ ignored range merely includes some extra punctuation characters that
+ even have replacements within the standard collection of Isabelle
+ symbols; the accented letters range is counted properly.} Unicode
+ provides its own collection of mathematical symbols, but within the
+ core Isabelle/ML world there is no link to the standard collection
+ of Isabelle regular symbols.
\medskip Output of Isabelle symbols depends on the print mode
(\secref{print-mode}). For example, the standard {\LaTeX} setup of
the Isabelle document preparation system would present
``\verb,\,\verb,<alpha>,'' as @{text "\<alpha>"}, and
``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text
- "\<^bold>\<alpha>"}.
+ "\<^bold>\<alpha>"}. On-screen rendering usually works by mapping a finite
+ subset of Isabelle symbols to suitable Unicode characters.
*}
text %mlref {*
\begin{mldecls}
- @{index_ML_type "Symbol.symbol"} \\
+ @{index_ML_type "Symbol.symbol": string} \\
@{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
@{index_ML Symbol.is_letter: "Symbol.symbol -> bool"} \\
@{index_ML Symbol.is_digit: "Symbol.symbol -> bool"} \\
@@ -462,12 +595,15 @@
\begin{description}
\item @{ML_type "Symbol.symbol"} represents individual Isabelle
- symbols; this is an alias for @{ML_type "string"}.
+ symbols.
\item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
from the packed form. This function supersedes @{ML
"String.explode"} for virtually all purposes of manipulating text in
- Isabelle!
+ Isabelle!\footnote{The runtime overhead for exploded strings is
+ mainly that of the list structure: individual symbols that happen to
+ be a singleton string --- which is the most common case --- do not
+ require extra memory in Poly/ML.}
\item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
"Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
@@ -483,10 +619,20 @@
symbol into the datatype version.
\end{description}
+
+ \paragraph{Historical note.} In the original SML90 standard the
+ primitive ML type @{ML_type char} did not exist, and the basic @{ML
+ "explode: string -> string list"} operation would produce a list of
+ singleton strings as in Isabelle/ML today. When SML97 came out,
+ Isabelle did not adopt its slightly anachronistic 8-bit characters,
+ but the idea of exploding a string into a list of small strings was
+ extended to ``symbols'' as explained above. Thus Isabelle sources
+ can refer to an infinite store of user-defined symbols, without
+ having to worry about the multitude of Unicode encodings.
*}
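+
+text %mlex {* A minimal sketch of symbol access (the input strings are
+  arbitrary examples; the expected results are indicated in comments):
+*}
+
+ML {*
+  (*one list element per Isabelle symbol, not per ML character*)
+  val syms = Symbol.explode "abc";   (*["a", "b", "c"]*)
+  val ok = Symbol.is_letter "a";     (*true*)
+*}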
-subsection {* Basic names \label{sec:basic-names} *}
+subsection {* Basic names \label{sec:basic-name} *}
text {*
A \emph{basic name} essentially consists of a single Isabelle
@@ -502,8 +648,8 @@
These special versions provide copies of the basic name space, apart
from anything that normally appears in the user text. For example,
system generated variables in Isar proof contexts are usually marked
- as internal, which prevents mysterious name references like @{text
- "xaa"} to appear in the text.
+ as internal, which prevents mysterious names like @{text "xaa"} from
+ appearing in human-readable text.
\medskip Manipulating binding scopes often requires on-the-fly
renamings. A \emph{name context} contains a collection of already
@@ -534,6 +680,9 @@
@{index_ML Name.invents: "Name.context -> string -> int -> string list"} \\
@{index_ML Name.variants: "string list -> Name.context -> string list * Name.context"} \\
\end{mldecls}
+ \begin{mldecls}
+ @{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
+ \end{mldecls}
\begin{description}
@@ -555,11 +704,20 @@
\item @{ML Name.variants}~@{text "names context"} produces fresh
variants of @{text "names"}; the result is entered into the context.
+ \item @{ML Variable.names_of}~@{text "ctxt"} retrieves the context
+ of declared type and term variable names. Projecting a proof
+ context down to a primitive name context is occasionally useful when
+ invoking lower-level operations. Regular management of ``fresh
+ variables'' is done by suitable operations of structure @{ML_struct
+ Variable}, which is also able to provide an official status of
+ ``locally fixed variable'' within the logical environment (cf.\
+ \secref{sec:variables}).
+
\end{description}
*}
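+
+text %mlex {* A minimal sketch of fresh-name generation (the input
+  names are arbitrary examples): *}
+
+ML {*
+  (*produce fresh variants of the given names*)
+  val (names, used) = Name.variants ["x", "x", "y"] Name.context;
+*}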
-subsection {* Indexed names *}
+subsection {* Indexed names \label{sec:indexname} *}
text {*
An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
@@ -571,9 +729,9 @@
@{text "maxidx + 1"}; the maximum index of an empty collection is
@{text "-1"}.
- Occasionally, basic names and indexed names are injected into the
- same pair type: the (improper) indexname @{text "(x, -1)"} is used
- to encode basic names.
+ Occasionally, basic names are injected into the same pair type of
+ indexed names: then @{text "(x, -1)"} is used to encode the basic
+ name @{text "x"}.
\medskip Isabelle syntax observes the following rules for
representing an indexname @{text "(x, i)"} as a packed string:
@@ -588,11 +746,12 @@
\end{itemize}
- Indexnames may acquire large index numbers over time. Results are
- normalized towards @{text "0"} at certain checkpoints, notably at
- the end of a proof. This works by producing variants of the
- corresponding basic name components. For example, the collection
- @{text "?x1, ?x7, ?x42"} becomes @{text "?x, ?xa, ?xb"}.
+ Indexnames may acquire large index numbers after several maxidx
+ shifts have been applied. Results are usually normalized towards
+ @{text "0"} at certain checkpoints, notably at the end of a proof.
+ This works by producing variants of the corresponding basic name
+ components. For example, the collection @{text "?x1, ?x7, ?x42"}
+ becomes @{text "?x, ?xa, ?xb"}.
*}
text %mlref {*
@@ -605,65 +764,35 @@
\item @{ML_type indexname} represents indexed names. This is an
abbreviation for @{ML_type "string * int"}. The second component is
usually non-negative, except for situations where @{text "(x, -1)"}
- is used to embed basic names into this type.
+ is used to inject basic names into this type. Other negative
+ indexes should not be used.
\end{description}
*}
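+
+text %mlex {* For instance, the indexed name @{text "?x1"} corresponds
+  to the following ML value (an arbitrary example): *}
+
+ML {*
+  val idx = ("x", 1) : indexname;
+*}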
-subsection {* Qualified names and name spaces *}
+subsection {* Long names \label{sec:long-name} *}
-text {*
- A \emph{qualified name} consists of a non-empty sequence of basic
- name components. The packed representation uses a dot as separator,
- as in ``@{text "A.b.c"}''. The last component is called \emph{base}
- name, the remaining prefix \emph{qualifier} (which may be empty).
- The idea of qualified names is to encode nested structures by
- recording the access paths as qualifiers. For example, an item
- named ``@{text "A.b.c"}'' may be understood as a local entity @{text
- "c"}, within a local structure @{text "b"}, within a global
- structure @{text "A"}. Typically, name space hierarchies consist of
- 1--2 levels of qualification, but this need not be always so.
+text {* A \emph{long name} consists of a sequence of non-empty name
+ components. The packed representation uses a dot as separator, as
+ in ``@{text "A.b.c"}''. The last component is called \emph{base
+ name}, the remaining prefix is called \emph{qualifier} (which may be
+ empty). The qualifier can be understood as the access path to the
+ named entity while passing through some nested block-structure,
+ although our free-form long names do not really enforce any strict
+ discipline.
+
+ For example, an item named ``@{text "A.b.c"}'' may be understood as
+ a local entity @{text "c"}, within a local structure @{text "b"},
+ within a global structure @{text "A"}. In practice, long names
+ usually represent 1--3 levels of qualification. User ML code should
+ not make any assumptions about the particular structure of long
+ names!
The empty name is commonly used as an indication of unnamed
- entities, whenever this makes any sense. The basic operations on
- qualified names are smart enough to pass through such improper names
- unchanged.
-
- \medskip A @{text "naming"} policy tells how to turn a name
- specification into a fully qualified internal name (by the @{text
- "full"} operation), and how fully qualified names may be accessed
- externally. For example, the default naming policy is to prefix an
- implicit path: @{text "full x"} produces @{text "path.x"}, and the
- standard accesses for @{text "path.x"} include both @{text "x"} and
- @{text "path.x"}. Normally, the naming is implicit in the theory or
- proof context; there are separate versions of the corresponding.
-
- \medskip A @{text "name space"} manages a collection of fully
- internalized names, together with a mapping between external names
- and internal names (in both directions). The corresponding @{text
- "intern"} and @{text "extern"} operations are mostly used for
- parsing and printing only! The @{text "declare"} operation augments
- a name space according to the accesses determined by the naming
- policy.
-
- \medskip As a general principle, there is a separate name space for
- each kind of formal entity, e.g.\ logical constant, type
- constructor, type class, theorem. It is usually clear from the
- occurrence in concrete syntax (or from the scope) which kind of
- entity a name refers to. For example, the very same name @{text
- "c"} may be used uniformly for a constant, type constructor, and
- type class.
-
- There are common schemes to name theorems systematically, according
- to the name of the main logical entity involved, e.g.\ @{text
- "c.intro"} for a canonical theorem related to constant @{text "c"}.
- This technique of mapping names from one space into another requires
- some care in order to avoid conflicts. In particular, theorem names
- derived from a type constructor or type class are better suffixed in
- addition to the usual qualification, e.g.\ @{text "c_type.intro"}
- and @{text "c_class.intro"} for theorems related to type @{text "c"}
- and class @{text "c"}, respectively.
+ entities, or entities that are not entered into the corresponding
+ name space, whenever this makes any sense. The basic operations on
+ long names map empty names again to empty names.
*}
text %mlref {*
@@ -674,6 +803,85 @@
@{index_ML Long_Name.implode: "string list -> string"} \\
@{index_ML Long_Name.explode: "string -> string list"} \\
\end{mldecls}
+
+ \begin{description}
+
+ \item @{ML Long_Name.base_name}~@{text "name"} returns the base name
+ of a long name.
+
+ \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
+ of a long name.
+
+ \item @{ML Long_Name.append}~@{text "name\<^isub>1 name\<^isub>2"} appends two long
+ names.
+
+ \item @{ML Long_Name.implode}~@{text "names"} and @{ML
+ Long_Name.explode}~@{text "name"} convert between the packed string
+ representation and the explicit list form of long names.
+
+ \end{description}
+*}
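+
+text %mlex {* A minimal sketch of these operations on an arbitrary
+  example name:
+*}
+
+ML {*
+  val name = Long_Name.implode ["A", "b", "c"];  (*"A.b.c"*)
+  val base = Long_Name.base_name name;           (*"c"*)
+  val qual = Long_Name.qualifier name;           (*"A.b"*)
+  val parts = Long_Name.explode name;            (*["A", "b", "c"]*)
+*}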
+
+
+subsection {* Name spaces \label{sec:name-space} *}
+
+text {* A @{text "name space"} manages a collection of long names,
+ together with a mapping between partially qualified external names
+ and fully qualified internal names (in both directions). Note that
+ the corresponding @{text "intern"} and @{text "extern"} operations
+ are mostly used for parsing and printing only! The @{text
+ "declare"} operation augments a name space according to the accesses
+ determined by a given binding, and a naming policy from the context.
+
+ \medskip A @{text "binding"} specifies details about the prospective
+ long name of a newly introduced formal entity. It consists of a
+ base name, prefixes for qualification (separate ones for system
+ infrastructure and user-space mechanisms), a slot for the original
+ source position, and some additional flags.
+
+ \medskip A @{text "naming"} provides some additional details for
+ producing a long name from a binding. Normally, the naming is
+ implicit in the theory or proof context. The @{text "full"}
+ operation (and its variants for different context types) produces a
+ fully qualified internal name to be entered into a name space. The
+ main equation of this ``chemical reaction'' when binding new
+ entities in a context is as follows:
+
+ \smallskip
+ \begin{tabular}{l}
+ @{text "binding + naming \<longrightarrow> long name + name space accesses"}
+ \end{tabular}
+ \smallskip
+
+ \medskip As a general principle, there is a separate name space for
+ each kind of formal entity, e.g.\ fact, logical constant, type
+ constructor, type class. It is usually clear from the occurrence in
+ concrete syntax (or from the scope) which kind of entity a name
+ refers to. For example, the very same name @{text "c"} may be used
+ uniformly for a constant, type constructor, and type class.
+
+ There are common schemes to name derived entities systematically
+ according to the name of the main logical entity involved, e.g.\
+ fact @{text "c.intro"} for a canonical introduction rule related to
+ constant @{text "c"}. This technique of mapping names from one
+ space into another requires some care in order to avoid conflicts.
+ In particular, theorem names derived from a type constructor or type
+ class are better suffixed in addition to the usual qualification,
+ e.g.\ @{text "c_type.intro"} and @{text "c_class.intro"} for
+ theorems related to type @{text "c"} and class @{text "c"},
+ respectively.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type binding} \\
+ @{index_ML Binding.empty: binding} \\
+ @{index_ML Binding.name: "string -> binding"} \\
+ @{index_ML Binding.qualify: "bool -> string -> binding -> binding"} \\
+ @{index_ML Binding.prefix: "bool -> string -> binding -> binding"} \\
+ @{index_ML Binding.conceal: "binding -> binding"} \\
+ @{index_ML Binding.str_of: "binding -> string"} \\
+ \end{mldecls}
\begin{mldecls}
@{index_ML_type Name_Space.naming} \\
@{index_ML Name_Space.default_naming: Name_Space.naming} \\
@@ -688,22 +896,41 @@
string * Name_Space.T"} \\
@{index_ML Name_Space.intern: "Name_Space.T -> string -> string"} \\
@{index_ML Name_Space.extern: "Name_Space.T -> string -> string"} \\
+  @{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"} \\
\end{mldecls}
\begin{description}
- \item @{ML Long_Name.base_name}~@{text "name"} returns the base name of a
- qualified name.
+ \item @{ML_type binding} represents the abstract concept of name
+ bindings.
+
+ \item @{ML Binding.empty} is the empty binding.
- \item @{ML Long_Name.qualifier}~@{text "name"} returns the qualifier
- of a qualified name.
+ \item @{ML Binding.name}~@{text "name"} produces a binding with base
+ name @{text "name"}.
+
+ \item @{ML Binding.qualify}~@{text "mandatory name binding"}
+ prefixes qualifier @{text "name"} to @{text "binding"}. The @{text
+  "mandatory"} flag tells whether this name component always needs to be
+ given in name space accesses --- this is mostly @{text "false"} in
+ practice. Note that this part of qualification is typically used in
+ derived specification mechanisms.
- \item @{ML Long_Name.append}~@{text "name\<^isub>1 name\<^isub>2"}
- appends two qualified names.
+ \item @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but
+ affects the system prefix. This part of extra qualification is
+ typically used in the infrastructure for modular specifications,
+ notably ``local theory targets'' (see also \chref{ch:local-theory}).
- \item @{ML Long_Name.implode}~@{text "names"} and @{ML
- Long_Name.explode}~@{text "name"} convert between the packed string
- representation and the explicit list form of qualified names.
+ \item @{ML Binding.conceal}~@{text "binding"} indicates that the
+ binding shall refer to an entity that serves foundational purposes
+ only. This flag helps to mark implementation details of
+  specification mechanisms etc. Other tools should not depend on the
+ particulars of concealed entities (cf.\ @{ML
+ Name_Space.is_concealed}).
+
+ \item @{ML Binding.str_of}~@{text "binding"} produces a string
+ representation for human-readable output, together with some formal
+  markup that might be used in GUI front-ends, for example.
\item @{ML_type Name_Space.naming} represents the abstract concept of
a naming policy.
@@ -747,6 +974,10 @@
This operation is mostly for printing! User code should not rely on
the precise result too much.
+ \item @{ML Name_Space.is_concealed}~@{text "space name"} indicates
+ whether @{text "name"} refers to a strictly private entity that
+ other tools are supposed to ignore!
+
\end{description}
*}
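+
+text %mlex {* A minimal sketch of composing a binding from the
+  operations above (the concrete names are arbitrary):
+*}
+
+ML {*
+  val b =
+    Binding.name "intro"
+    |> Binding.qualify false "c"   (*optional qualifier "c"*)
+    |> Binding.conceal;            (*mark as foundational only*)
+
+  (*human-readable representation, e.g. for diagnostic output*)
+  val s = Binding.str_of b;
+*}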
--- a/doc-src/IsarImplementation/Thy/Proof.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Proof.thy Tue Feb 09 17:06:05 2010 +0100
@@ -25,10 +25,10 @@
categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\
\emph{schematic variables} (e.g.\ @{text "?x"}). Incidently, a
universal result @{text "\<turnstile> \<And>x. B(x)"} has the HHF normal form @{text
- "\<turnstile> B(?x)"}, which represents its generality nicely without requiring
- an explicit quantifier. The same principle works for type
- variables: @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile>
- \<forall>\<alpha>. B(\<alpha>)"}'' without demanding a truly polymorphic framework.
+ "\<turnstile> B(?x)"}, which represents its generality without requiring an
+ explicit quantifier. The same principle works for type variables:
+ @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile> \<forall>\<alpha>. B(\<alpha>)"}''
+ without demanding a truly polymorphic framework.
\medskip Additional care is required to treat type variables in a
way that facilitates type-inference. In principle, term variables
@@ -95,7 +95,8 @@
@{index_ML Variable.polymorphic: "Proof.context -> term list -> term list"} \\
@{index_ML Variable.import: "bool -> thm list -> Proof.context ->
(((ctyp * ctyp) list * (cterm * cterm) list) * thm list) * Proof.context"} \\
- @{index_ML Variable.focus: "cterm -> Proof.context -> ((string * cterm) list * cterm) * Proof.context"} \\
+ @{index_ML Variable.focus: "cterm -> Proof.context ->
+ ((string * cterm) list * cterm) * Proof.context"} \\
\end{mldecls}
\begin{description}
@@ -144,6 +145,72 @@
\end{description}
*}
+text %mlex {* The following example (in theory @{theory Pure}) shows
+  how to work with fixed term and type parameters and with
+  type-inference.
+*}
+
+typedecl foo -- {* some basic type for testing purposes *}
+
+ML {*
+ (*static compile-time context -- for testing only*)
+ val ctxt0 = @{context};
+
+ (*locally fixed parameters -- no type assignment yet*)
+ val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
+
+ (*t1: most general fixed type; t1': most general arbitrary type*)
+ val t1 = Syntax.read_term ctxt1 "x";
+ val t1' = singleton (Variable.polymorphic ctxt1) t1;
+
+ (*term u enforces specific type assignment*)
+ val u = Syntax.read_term ctxt1 "(x::foo) \<equiv> y";
+
+ (*official declaration of u -- propagates constraints etc.*)
+ val ctxt2 = ctxt1 |> Variable.declare_term u;
+ val t2 = Syntax.read_term ctxt2 "x"; (*x::foo is enforced*)
+*}
+
+text {* In the above example, the starting context had been derived
+ from the toplevel theory, which means that fixed variables are
+ internalized literally: @{verbatim "x"} is mapped again to
+ @{verbatim "x"}, and attempting to fix it again in the subsequent
+ context is an error. Alternatively, fixed parameters can be renamed
+ explicitly as follows:
+*}
+
+ML {*
+ val ctxt0 = @{context};
+ val ([x1, x2, x3], ctxt1) =
+ ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
+*}
+
+text {* \noindent Subsequent ML code can now work with the invented
+ names of @{verbatim x1}, @{verbatim x2}, @{verbatim x3}, without
+  depending on the details of the system policy for introducing these
+ variants. Recall that within a proof body the system always invents
+ fresh ``skolem constants'', e.g.\ as follows:
+*}
+
+lemma "PROP XXX"
+proof -
+ ML_prf %"ML" {*
+ val ctxt0 = @{context};
+
+ val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
+ val ([x2], ctxt2) = ctxt1 |> Variable.add_fixes ["x"];
+ val ([x3], ctxt3) = ctxt2 |> Variable.add_fixes ["x"];
+
+ val ([y1, y2], ctxt4) =
+ ctxt3 |> Variable.variant_fixes ["y", "y"];
+ *}
+ oops
+
+text {* \noindent In this situation @{ML Variable.add_fixes} and @{ML
+ Variable.variant_fixes} are very similar, but identical name
+ proposals given in a row are only accepted by the second version.
+*}
+
section {* Assumptions \label{sec:assumptions} *}
@@ -171,21 +238,21 @@
context into a (smaller) outer context, by discharging the
difference of the assumptions as specified by the associated export
rules. Note that the discharged portion is determined by the
- difference contexts, not the facts being exported! There is a
+ difference of contexts, not the facts being exported! There is a
separate flag to indicate a goal context, where the result is meant
to refine an enclosing sub-goal of a structured proof state.
\medskip The most basic export rule discharges assumptions directly
by means of the @{text "\<Longrightarrow>"} introduction rule:
\[
- \infer[(@{text "\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \infer[(@{text "\<Longrightarrow>\<dash>intro"})]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
\]
The variant for goal refinements marks the newly introduced
premises, which causes the canonical Isar goal refinement scheme to
enforce unification with local premises within the goal:
\[
- \infer[(@{text "#\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \infer[(@{text "#\<Longrightarrow>\<dash>intro"})]{@{text "\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
\]
\medskip Alternative versions of assumptions may perform arbitrary
@@ -194,7 +261,7 @@
definition works by fixing @{text "x"} and assuming @{text "x \<equiv> t"},
with the following export rule to reverse the effect:
\[
- \infer[(@{text "\<equiv>-expand"})]{@{text "\<Gamma> \\ x \<equiv> t \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
+ \infer[(@{text "\<equiv>\<dash>expand"})]{@{text "\<Gamma> - (x \<equiv> t) \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
\]
This works, because the assumption @{text "x \<equiv> t"} was introduced in
a context with @{text "x"} being fresh, so @{text "x"} does not
@@ -222,8 +289,8 @@
simultaneously.
\item @{ML Assumption.assume}~@{text "A"} turns proposition @{text
- "A"} into a raw assumption @{text "A \<turnstile> A'"}, where the conclusion
- @{text "A'"} is in HHF normal form.
+ "A"} into a primitive assumption @{text "A \<turnstile> A'"}, where the
+ conclusion @{text "A'"} is in HHF normal form.
\item @{ML Assumption.add_assms}~@{text "r As"} augments the context
by assumptions @{text "As"} with export rule @{text "r"}. The
@@ -232,7 +299,8 @@
\item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
@{ML Assumption.add_assms} where the export rule performs @{text
- "\<Longrightarrow>_intro"} or @{text "#\<Longrightarrow>_intro"}, depending on goal mode.
+ "\<Longrightarrow>\<dash>intro"} or @{text "#\<Longrightarrow>\<dash>intro"}, depending on goal
+ mode.
\item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
exports result @{text "thm"} from the @{text "inner"} context
@@ -244,8 +312,32 @@
\end{description}
*}
+text %mlex {* The following example demonstrates how rules can be
+ derived by building up a context of assumptions first, and exporting
+ some local fact afterwards. We refer to @{theory Pure} equality
+ here for testing purposes.
+*}
-section {* Results \label{sec:results} *}
+ML {*
+ (*static compile-time context -- for testing only*)
+ val ctxt0 = @{context};
+
+ val ([eq], ctxt1) =
+ ctxt0 |> Assumption.add_assumes [@{cprop "x \<equiv> y"}];
+ val eq' = Thm.symmetric eq;
+
+ (*back to original context -- discharges assumption*)
+ val r = Assumption.export false ctxt1 ctxt0 eq';
+*}
+
+text {* \noindent Note that the variables of the resulting rule are
+  not generalized. This would have required fixing them properly in
+  the context beforehand, and exporting wrt.\ variables afterwards (cf.\
+ @{ML Variable.export} or the combined @{ML "ProofContext.export"}).
+*}
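+
+text {* \noindent A minimal sketch along these lines, using the
+  combined @{ML "ProofContext.export"} (the variable names are
+  arbitrary):
+*}
+
+ML {*
+  val ctxt0 = @{context};
+
+  (*fix the variables and assume the equation in the augmented context*)
+  val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
+  val ([eq], ctxt2) =
+    ctxt1 |> Assumption.add_assumes [@{cprop "x \<equiv> y"}];
+
+  (*export generalizes the fixed variables and discharges the assumption*)
+  val [r] = ProofContext.export ctxt2 ctxt0 [Thm.symmetric eq];
+*}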
+
+
+section {* Structured goals and results \label{sec:struct-goals} *}
text {*
Local results are established by monotonic reasoning from facts
@@ -263,6 +355,10 @@
the tactic needs to solve the conclusion, but may use the premise as
a local fact, for locally fixed variables.
+ The family of @{text "FOCUS"} combinators is similar to @{text
+  "SUBPROOF"}, but allows retaining schematic variables and pending
+ subgoals in the resulting goal state.
+
The @{text "prove"} operation provides an interface for structured
backwards reasoning under program control, with some explicit sanity
checks of the result. The goal context can be augmented by
@@ -275,7 +371,8 @@
The @{text "obtain"} operation produces results by eliminating
existing facts by means of a given tactic. This acts like a dual
conclusion: the proof demonstrates that the context may be augmented
- by certain fixed variables and assumptions. See also
+ by parameters and assumptions, without affecting any conclusions
+ that do not mention these parameters. See also
\cite{isabelle-isar-ref} for the user-level @{text "\<OBTAIN>"} and
@{text "\<GUESS>"} elements. Final results, which may not refer to
the parameters in the conclusion, need to be exported explicitly into
@@ -285,7 +382,11 @@
text %mlref {*
\begin{mldecls}
@{index_ML SUBPROOF: "(Subgoal.focus -> tactic) -> Proof.context -> int -> tactic"} \\
+ @{index_ML Subgoal.FOCUS: "(Subgoal.focus -> tactic) -> Proof.context -> int -> tactic"} \\
+ @{index_ML Subgoal.FOCUS_PREMS: "(Subgoal.focus -> tactic) -> Proof.context -> int -> tactic"} \\
+ @{index_ML Subgoal.FOCUS_PARAMS: "(Subgoal.focus -> tactic) -> Proof.context -> int -> tactic"} \\
\end{mldecls}
+
\begin{mldecls}
@{index_ML Goal.prove: "Proof.context -> string list -> term list -> term ->
({prems: thm list, context: Proof.context} -> tactic) -> thm"} \\
@@ -305,6 +406,12 @@
schematic parameters of the goal are imported into the context as
fixed ones, which may not be instantiated in the sub-proof.
+ \item @{ML Subgoal.FOCUS}, @{ML Subgoal.FOCUS_PREMS}, and @{ML
+ Subgoal.FOCUS_PARAMS} are similar to @{ML SUBPROOF}, but are
+ slightly more flexible: only the specified parts of the subgoal are
+ imported into the context, and the body tactic may introduce new
+ subgoals and schematic variables.
+
\item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
"C"} in the context augmented by fixed variables @{text "xs"} and
assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
--- a/doc-src/IsarImplementation/Thy/Syntax.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Syntax.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2,8 +2,14 @@
imports Base
begin
-chapter {* Syntax and type-checking *}
+chapter {* Concrete syntax and type-checking *}
text FIXME
+section {* Parsing and printing *}
+
+text FIXME
+
+section {* Checking and unchecking \label{sec:term-check} *}
+
end
--- a/doc-src/IsarImplementation/Thy/Tactic.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/Tactic.thy Tue Feb 09 17:06:05 2010 +0100
@@ -4,15 +4,13 @@
chapter {* Tactical reasoning *}
-text {*
- Tactical reasoning works by refining the initial claim in a
+text {* Tactical reasoning works by refining an initial claim in a
backwards fashion, until a solved form is reached. A @{text "goal"}
consists of several subgoals that need to be solved in order to
achieve the main statement; zero subgoals means that the proof may
be finished. A @{text "tactic"} is a refinement operation that maps
a goal to a lazy sequence of potential successors. A @{text
- "tactical"} is a combinator for composing tactics.
-*}
+ "tactical"} is a combinator for composing tactics. *}
section {* Goals \label{sec:tactical-goals} *}
@@ -40,8 +38,8 @@
The main conclusion @{text C} is internally marked as a protected
proposition, which is represented explicitly by the notation @{text
- "#C"}. This ensures that the decomposition into subgoals and main
- conclusion is well-defined for arbitrarily structured claims.
+ "#C"} here. This ensures that the decomposition into subgoals and
+ main conclusion is well-defined for arbitrarily structured claims.
\medskip Basic goal management is performed via the following
Isabelle/Pure rules:
@@ -98,26 +96,27 @@
supporting memoing.\footnote{The lack of memoing and the strict
nature of SML requires some care when working with low-level
sequence operations, to avoid duplicate or premature evaluation of
- results.}
+ results. It also means that modified runtime behavior, such as
+ timeout, is very hard to achieve for general tactics.}
An \emph{empty result sequence} means that the tactic has failed: in
- a compound tactic expressions other tactics might be tried instead,
+ a compound tactic expression other tactics might be tried instead,
or the whole refinement step might fail outright, producing a
- toplevel error message. When implementing tactics from scratch, one
- should take care to observe the basic protocol of mapping regular
- error conditions to an empty result; only serious faults should
- emerge as exceptions.
+ toplevel error message in the end. When implementing tactics from
+ scratch, one should take care to observe the basic protocol of
+ mapping regular error conditions to an empty result; only serious
+ faults should emerge as exceptions.
By enumerating \emph{multiple results}, a tactic can easily express
the potential outcome of an internal search process. There are also
combinators for building proof tools that involve search
systematically, see also \secref{sec:tacticals}.
- \medskip As explained in \secref{sec:tactical-goals}, a goal state
- essentially consists of a list of subgoals that imply the main goal
- (conclusion). Tactics may operate on all subgoals or on a
- particularly specified subgoal, but must not change the main
- conclusion (apart from instantiating schematic goal variables).
+ \medskip As explained before, a goal state essentially consists of a
+ list of subgoals that imply the main goal (conclusion). Tactics may
+ operate on all subgoals or on a particularly specified subgoal, but
+ must not change the main conclusion (apart from instantiating
+ schematic goal variables).
Tactics with explicit \emph{subgoal addressing} are of the form
@{text "int \<rightarrow> tactic"} and may be applied to a particular subgoal
@@ -139,7 +138,7 @@
Tactics with internal subgoal addressing should expose the subgoal
index as @{text "int"} argument in full generality; a hardwired
- subgoal 1 inappropriate.
+ subgoal 1 is not acceptable.
\medskip The main well-formedness conditions for proper tactics are
summarized as follows.
@@ -161,11 +160,11 @@
\end{itemize}
Some of these conditions are checked by higher-level goal
- infrastructure (\secref{sec:results}); others are not checked
+ infrastructure (\secref{sec:struct-goals}); others are not checked
explicitly, and violating them merely results in ill-behaved tactics
experienced by the user (e.g.\ tactics that insist on being
- applicable only to singleton goals, or disallow composition with
- basic tacticals).
+ applicable only to singleton goals, or prevent composition via
+ standard tacticals).
*}
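+
+text %mlex {* As a minimal sketch, the following rudimentary tactic
+  observes the basic protocol above: it is applicable only if the
+  addressed subgoal actually exists, and expresses regular failure by
+  an empty result sequence rather than an exception:
+*}
+
+ML {*
+  fun trivial_tac i : tactic = fn st =>
+    if 1 <= i andalso i <= Thm.nprems_of st
+    then all_tac st   (*single result: the unchanged goal state*)
+    else no_tac st;   (*empty result sequence: regular failure*)
+*}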
text %mlref {*
@@ -356,7 +355,7 @@
"(?'a, \<tau>)"}. Type instantiations are distinguished from term
instantiations by the syntactic form of the schematic variable.
Types are instantiated before terms are. Since term instantiation
- already performs type-inference as expected, explicit type
+  already performs simple type-inference, explicit type
instantiations are seldom necessary.
*}
@@ -389,6 +388,12 @@
names} (which need to be distinct identifiers).
\end{description}
+
+ For historical reasons, the above instantiation tactics take
+ unparsed string arguments, which makes them hard to use in general
+ ML code. The slightly more advanced @{ML Subgoal.FOCUS} combinator
+  of \secref{sec:struct-goals} allows referring to internal goal
+ structure with explicit context management.
*}
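+
+text %mlex {* For example, a rudimentary tactic that resolves the
+  focused conclusion against one of the local premises could be
+  sketched as follows:
+*}
+
+ML {*
+  (*the premises of the addressed subgoal become local facts "prems";
+    resolve_tac yields an empty result if none of them applies*)
+  fun resolve_prems_tac ctxt =
+    Subgoal.FOCUS (fn {prems, ...} => resolve_tac prems 1) ctxt;
+*}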
--- a/doc-src/IsarImplementation/Thy/document/Integration.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Integration.tex Tue Feb 09 17:06:05 2010 +0100
@@ -106,8 +106,9 @@
\item \verb|Toplevel.is_toplevel|~\isa{state} checks for an empty
toplevel state.
- \item \verb|Toplevel.theory_of|~\isa{state} selects the theory of
- a theory or proof (!), otherwise raises \verb|Toplevel.UNDEF|.
+ \item \verb|Toplevel.theory_of|~\isa{state} selects the
+  background theory of \isa{state}; it raises \verb|Toplevel.UNDEF|
+ for an empty toplevel state.
\item \verb|Toplevel.proof_of|~\isa{state} selects the Isar proof
state if available, otherwise raises \verb|Toplevel.UNDEF|.
@@ -150,16 +151,16 @@
The operational part is represented as the sequential union of a
list of partial functions, which are tried in turn until the first
one succeeds. This acts like an outer case-expression for various
- alternative state transitions. For example, \isakeyword{qed} acts
+ alternative state transitions. For example, \isakeyword{qed} works
differently for a local proof vs.\ the global ending of the main
proof.
Toplevel transitions are composed via transition transformers.
Internally, Isar commands are put together from an empty transition
- extended by name and source position (and optional source text). It
- is then left to the individual command parser to turn the given
- concrete syntax into a suitable transition transformer that adjoins
- actual operations on a theory or proof state etc.%
+ extended by name and source position. It is then left to the
+ individual command parser to turn the given concrete syntax into a
+ suitable transition transformer that adjoins actual operations on a
+ theory or proof state etc.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -240,7 +241,7 @@
the toplevel itself, and only make sense in interactive mode. Under
normal circumstances, the user encounters these only implicitly as
part of the protocol between the Isabelle/Isar system and a
- user-interface such as ProofGeneral.
+ user-interface such as Proof~General.
\begin{description}
@@ -391,9 +392,7 @@
associated with each theory, by declaring these dependencies in the
theory header as \isa{{\isasymUSES}}, and loading them consecutively
within the theory context. The system keeps track of incoming {\ML}
- sources and associates them with the current theory. The file
- \isa{A}\verb,.ML, is loaded after a theory has been concluded, in
- order to support legacy proof {\ML} proof scripts.
+ sources and associates them with the current theory.
The basic internal actions of the theory database are \isa{update}, \isa{outdate}, and \isa{remove}:
@@ -458,12 +457,14 @@
\item \verb|use_thy|~\isa{A} ensures that theory \isa{A} is fully
up-to-date wrt.\ the external file store, reloading outdated
- ancestors as required.
+ ancestors as required. In batch mode, the simultaneous \verb|use_thys| should be used exclusively.
\item \verb|use_thys| is similar to \verb|use_thy|, but handles
several theories simultaneously. Thus it acts like processing the
import header of a theory, without performing the merge of the
- result, though.
+ result. By loading a whole sub-graph of theories like that, the
+  intrinsic parallelism can be exploited by the system to speed up
+ loading.
\item \verb|ThyInfo.touch_thy|~\isa{A} performs an \isa{outdate} action
on theory \isa{A} and all descendants.
@@ -472,7 +473,7 @@
descendants from the theory database.
\item \verb|ThyInfo.begin_theory| is the basic operation behind a
- \isa{{\isasymTHEORY}} header declaration. This is {\ML} functions is
+ \isa{{\isasymTHEORY}} header declaration. This {\ML} function is
normally not invoked directly.
\item \verb|ThyInfo.end_theory| concludes the loading of a theory
--- a/doc-src/IsarImplementation/Thy/document/Local_Theory.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Local_Theory.tex Tue Feb 09 17:06:05 2010 +0100
@@ -18,7 +18,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Local theory specifications%
+\isamarkupchapter{Local theory specifications \label{ch:local-theory}%
}
\isamarkuptrue%
%
--- a/doc-src/IsarImplementation/Thy/document/Logic.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Logic.tex Tue Feb 09 17:06:05 2010 +0100
@@ -39,7 +39,9 @@
schematic polymorphism, which is strictly speaking outside the
logic.\footnote{This is the deeper logical reason, why the theory
context \isa{{\isasymTheta}} is separate from the proof context \isa{{\isasymGamma}}
- of the core calculus.}%
+ of the core calculus: type constructors, term constants, and facts
+ (proof constants) may involve arbitrary type schemes, but the type
+ of a locally fixed term parameter is also fixed!}%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -57,16 +59,15 @@
internally. The resulting relation is an ordering: reflexive,
transitive, and antisymmetric.
- A \emph{sort} is a list of type classes written as \isa{s\ {\isacharequal}\ {\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub m{\isacharbraceright}}, which represents symbolic
- intersection. Notationally, the curly braces are omitted for
- singleton intersections, i.e.\ any class \isa{c} may be read as
- a sort \isa{{\isacharbraceleft}c{\isacharbraceright}}. The ordering on type classes is extended to
- sorts according to the meaning of intersections: \isa{{\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}\ c\isactrlisub m{\isacharbraceright}\ {\isasymsubseteq}\ {\isacharbraceleft}d\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ d\isactrlisub n{\isacharbraceright}} iff
- \isa{{\isasymforall}j{\isachardot}\ {\isasymexists}i{\isachardot}\ c\isactrlisub i\ {\isasymsubseteq}\ d\isactrlisub j}. The empty intersection
- \isa{{\isacharbraceleft}{\isacharbraceright}} refers to the universal sort, which is the largest
- element wrt.\ the sort order. The intersections of all (finitely
- many) classes declared in the current theory are the minimal
- elements wrt.\ the sort order.
+  A \emph{sort} is a list of type classes written as \isa{s\ {\isacharequal}\ {\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub m{\isacharbraceright}}; it represents symbolic intersection. Notationally, the
+ curly braces are omitted for singleton intersections, i.e.\ any
+ class \isa{c} may be read as a sort \isa{{\isacharbraceleft}c{\isacharbraceright}}. The ordering
+ on type classes is extended to sorts according to the meaning of
+ intersections: \isa{{\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}\ c\isactrlisub m{\isacharbraceright}\ {\isasymsubseteq}\ {\isacharbraceleft}d\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ d\isactrlisub n{\isacharbraceright}} iff \isa{{\isasymforall}j{\isachardot}\ {\isasymexists}i{\isachardot}\ c\isactrlisub i\ {\isasymsubseteq}\ d\isactrlisub j}. The empty intersection \isa{{\isacharbraceleft}{\isacharbraceright}} refers to
+ the universal sort, which is the largest element wrt.\ the sort
+ order. Thus \isa{{\isacharbraceleft}{\isacharbraceright}} represents the ``full sort'', not the
+ empty one! The intersection of all (finitely many) classes declared
+ in the current theory is the least element wrt.\ the sort ordering.
\medskip A \emph{fixed type variable} is a pair of a basic name
(starting with a \isa{{\isacharprime}} character) and a sort constraint, e.g.\
@@ -76,10 +77,10 @@
printed as \isa{{\isacharquery}{\isasymalpha}\isactrlisub s}.
Note that \emph{all} syntactic components contribute to the identity
- of type variables, including the sort constraint. The core logic
- handles type variables with the same name but different sorts as
- different, although some outer layers of the system make it hard to
- produce anything like this.
+ of type variables: basic name, index, and sort constraint. The core
+ logic handles type variables with the same name but different sorts
+ as different, although the type-inference layer (which is outside
+ the core) rejects anything like that.
A \emph{type constructor} \isa{{\isasymkappa}} is a \isa{k}-ary operator
on types declared in the theory. Type constructor application is
@@ -90,8 +91,8 @@
Further notation is provided for specific constructors, notably the
right-associative infix \isa{{\isasymalpha}\ {\isasymRightarrow}\ {\isasymbeta}} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharcomma}\ {\isasymbeta}{\isacharparenright}fun}.
- A \emph{type} is defined inductively over type variables and type
- constructors as follows: \isa{{\isasymtau}\ {\isacharequal}\ {\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharquery}{\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharparenleft}{\isasymtau}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlsub k{\isacharparenright}{\isasymkappa}}.
+ The logical category \emph{type} is defined inductively over type
+ variables and type constructors as follows: \isa{{\isasymtau}\ {\isacharequal}\ {\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharquery}{\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharparenleft}{\isasymtau}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlsub k{\isacharparenright}{\isasymkappa}}.
A \emph{type abbreviation} is a syntactic definition \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} of an arbitrary type expression \isa{{\isasymtau}} over
variables \isa{\isactrlvec {\isasymalpha}}. Type abbreviations appear as type
@@ -127,9 +128,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexdef{}{ML type}{class}\verb|type class| \\
- \indexdef{}{ML type}{sort}\verb|type sort| \\
- \indexdef{}{ML type}{arity}\verb|type arity| \\
+ \indexdef{}{ML type}{class}\verb|type class = string| \\
+ \indexdef{}{ML type}{sort}\verb|type sort = class list| \\
+ \indexdef{}{ML type}{arity}\verb|type arity = string * sort list * sort| \\
\indexdef{}{ML type}{typ}\verb|type typ| \\
\indexdef{}{ML}{map\_atyps}\verb|map_atyps: (typ -> typ) -> typ -> typ| \\
\indexdef{}{ML}{fold\_atyps}\verb|fold_atyps: (typ -> 'a -> 'a) -> typ -> 'a -> 'a| \\
@@ -147,14 +148,14 @@
\begin{description}
- \item \verb|class| represents type classes; this is an alias for
- \verb|string|.
+ \item \verb|class| represents type classes.
- \item \verb|sort| represents sorts; this is an alias for
- \verb|class list|.
+ \item \verb|sort| represents sorts, i.e.\ finite intersections
+ of classes. The empty list \verb|[]: sort| refers to the empty
+ class intersection, i.e.\ the ``full sort''.
- \item \verb|arity| represents type arities; this is an alias for
- triples of the form \isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} for \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s} described above.
+ \item \verb|arity| represents type arities. A triple \isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}\ {\isacharcolon}\ arity} represents \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s} as
+ described above.
\item \verb|typ| represents types; this is a datatype with
constructors \verb|TFree|, \verb|TVar|, \verb|Type|.
@@ -207,13 +208,13 @@
with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
or \cite{paulson-ml2}), with the types being determined by the
corresponding binders. In contrast, free variables and constants
- are have an explicit name and type in each occurrence.
+ have an explicit name and type in each occurrence.
\medskip A \emph{bound variable} is a natural number \isa{b},
which accounts for the number of intermediate binders between the
variable occurrence in the body and its binding position. For
- example, the de-Bruijn term \isa{{\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isadigit{1}}\ {\isacharplus}\ {\isadigit{0}}} would
- correspond to \isa{{\isasymlambda}x\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}y\isactrlbsub nat\isactrlesub {\isachardot}\ x\ {\isacharplus}\ y} in a named
+ example, the de-Bruijn term \isa{{\isasymlambda}\isactrlbsub bool\isactrlesub {\isachardot}\ {\isasymlambda}\isactrlbsub bool\isactrlesub {\isachardot}\ {\isadigit{1}}\ {\isasymand}\ {\isadigit{0}}} would
+ correspond to \isa{{\isasymlambda}x\isactrlbsub bool\isactrlesub {\isachardot}\ {\isasymlambda}y\isactrlbsub bool\isactrlesub {\isachardot}\ x\ {\isasymand}\ y} in a named
representation. Note that a bound variable may be represented by
different de-Bruijn indices at different occurrences, depending on
the nesting of abstractions.
@@ -225,19 +226,23 @@
without any loose variables.
A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
- \isa{{\isacharparenleft}x{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed \isa{x\isactrlisub {\isasymtau}}. A
+ \isa{{\isacharparenleft}x{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed \isa{x\isactrlisub {\isasymtau}} here. A
\emph{schematic variable} is a pair of an indexname and a type,
- e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}x{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{{\isacharquery}x\isactrlisub {\isasymtau}}.
+ e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}x{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is likewise printed as \isa{{\isacharquery}x\isactrlisub {\isasymtau}}.
\medskip A \emph{constant} is a pair of a basic name and a type,
- e.g.\ \isa{{\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{c\isactrlisub {\isasymtau}}. Constants are declared in the context as polymorphic
- families \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}, meaning that all substitution instances
- \isa{c\isactrlisub {\isasymtau}} for \isa{{\isasymtau}\ {\isacharequal}\ {\isasymsigma}{\isasymvartheta}} are valid.
+ e.g.\ \isa{{\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{c\isactrlisub {\isasymtau}}
+ here. Constants are declared in the context as polymorphic families
+ \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}, meaning that all substitution instances \isa{c\isactrlisub {\isasymtau}} for \isa{{\isasymtau}\ {\isacharequal}\ {\isasymsigma}{\isasymvartheta}} are valid.
- The vector of \emph{type arguments} of constant \isa{c\isactrlisub {\isasymtau}}
- wrt.\ the declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is defined as the codomain of
- the matcher \isa{{\isasymvartheta}\ {\isacharequal}\ {\isacharbraceleft}{\isacharquery}{\isasymalpha}\isactrlisub {\isadigit{1}}\ {\isasymmapsto}\ {\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isacharquery}{\isasymalpha}\isactrlisub n\ {\isasymmapsto}\ {\isasymtau}\isactrlisub n{\isacharbraceright}} presented in canonical order \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}}. Within a given theory context,
- there is a one-to-one correspondence between any constant \isa{c\isactrlisub {\isasymtau}} and the application \isa{c{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}} of its type arguments. For example, with \isa{plus\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}}, the instance \isa{plus\isactrlbsub nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ nat\isactrlesub } corresponds to \isa{plus{\isacharparenleft}nat{\isacharparenright}}.
+ The vector of \emph{type arguments} of constant \isa{c\isactrlisub {\isasymtau}} wrt.\
+ the declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is defined as the codomain of the
+ matcher \isa{{\isasymvartheta}\ {\isacharequal}\ {\isacharbraceleft}{\isacharquery}{\isasymalpha}\isactrlisub {\isadigit{1}}\ {\isasymmapsto}\ {\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isacharquery}{\isasymalpha}\isactrlisub n\ {\isasymmapsto}\ {\isasymtau}\isactrlisub n{\isacharbraceright}} presented in
+ canonical order \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}}, corresponding to the
+ left-to-right occurrences of the \isa{{\isasymalpha}\isactrlisub i} in \isa{{\isasymsigma}}.
+ Within a given theory context, there is a one-to-one correspondence
+ between any constant \isa{c\isactrlisub {\isasymtau}} and the application \isa{c{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}} of its type arguments. For example, with \isa{plus\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}}, the instance \isa{plus\isactrlbsub nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ nat\isactrlesub } corresponds to
+ \isa{plus{\isacharparenleft}nat{\isacharparenright}}.
Constant declarations \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} may contain sort constraints
for type variables in \isa{{\isasymsigma}}. These are observed by
@@ -246,13 +251,12 @@
polymorphic constants that the user-level type-checker would reject
due to violation of type class restrictions.
- \medskip An \emph{atomic} term is either a variable or constant. A
- \emph{term} is defined inductively over atomic terms, with
- abstraction and application as follows: \isa{t\ {\isacharequal}\ b\ {\isacharbar}\ x\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isacharquery}x\isactrlisub {\isasymtau}\ {\isacharbar}\ c\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isasymlambda}\isactrlisub {\isasymtau}{\isachardot}\ t\ {\isacharbar}\ t\isactrlisub {\isadigit{1}}\ t\isactrlisub {\isadigit{2}}}.
- Parsing and printing takes care of converting between an external
- representation with named bound variables. Subsequently, we shall
- use the latter notation instead of internal de-Bruijn
- representation.
+ \medskip An \emph{atomic} term is either a variable or constant.
+ The logical category \emph{term} is defined inductively over atomic
+ terms, with abstraction and application as follows: \isa{t\ {\isacharequal}\ b\ {\isacharbar}\ x\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isacharquery}x\isactrlisub {\isasymtau}\ {\isacharbar}\ c\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isasymlambda}\isactrlisub {\isasymtau}{\isachardot}\ t\ {\isacharbar}\ t\isactrlisub {\isadigit{1}}\ t\isactrlisub {\isadigit{2}}}. Parsing and printing takes care of
+  converting between the internal de-Bruijn representation and an
+  external representation with named bound variables. Subsequently,
+  we shall use the latter notation instead of the internal de-Bruijn
+  representation.
The inductive relation \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} assigns a (unique) type to a
term according to the structure of atomic terms, abstractions, and
@@ -273,16 +277,17 @@
variables, and declarations for polymorphic constants.
The identity of atomic terms consists both of the name and the type
- component. This means that different variables \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{1}}\isactrlesub } and \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{2}}\isactrlesub } may become the same after type
- instantiation. Some outer layers of the system make it hard to
- produce variables of the same name, but different types. In
- contrast, mixed instances of polymorphic constants occur frequently.
+ component. This means that different variables \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{1}}\isactrlesub } and \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{2}}\isactrlesub } may become the same after
+ type instantiation. Type-inference rejects variables of the same
+ name, but different types. In contrast, mixed instances of
+ polymorphic constants occur routinely.
\medskip The \emph{hidden polymorphism} of a term \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}
is the set of type variables occurring in \isa{t}, but not in
- \isa{{\isasymsigma}}. This means that the term implicitly depends on type
- arguments that are not accounted in the result type, i.e.\ there are
- different type instances \isa{t{\isasymvartheta}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} and \isa{t{\isasymvartheta}{\isacharprime}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with the same type. This slightly
+ its type \isa{{\isasymsigma}}. This means that the term implicitly depends
+ on type arguments that are not accounted in the result type, i.e.\
+ there are different type instances \isa{t{\isasymvartheta}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} and
+ \isa{t{\isasymvartheta}{\isacharprime}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with the same type. This slightly
pathological situation notoriously demands additional care.
\medskip A \emph{term abbreviation} is a syntactic definition \isa{c\isactrlisub {\isasymsigma}\ {\isasymequiv}\ t} of a closed term \isa{t} of type \isa{{\isasymsigma}},
@@ -435,14 +440,14 @@
\infer[\isa{{\isacharparenleft}assume{\isacharparenright}}]{\isa{A\ {\isasymturnstile}\ A}}{}
\]
\[
- \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
+ \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isasymdash}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
\qquad
- \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}a{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}
+ \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isasymdash}elim{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}a{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}
\]
\[
- \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isasymdash}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
\qquad
- \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B}}{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B} & \isa{{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A}}
+ \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isasymdash}elim{\isacharparenright}}]{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B}}{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B} & \isa{{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A}}
\]
\caption{Primitive inferences of Pure}\label{fig:prim-rules}
\end{center}
@@ -469,17 +474,18 @@
terms, and \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} for proofs (cf.\
\cite{Berghofer-Nipkow:2000:TPHOL}).
- Observe that locally fixed parameters (as in \isa{{\isasymAnd}{\isacharunderscore}intro}) need
- not be recorded in the hypotheses, because the simple syntactic
- types of Pure are always inhabitable. ``Assumptions'' \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} for type-membership are only present as long as some \isa{x\isactrlisub {\isasymtau}} occurs in the statement body.\footnote{This is the key
- difference to ``\isa{{\isasymlambda}HOL}'' in the PTS framework
- \cite{Barendregt-Geuvers:2001}, where hypotheses \isa{x\ {\isacharcolon}\ A} are
- treated uniformly for propositions and types.}
+ Observe that locally fixed parameters (as in \isa{{\isasymAnd}{\isasymdash}intro}) need not be recorded in the hypotheses, because
+ the simple syntactic types of Pure are always inhabitable.
+ ``Assumptions'' \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} for type-membership are only
+ present as long as some \isa{x\isactrlisub {\isasymtau}} occurs in the statement
+ body.\footnote{This is the key difference to ``\isa{{\isasymlambda}HOL}'' in
+ the PTS framework \cite{Barendregt-Geuvers:2001}, where hypotheses
+ \isa{x\ {\isacharcolon}\ A} are treated uniformly for propositions and types.}
\medskip The axiomatization of a theory is implicitly closed by
forming all instances of type and term variables: \isa{{\isasymturnstile}\ A{\isasymvartheta}} holds for any substitution instance of an axiom
\isa{{\isasymturnstile}\ A}. By pushing substitutions through derivations
- inductively, we also get admissible \isa{generalize} and \isa{instance} rules as shown in \figref{fig:subst-rules}.
+ inductively, we also get admissible \isa{generalize} and \isa{instantiate} rules as shown in \figref{fig:subst-rules}.
\begin{figure}[htb]
\begin{center}
@@ -586,11 +592,11 @@
Every \verb|thm| value contains a sliding back-reference to the
enclosing theory, cf.\ \secref{sec:context-theory}.
- \item \verb|proofs| determines the detail of proof recording within
+ \item \verb|proofs| specifies the detail of proof recording within
\verb|thm| values: \verb|0| records only the names of oracles,
\verb|1| records oracle names and propositions, \verb|2| additionally
records full proof terms. Officially named theorems that contribute
- to a result are always recorded.
+ to a result are recorded in any case.
\item \verb|Thm.assume|, \verb|Thm.forall_intr|, \verb|Thm.forall_elim|, \verb|Thm.implies_intr|, and \verb|Thm.implies_elim|
correspond to the primitive inferences of \figref{fig:prim-rules}.
@@ -646,8 +652,8 @@
\begin{figure}[htb]
\begin{center}
\begin{tabular}{ll}
- \isa{conjunction\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & (infix \isa{{\isacharampersand}}) \\
- \isa{{\isasymturnstile}\ A\ {\isacharampersand}\ B\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}C{\isachardot}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isacharparenright}} \\[1ex]
+ \isa{conjunction\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & (infix \isa{{\isacharampersand}{\isacharampersand}{\isacharampersand}}) \\
+ \isa{{\isasymturnstile}\ A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}C{\isachardot}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isacharparenright}} \\[1ex]
\isa{prop\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop} & (prefix \isa{{\isacharhash}}, suppressed) \\
\isa{{\isacharhash}A\ {\isasymequiv}\ A} \\[1ex]
\isa{term\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & (prefix \isa{TERM}) \\
@@ -659,12 +665,15 @@
\end{center}
\end{figure}
- Derived conjunction rules include introduction \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isacharampersand}\ B}, and destructions \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ A} and \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ B}.
- Conjunction allows to treat simultaneous assumptions and conclusions
- uniformly. For example, multiple claims are intermediately
- represented as explicit conjunction, but this is refined into
- separate sub-goals before the user continues the proof; the final
- result is projected into a list of theorems (cf.\
+ The introduction \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B}, and eliminations
+ (projections) \isa{A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B\ {\isasymLongrightarrow}\ A} and \isa{A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B\ {\isasymLongrightarrow}\ B} are
+  available as derived rules. Conjunction allows treating
+ simultaneous assumptions and conclusions uniformly, e.g.\ consider
+ \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ D}. In particular, the goal mechanism
+ represents multiple claims as explicit conjunction internally, but
+ this is refined (via backwards introduction) into separate sub-goals
+ before the user commences the proof; the final result is projected
+ into a list of theorems using eliminations (cf.\
\secref{sec:tactical-goals}).
The \isa{prop} marker (\isa{{\isacharhash}}) makes arbitrarily complex
@@ -680,7 +689,7 @@
the unspecified type \isa{{\isasymalpha}\ itself}; it essentially injects the
language of types into that of terms. There is specific notation
\isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} for \isa{TYPE\isactrlbsub {\isasymtau}\ itself\isactrlesub }.
- Although being devoid of any particular meaning, the \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} accounts for the type \isa{{\isasymtau}} within the term
+  Although devoid of any particular meaning, the term \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} accounts for the type \isa{{\isasymtau}} within the term
language. In particular, \isa{TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}} may be used as formal
argument in primitive definitions, in order to circumvent hidden
polymorphism (cf.\ \secref{sec:terms}). For example, \isa{c\ TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}\ {\isasymequiv}\ A{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} defines \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself\ {\isasymRightarrow}\ prop} in terms of
@@ -707,10 +716,10 @@
\begin{description}
- \item \verb|Conjunction.intr| derives \isa{A\ {\isacharampersand}\ B} from \isa{A} and \isa{B}.
+ \item \verb|Conjunction.intr| derives \isa{A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B} from \isa{A} and \isa{B}.
\item \verb|Conjunction.elim| derives \isa{A} and \isa{B}
- from \isa{A\ {\isacharampersand}\ B}.
+ from \isa{A\ {\isacharampersand}{\isacharampersand}{\isacharampersand}\ B}.
\item \verb|Drule.mk_term| derives \isa{TERM\ t}.
@@ -792,7 +801,8 @@
prefix of parameters and compound premises, concluding an atomic
proposition. Typical examples are \isa{{\isasymlongrightarrow}}-introduction \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymlongrightarrow}\ B} or mathematical induction \isa{P\ {\isadigit{0}}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}n{\isachardot}\ P\ n\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}Suc\ n{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ P\ n}. Even deeper nesting occurs in well-founded
induction \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ {\isacharparenleft}{\isasymAnd}y{\isachardot}\ y\ {\isasymprec}\ x\ {\isasymLongrightarrow}\ P\ y{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x}, but this
- already marks the limit of rule complexity seen in practice.
+ already marks the limit of rule complexity that is usually seen in
+ practice.
\medskip Regular user-level inferences in Isabelle/Pure always
maintain the following canonical form of results:
@@ -917,9 +927,9 @@
\begin{description}
- \item \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}} resolves \isa{rule\isactrlsub {\isadigit{1}}} with \isa{rule\isactrlsub {\isadigit{2}}} according to the
- \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle explained above. Note that the
- corresponding attribute in the Isar language is called \hyperlink{attribute.THEN}{\mbox{\isa{THEN}}}.
+ \item \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}} resolves \isa{rule\isactrlsub {\isadigit{1}}} with \isa{rule\isactrlsub {\isadigit{2}}} according to the \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle
+ explained above. Note that the corresponding rule attribute in the
+ Isar language is called \hyperlink{attribute.THEN}{\mbox{\isa{THEN}}}.
\item \isa{rule\ OF\ rules} resolves a list of rules with the
first rule, addressing its premises \isa{{\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ length\ rules}
--- a/doc-src/IsarImplementation/Thy/document/Prelim.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Prelim.tex Tue Feb 09 17:06:05 2010 +0100
@@ -93,39 +93,46 @@
\isamarkuptrue%
%
\begin{isamarkuptext}%
-A \emph{theory} is a data container with explicit name and unique
- identifier. Theories are related by a (nominal) sub-theory
+A \emph{theory} is a data container with explicit name and
+ unique identifier. Theories are related by a (nominal) sub-theory
relation, which corresponds to the dependency graph of the original
construction; each theory is derived from a certain sub-graph of
- ancestor theories.
-
- The \isa{merge} operation produces the least upper bound of two
- theories, which actually degenerates into absorption of one theory
- into the other (due to the nominal sub-theory relation).
+ ancestor theories. To this end, the system maintains a set of
+ symbolic ``identification stamps'' within each theory.
- The \isa{begin} operation starts a new theory by importing
- several parent theories and entering a special \isa{draft} mode,
- which is sustained until the final \isa{end} operation. A draft
- theory acts like a linear type, where updates invalidate earlier
- versions. An invalidated draft is called ``stale''.
+ In order to avoid the full-scale overhead of explicit sub-theory
+ identification of arbitrary intermediate stages, a theory is
+ switched into \isa{draft} mode under certain circumstances. A
+ draft theory acts like a linear type, where updates invalidate
+ earlier versions. An invalidated draft is called \emph{stale}.
- The \isa{checkpoint} operation produces an intermediate stepping
- stone that will survive the next update: both the original and the
- changed theory remain valid and are related by the sub-theory
- relation. Checkpointing essentially recovers purely functional
- theory values, at the expense of some extra internal bookkeeping.
+ The \isa{checkpoint} operation produces a safe stepping stone
+ that will survive the next update without becoming stale: both the
+ old and the new theory remain valid and are related by the
+ sub-theory relation. Checkpointing essentially recovers purely
+ functional theory values, at the expense of some extra internal
+ bookkeeping.
The \isa{copy} operation produces an auxiliary version that has
the same data content, but is unrelated to the original: updates of
the copy do not affect the original, neither does the sub-theory
relation hold.
+ The \isa{merge} operation produces the least upper bound of two
+ theories, which actually degenerates into absorption of one theory
+ into the other (according to the nominal sub-theory relation).
+
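+ For illustration, a minimal sketch in terms of the ML interface
+ described below, assuming two theory values \verb|thy1| and
+ \verb|thy2| that share a common ancestor:
+ \begin{quote}
+ \verb|val thy' = Theory.merge (thy1, thy2);| \\
+ \verb|val ok = Theory.subthy (thy1, thy');  (*yields true*)|
+ \end{quote}
+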
+ The \isa{begin} operation starts a new theory by importing
+ several parent theories and entering a special mode of nameless
+ incremental updates, until the final \isa{end} operation is
+ performed.
+
\medskip The example in \figref{fig:ex-theory} below shows a theory
graph derived from \isa{Pure}, with theory \isa{Length}
importing \isa{Nat} and \isa{List}. The body of \isa{Length} consists of a sequence of updates, working mostly on
- drafts. Intermediate checkpoints may occur as well, due to the
- history mechanism provided by the Isar top-level, cf.\
- \secref{sec:isar-toplevel}.
+ drafts internally, while transaction boundaries of Isar top-level
+ commands (\secref{sec:isar-toplevel}) are guaranteed to be safe
+ checkpoints.
\begin{figure}[htb]
\begin{center}
@@ -172,9 +179,10 @@
\begin{mldecls}
\indexdef{}{ML type}{theory}\verb|type theory| \\
\indexdef{}{ML}{Theory.subthy}\verb|Theory.subthy: theory * theory -> bool| \\
- \indexdef{}{ML}{Theory.merge}\verb|Theory.merge: theory * theory -> theory| \\
\indexdef{}{ML}{Theory.checkpoint}\verb|Theory.checkpoint: theory -> theory| \\
\indexdef{}{ML}{Theory.copy}\verb|Theory.copy: theory -> theory| \\
+ \indexdef{}{ML}{Theory.merge}\verb|Theory.merge: theory * theory -> theory| \\
+ \indexdef{}{ML}{Theory.begin\_theory}\verb|Theory.begin_theory: string -> theory list -> theory| \\
\end{mldecls}
\begin{mldecls}
\indexdef{}{ML type}{theory\_ref}\verb|type theory_ref| \\
@@ -185,24 +193,31 @@
\begin{description}
\item \verb|theory| represents theory contexts. This is
- essentially a linear type! Most operations destroy the original
- version, which then becomes ``stale''.
+ essentially a linear type, with explicit runtime checking! Most
+ internal theory operations destroy the original version, which then
+ becomes ``stale''.
- \item \verb|Theory.subthy|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
- compares theories according to the inherent graph structure of the
- construction. This sub-theory relation is a nominal approximation
- of inclusion (\isa{{\isasymsubseteq}}) of the corresponding content.
-
- \item \verb|Theory.merge|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
- absorbs one theory into the other. This fails for unrelated
- theories!
+ \item \verb|Theory.subthy|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}} compares theories
+ according to the intrinsic graph structure of the construction.
+ This sub-theory relation is a nominal approximation of inclusion
+ (\isa{{\isasymsubseteq}}) of the corresponding content (according to the
+ semantics of the ML modules that implement the data).
\item \verb|Theory.checkpoint|~\isa{thy} produces a safe
- stepping stone in the linear development of \isa{thy}. The next
- update will result in two related, valid theories.
+ stepping stone in the linear development of \isa{thy}. This
+ changes the old theory, but the next update will result in two
+ related, valid theories.
+
+ \item \verb|Theory.copy|~\isa{thy} produces a variant of \isa{thy} with the same data. The copy is not related to the original,
+ but the original is unchanged.
- \item \verb|Theory.copy|~\isa{thy} produces a variant of \isa{thy} with the same data. The result is not related to the
- original; the original is unchanged.
+ \item \verb|Theory.merge|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}} absorbs one theory
+ into the other, without changing \isa{thy\isactrlsub {\isadigit{1}}} or \isa{thy\isactrlsub {\isadigit{2}}}.
+ This version of ad-hoc theory merge fails for unrelated theories!
+
+ \item \verb|Theory.begin_theory|~\isa{name\ parents} constructs
+ a new theory based on the given parents. This {\ML} function is
+ normally not invoked directly.
\item \verb|theory_ref| represents a sliding reference to an
always valid theory; updates on the original are propagated
@@ -229,30 +244,32 @@
\isamarkuptrue%
%
\begin{isamarkuptext}%
-A proof context is a container for pure data with a back-reference
- to the theory it belongs to. The \isa{init} operation creates a
- proof context from a given theory. Modifications to draft theories
- are propagated to the proof context as usual, but there is also an
- explicit \isa{transfer} operation to force resynchronization
- with more substantial updates to the underlying theory. The actual
- context data does not require any special bookkeeping, thanks to the
- lack of destructive features.
+A proof context is a container for pure data with a
+ back-reference to the theory it belongs to. The \isa{init}
+ operation creates a proof context from a given theory.
+ Modifications to draft theories are propagated to the proof context
+ as usual, but there is also an explicit \isa{transfer} operation
+ to force resynchronization with more substantial updates to the
+ underlying theory.
- Entities derived in a proof context need to record inherent logical
+ Entities derived in a proof context need to record logical
requirements explicitly, since there is no separate context
- identification as for theories. For example, hypotheses used in
- primitive derivations (cf.\ \secref{sec:thms}) are recorded
- separately within the sequent \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}}, just to make double
- sure. Results could still leak into an alien proof context due to
- programming errors, but Isabelle/Isar includes some extra validity
- checks in critical positions, notably at the end of a sub-proof.
+ identification or symbolic inclusion as for theories. For example,
+ hypotheses used in primitive derivations (cf.\ \secref{sec:thms})
+ are recorded separately within the sequent \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}}, just to
+ make double sure. Results could still leak into an alien proof
+ context due to programming errors, but Isabelle/Isar includes some
+ extra validity checks in critical positions, notably at the end of a
+ sub-proof.
Proof contexts may be manipulated arbitrarily, although the common
discipline is to follow block structure as a mental model: a given
context is extended consecutively, and results are exported back
- into the original context. Note that the Isar proof states model
+ into the original context. Note that an Isar proof state models
block-structured reasoning explicitly, using a stack of proof
- contexts internally.%
+ contexts internally. For various technical reasons, the background
+ theory of an Isar proof state must not be changed while the proof is
+ still under construction!%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -311,7 +328,8 @@
Moreover, there are total operations \isa{theory{\isacharunderscore}of} and \isa{proof{\isacharunderscore}of} to convert a generic context into either kind: a theory
can always be selected from the sum, while a proof context might
- have to be constructed by an ad-hoc \isa{init} operation.%
+ have to be constructed by an ad-hoc \isa{init} operation, which
+ incurs a small runtime overhead.%
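+
+ A minimal sketch, assuming a generic context value \verb|context|
+ is given:
+ \begin{quote}
+ \verb|val thy = Context.theory_of context;| \\
+ \verb|val ctxt = Context.proof_of context;|
+ \end{quote}%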
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -376,6 +394,15 @@
\noindent The \isa{empty} value acts as initial default for
 \emph{any} theory that does not declare actual data content; \isa{extend} acts like a unitary version of \isa{merge}.
+ Implementing \isa{merge} can be tricky. The general idea is
+ that \isa{merge\ {\isacharparenleft}data\isactrlsub {\isadigit{1}}{\isacharcomma}\ data\isactrlsub {\isadigit{2}}{\isacharparenright}} inserts those parts of \isa{data\isactrlsub {\isadigit{2}}} into \isa{data\isactrlsub {\isadigit{1}}} that are not yet present, while
+ keeping the general order of things. The \verb|Library.merge|
+ function on plain lists may serve as a canonical template.
+
+ Note in particular that shared parts of the data must not be
+ duplicated by naive concatenation, or a theory graph that is like a
+ chain of diamonds would cause an exponential blowup!
+
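+ For example, a hypothetical data slot holding a list of strings
+ could merge its content as follows (a minimal sketch; see also the
+ more realistic example further below):
+ \begin{quote}
+ \verb|structure Data = Theory_Data| \\
+ \verb|(| \\
+ \verb|  type T = string list;| \\
+ \verb|  val empty = [];| \\
+ \verb|  val extend = I;| \\
+ \verb|  fun merge (xs, ys) = Library.merge (op =) (xs, ys);| \\
+ \verb|);|
+ \end{quote}
+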
\paragraph{Proof context data} declarations need to implement the
following SML signature:
@@ -387,15 +414,18 @@
\medskip
\noindent The \isa{init} operation is supposed to produce a pure
- value from the given background theory.
+ value from the given background theory and should be somehow
+ ``immediate''. Whenever a proof context is initialized, which
+ happens frequently, the system invokes the \isa{init}
+ operation of \emph{all} theory data slots ever declared.
\paragraph{Generic data} provides a hybrid interface for both theory
and proof data. The \isa{init} operation for proof contexts is
predefined to select the current data value from the background
theory.
- \bigskip A data declaration of type \isa{T} results in the
- following interface:
+ \bigskip Any of these data declarations over type \isa{T} results
+ in an ML structure with the following signature:
\medskip
\begin{tabular}{ll}
@@ -405,12 +435,12 @@
\end{tabular}
\medskip
- \noindent These other operations provide access for the particular
- kind of context (theory, proof, or generic context). Note that this
- is a safe interface: there is no other way to access the
- corresponding data slot of a context. By keeping these operations
- private, a component may maintain abstract values authentically,
- without other components interfering.%
+ \noindent These other operations provide exclusive access for the
+ particular kind of context (theory, proof, or generic context).
+ This interface fully observes the ML discipline for types and
+ scopes: there is no other way to access the corresponding data slot
+ of a context. By keeping these operations private, an Isabelle/ML
+ module may maintain abstract values authentically.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -451,16 +481,157 @@
%
\endisadelimmlref
%
+\isadelimmlex
+%
+\endisadelimmlex
+%
+\isatagmlex
+%
+\begin{isamarkuptext}%
+The following artificial example demonstrates theory
+ data: we maintain a set of terms that are supposed to be wellformed
+ wrt.\ the enclosing theory. The public interface is as follows:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlex
+{\isafoldmlex}%
+%
+\isadelimmlex
+%
+\endisadelimmlex
+%
+\isadelimML
+%
+\endisadelimML
+%
+\isatagML
+\isacommand{ML}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ signature\ WELLFORMED{\isacharunderscore}TERMS\ {\isacharequal}\isanewline
+\ \ sig\isanewline
+\ \ \ \ val\ get{\isacharcolon}\ theory\ {\isacharminus}{\isachargreater}\ term\ list\isanewline
+\ \ \ \ val\ add{\isacharcolon}\ term\ {\isacharminus}{\isachargreater}\ theory\ {\isacharminus}{\isachargreater}\ theory\isanewline
+\ \ end{\isacharsemicolon}\isanewline
+{\isacharverbatimclose}%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+\noindent The implementation uses private theory data
+ internally, and only exposes an operation that involves explicit
+ argument checking wrt.\ the given theory.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\isatagML
+\isacommand{ML}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ structure\ Wellformed{\isacharunderscore}Terms{\isacharcolon}\ WELLFORMED{\isacharunderscore}TERMS\ {\isacharequal}\isanewline
+\ \ struct\isanewline
+\isanewline
+\ \ structure\ Terms\ {\isacharequal}\ Theory{\isacharunderscore}Data\isanewline
+\ \ {\isacharparenleft}\isanewline
+\ \ \ \ type\ T\ {\isacharequal}\ term\ OrdList{\isachardot}T{\isacharsemicolon}\isanewline
+\ \ \ \ val\ empty\ {\isacharequal}\ {\isacharbrackleft}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\ \ \ \ val\ extend\ {\isacharequal}\ I{\isacharsemicolon}\isanewline
+\ \ \ \ fun\ merge\ {\isacharparenleft}ts{\isadigit{1}}{\isacharcomma}\ ts{\isadigit{2}}{\isacharparenright}\ {\isacharequal}\isanewline
+\ \ \ \ \ \ OrdList{\isachardot}union\ TermOrd{\isachardot}fast{\isacharunderscore}term{\isacharunderscore}ord\ ts{\isadigit{1}}\ ts{\isadigit{2}}{\isacharsemicolon}\isanewline
+\ \ {\isacharparenright}\isanewline
+\isanewline
+\ \ val\ get\ {\isacharequal}\ Terms{\isachardot}get{\isacharsemicolon}\isanewline
+\isanewline
+\ \ fun\ add\ raw{\isacharunderscore}t\ thy\ {\isacharequal}\isanewline
+\ \ \ \ let\ val\ t\ {\isacharequal}\ Sign{\isachardot}cert{\isacharunderscore}term\ thy\ raw{\isacharunderscore}t\isanewline
+\ \ \ \ in\ Terms{\isachardot}map\ {\isacharparenleft}OrdList{\isachardot}insert\ TermOrd{\isachardot}fast{\isacharunderscore}term{\isacharunderscore}ord\ t{\isacharparenright}\ thy\ end{\isacharsemicolon}\isanewline
+\isanewline
+\ \ end{\isacharsemicolon}\isanewline
+{\isacharverbatimclose}%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+We use \verb|term OrdList.T| for reasonably efficient
+ representation of a set of terms: all operations are linear in the
+ number of stored elements. Here we assume that our users do not
+ care about the declaration order, since that data structure forces
+ its own arrangement of elements.
+
+ Observe how the \verb|merge| operation joins the data slots of
+ the two constituents: \verb|OrdList.union| prevents duplication of
+ common data from different branches, thus avoiding the danger of
+ exponential blowup. (Plain list append etc.\ must never be used for
+ theory data merges.)
+
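+ A typical use of the public interface, assuming a raw term
+ \verb|t| and an unfinished theory value \verb|thy| are at hand:
+ \begin{quote}
+ \verb|val thy' = Wellformed_Terms.add t thy;| \\
+ \verb|val ts = Wellformed_Terms.get thy';|
+ \end{quote}
+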
+ \medskip Our intended invariant is achieved as follows:
+ \begin{enumerate}
+
+ \item \verb|Wellformed_Terms.add| only admits terms that have passed
+ the \verb|Sign.cert_term| check of the given theory at that point.
+
+ \item Wellformedness in the sense of \verb|Sign.cert_term| is
+ monotonic wrt.\ the sub-theory relation. So our data can move
+ upwards in the hierarchy (via extension or merges), and maintain
+ wellformedness without further checks.
+
+ \end{enumerate}
+
+ Note that all basic operations of the inference kernel (which
+ includes \verb|Sign.cert_term|) observe this monotonicity principle,
+ but other user-space tools don't. For example, fully-featured
+ type-inference via \verb|Syntax.check_term| (cf.\
+ \secref{sec:term-check}) is not necessarily monotonic wrt.\ the
+ background theory, since constraints of term constants can be
+ strengthened by later declarations.
+
+ In most cases, user-space context data does not have to take such
+ invariants too seriously. The situation is different in the
+ implementation of the inference kernel itself, which uses the very
+ same data mechanisms for types, constants, axioms etc.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
\isamarkupsection{Names \label{sec:names}%
}
\isamarkuptrue%
%
\begin{isamarkuptext}%
In principle, a name is just a string, but there are various
- convention for encoding additional structure. For example, ``\isa{Foo{\isachardot}bar{\isachardot}baz}'' is considered as a qualified name consisting of
- three basic name components. The individual constituents of a name
- may have further substructure, e.g.\ the string
- ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.%
+ conventions for representing additional structure. For example,
+ ``\isa{Foo{\isachardot}bar{\isachardot}baz}'' is considered as a long name consisting of
+ qualifier \isa{Foo{\isachardot}bar} and base name \isa{baz}. The
+ individual constituents of a name may have further substructure,
+ e.g.\ the string ``\verb,\,\verb,<alpha>,'' encodes as a single
+ symbol.
+
+ \medskip Subsequently, we shall introduce specific categories of
+ names. Roughly speaking these correspond to logical entities as
+ follows:
+ \begin{itemize}
+
+ \item Basic names (\secref{sec:basic-name}): free and bound
+ variables.
+
+ \item Indexed names (\secref{sec:indexname}): schematic variables.
+
+ \item Long names (\secref{sec:long-name}): constants of any kind
+ (type constructors, term constants, other concepts defined in user
+ space). Such entities are typically managed via name spaces
+ (\secref{sec:name-space}).
+
+ \end{itemize}%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -469,16 +640,16 @@
\isamarkuptrue%
%
\begin{isamarkuptext}%
-A \emph{symbol} constitutes the smallest textual unit in Isabelle
- --- raw characters are normally not encountered at all. Isabelle
- strings consist of a sequence of symbols, represented as a packed
- string or a list of strings. Each symbol is in itself a small
- string, which has either one of the following forms:
+A \emph{symbol} constitutes the smallest textual unit in
+ Isabelle --- raw ML characters are normally not encountered at all!
+ Isabelle strings consist of a sequence of symbols, represented as a
+ packed string or an exploded list of strings. Each symbol is in
+ itself a small string, which has either one of the following forms:
\begin{enumerate}
- \item a single ASCII character ``\isa{c}'', for example
- ``\verb,a,'',
+ \item a single ASCII character ``\isa{c}'' or raw byte in the
+ range of 128\dots 255, for example ``\verb,a,'',
\item a regular symbol ``\verb,\,\verb,<,\isa{ident}\verb,>,'',
for example ``\verb,\,\verb,<alpha>,'',
@@ -487,7 +658,7 @@
for example ``\verb,\,\verb,<^bold>,'',
\item a raw symbol ``\verb,\,\verb,<^raw:,\isa{text}\verb,>,''
- where \isa{text} constists of printable characters excluding
+ where \isa{text} consists of printable characters excluding
``\verb,.,'' and ``\verb,>,'', for example
``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
@@ -503,16 +674,25 @@
may occur within regular Isabelle identifiers.
Since the character set underlying Isabelle symbols is 7-bit ASCII
- and 8-bit characters are passed through transparently, Isabelle may
- also process Unicode/UCS data in UTF-8 encoding. Unicode provides
- its own collection of mathematical symbols, but there is no built-in
- link to the standard collection of Isabelle.
+ and 8-bit characters are passed through transparently, Isabelle can
+ also process Unicode/UCS data in UTF-8 encoding.\footnote{When
+ counting precise source positions internally, bytes in the range of
+ 128\dots 191 are ignored. In UTF-8 encoding, this interval covers
+ the additional trailer bytes, so Isabelle happens to count Unicode
+ characters here, not bytes in memory. In ISO-Latin encoding, the
+ ignored range merely includes some extra punctuation characters that
+ even have replacements within the standard collection of Isabelle
+ symbols; the accented letters range is counted properly.} Unicode
+ provides its own collection of mathematical symbols, but within the
+ core Isabelle/ML world there is no link to the standard collection
+ of Isabelle regular symbols.
\medskip Output of Isabelle symbols depends on the print mode
(\secref{print-mode}). For example, the standard {\LaTeX} setup of
the Isabelle document preparation system would present
``\verb,\,\verb,<alpha>,'' as \isa{{\isasymalpha}}, and
- ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as \isa{\isactrlbold {\isasymalpha}}.%
+ ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as \isa{\isactrlbold {\isasymalpha}}. On-screen rendering usually works by mapping a finite
+ subset of Isabelle symbols to suitable Unicode characters.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -524,7 +704,7 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexdef{}{ML type}{Symbol.symbol}\verb|type Symbol.symbol| \\
+ \indexdef{}{ML type}{Symbol.symbol}\verb|type Symbol.symbol = string| \\
\indexdef{}{ML}{Symbol.explode}\verb|Symbol.explode: string -> Symbol.symbol list| \\
\indexdef{}{ML}{Symbol.is\_letter}\verb|Symbol.is_letter: Symbol.symbol -> bool| \\
\indexdef{}{ML}{Symbol.is\_digit}\verb|Symbol.is_digit: Symbol.symbol -> bool| \\
@@ -539,11 +719,14 @@
\begin{description}
\item \verb|Symbol.symbol| represents individual Isabelle
- symbols; this is an alias for \verb|string|.
+ symbols.
\item \verb|Symbol.explode|~\isa{str} produces a symbol list
 from the packed form. This function supersedes \verb|String.explode| for virtually all purposes of manipulating text in
- Isabelle!
+ Isabelle!\footnote{The runtime overhead for exploded strings is
+ mainly that of the list structure: individual symbols that happen to
+ be a singleton string --- which is the most common case --- do not
+ require extra memory in Poly/ML.}
\item \verb|Symbol.is_letter|, \verb|Symbol.is_digit|, \verb|Symbol.is_quasi|, \verb|Symbol.is_blank| classify standard
symbols according to fixed syntactic conventions of Isabelle, cf.\
@@ -555,7 +738,16 @@
\item \verb|Symbol.decode| converts the string representation of a
symbol into the datatype version.
- \end{description}%
+ \end{description}
+
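+ For illustration, two plain ASCII cases with their expected
+ results given as comments (a minimal sketch):
+ \begin{quote}
+ \verb|Symbol.explode "abc";     (*["a", "b", "c"]*)| \\
+ \verb|Symbol.is_letter "a";     (*true*)|
+ \end{quote}
+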
+ \paragraph{Historical note.} In the original SML90 standard the
+ primitive ML type \verb|char| did not exist, and the basic \verb|explode: string -> string list| operation would produce a list of
+ singleton strings as in Isabelle/ML today. When SML97 came out,
+ Isabelle did not adopt its slightly anachronistic 8-bit characters,
+ but the idea of exploding a string into a list of small strings was
+ extended to ``symbols'' as explained above. Thus Isabelle sources
+ can refer to an infinite store of user-defined symbols, without
+ having to worry about the multitude of Unicode encodings.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -566,7 +758,7 @@
%
\endisadelimmlref
%
-\isamarkupsubsection{Basic names \label{sec:basic-names}%
+\isamarkupsubsection{Basic names \label{sec:basic-name}%
}
\isamarkuptrue%
%
@@ -583,7 +775,8 @@
These special versions provide copies of the basic name space, apart
from anything that normally appears in the user text. For example,
system generated variables in Isar proof contexts are usually marked
- as internal, which prevents mysterious name references like \isa{xaa} to appear in the text.
+ as internal, which prevents mysterious names like \isa{xaa} from
+ appearing in human-readable text.
\medskip Manipulating binding scopes often requires on-the-fly
renamings. A \emph{name context} contains a collection of already
@@ -618,6 +811,9 @@
\indexdef{}{ML}{Name.invents}\verb|Name.invents: Name.context -> string -> int -> string list| \\
\indexdef{}{ML}{Name.variants}\verb|Name.variants: string list -> Name.context -> string list * Name.context| \\
\end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{Variable.names\_of}\verb|Variable.names_of: Proof.context -> Name.context| \\
+ \end{mldecls}
\begin{description}
@@ -638,6 +834,14 @@
\item \verb|Name.variants|~\isa{names\ context} produces fresh
variants of \isa{names}; the result is entered into the context.
+ \item \verb|Variable.names_of|~\isa{ctxt} retrieves the context
+ of declared type and term variable names. Projecting a proof
+ context down to a primitive name context is occasionally useful when
+ invoking lower-level operations. Regular management of ``fresh
+ variables'' is done by suitable operations of structure \verb|Variable|, which is also able to provide an official status of
+ ``locally fixed variable'' within the logical environment (cf.\
+ \secref{sec:variables}).
+
\end{description}%
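+
+ For illustration, a minimal sketch that produces fresh name
+ variants relative to a proof context \verb|ctxt| (an assumed
+ value):
+ \begin{quote}
+ \verb|val names = Variable.names_of ctxt;| \\
+ \verb|val (xs, names') = Name.variants ["x", "y"] names;|
+ \end{quote}%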
\end{isamarkuptext}%
\isamarkuptrue%
@@ -649,7 +853,7 @@
%
\endisadelimmlref
%
-\isamarkupsubsection{Indexed names%
+\isamarkupsubsection{Indexed names \label{sec:indexname}%
}
\isamarkuptrue%
%
@@ -663,9 +867,9 @@
\isa{maxidx\ {\isacharplus}\ {\isadigit{1}}}; the maximum index of an empty collection is
\isa{{\isacharminus}{\isadigit{1}}}.
- Occasionally, basic names and indexed names are injected into the
- same pair type: the (improper) indexname \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} is used
- to encode basic names.
+ Occasionally, basic names are injected into the same pair type of
+ indexed names: then \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} is used to encode the basic
+ name \isa{x}.
\medskip Isabelle syntax observes the following rules for
representing an indexname \isa{{\isacharparenleft}x{\isacharcomma}\ i{\isacharparenright}} as a packed string:
@@ -680,11 +884,12 @@
\end{itemize}
- Indexnames may acquire large index numbers over time. Results are
- normalized towards \isa{{\isadigit{0}}} at certain checkpoints, notably at
- the end of a proof. This works by producing variants of the
- corresponding basic name components. For example, the collection
- \isa{{\isacharquery}x{\isadigit{1}}{\isacharcomma}\ {\isacharquery}x{\isadigit{7}}{\isacharcomma}\ {\isacharquery}x{\isadigit{4}}{\isadigit{2}}} becomes \isa{{\isacharquery}x{\isacharcomma}\ {\isacharquery}xa{\isacharcomma}\ {\isacharquery}xb}.%
+ Indexnames may acquire large index numbers after several maxidx
+ shifts have been applied. Results are usually normalized towards
+ \isa{{\isadigit{0}}} at certain checkpoints, notably at the end of a proof.
+ This works by producing variants of the corresponding basic name
+ components. For example, the collection \isa{{\isacharquery}x{\isadigit{1}}{\isacharcomma}\ {\isacharquery}x{\isadigit{7}}{\isacharcomma}\ {\isacharquery}x{\isadigit{4}}{\isadigit{2}}}
+ becomes \isa{{\isacharquery}x{\isacharcomma}\ {\isacharquery}xa{\isacharcomma}\ {\isacharquery}xb}.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -704,7 +909,8 @@
\item \verb|indexname| represents indexed names. This is an
abbreviation for \verb|string * int|. The second component is
usually non-negative, except for situations where \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}}
- is used to embed basic names into this type.
+ is used to inject basic names into this type. Other negative
+ indexes should not be used.
\end{description}%
\end{isamarkuptext}%
@@ -717,56 +923,31 @@
%
\endisadelimmlref
%
-\isamarkupsubsection{Qualified names and name spaces%
+\isamarkupsubsection{Long names \label{sec:long-name}%
}
\isamarkuptrue%
%
\begin{isamarkuptext}%
-A \emph{qualified name} consists of a non-empty sequence of basic
- name components. The packed representation uses a dot as separator,
- as in ``\isa{A{\isachardot}b{\isachardot}c}''. The last component is called \emph{base}
- name, the remaining prefix \emph{qualifier} (which may be empty).
- The idea of qualified names is to encode nested structures by
- recording the access paths as qualifiers. For example, an item
- named ``\isa{A{\isachardot}b{\isachardot}c}'' may be understood as a local entity \isa{c}, within a local structure \isa{b}, within a global
- structure \isa{A}. Typically, name space hierarchies consist of
- 1--2 levels of qualification, but this need not be always so.
+A \emph{long name} consists of a sequence of non-empty name
+ components. The packed representation uses a dot as separator, as
+ in ``\isa{A{\isachardot}b{\isachardot}c}''. The last component is called \emph{base
+ name}, the remaining prefix is called \emph{qualifier} (which may be
+ empty). The qualifier can be understood as the access path to the
+ named entity while passing through some nested block-structure,
+ although our free-form long names do not really enforce any strict
+ discipline.
+
+ For example, an item named ``\isa{A{\isachardot}b{\isachardot}c}'' may be understood as
+ a local entity \isa{c}, within a local structure \isa{b},
+ within a global structure \isa{A}. In practice, long names
+ usually represent 1--3 levels of qualification. User ML code should
+ not make any assumptions about the particular structure of long
+ names!
The empty name is commonly used as an indication of unnamed
- entities, whenever this makes any sense. The basic operations on
- qualified names are smart enough to pass through such improper names
- unchanged.
-
- \medskip A \isa{naming} policy tells how to turn a name
- specification into a fully qualified internal name (by the \isa{full} operation), and how fully qualified names may be accessed
- externally. For example, the default naming policy is to prefix an
- implicit path: \isa{full\ x} produces \isa{path{\isachardot}x}, and the
- standard accesses for \isa{path{\isachardot}x} include both \isa{x} and
- \isa{path{\isachardot}x}. Normally, the naming is implicit in the theory or
- proof context; there are separate versions of the corresponding.
-
- \medskip A \isa{name\ space} manages a collection of fully
- internalized names, together with a mapping between external names
- and internal names (in both directions). The corresponding \isa{intern} and \isa{extern} operations are mostly used for
- parsing and printing only! The \isa{declare} operation augments
- a name space according to the accesses determined by the naming
- policy.
-
- \medskip As a general principle, there is a separate name space for
- each kind of formal entity, e.g.\ logical constant, type
- constructor, type class, theorem. It is usually clear from the
- occurrence in concrete syntax (or from the scope) which kind of
- entity a name refers to. For example, the very same name \isa{c} may be used uniformly for a constant, type constructor, and
- type class.
-
- There are common schemes to name theorems systematically, according
- to the name of the main logical entity involved, e.g.\ \isa{c{\isachardot}intro} for a canonical theorem related to constant \isa{c}.
- This technique of mapping names from one space into another requires
- some care in order to avoid conflicts. In particular, theorem names
- derived from a type constructor or type class are better suffixed in
- addition to the usual qualification, e.g.\ \isa{c{\isacharunderscore}type{\isachardot}intro}
- and \isa{c{\isacharunderscore}class{\isachardot}intro} for theorems related to type \isa{c}
- and class \isa{c}, respectively.%
+ entities, or entities that are not entered into the corresponding
+ name space, whenever this makes any sense. The basic operations on
+ long names map empty names again to empty names.%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -784,6 +965,100 @@
\indexdef{}{ML}{Long\_Name.implode}\verb|Long_Name.implode: string list -> string| \\
\indexdef{}{ML}{Long\_Name.explode}\verb|Long_Name.explode: string -> string list| \\
\end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Long_Name.base_name|~\isa{name} returns the base name
+ of a long name.
+
+ \item \verb|Long_Name.qualifier|~\isa{name} returns the qualifier
+ of a long name.
+
+ \item \verb|Long_Name.append|~\isa{name\isactrlisub {\isadigit{1}}\ name\isactrlisub {\isadigit{2}}} appends two long
+ names.
+
+ \item \verb|Long_Name.implode|~\isa{names} and \verb|Long_Name.explode|~\isa{name} convert between the packed string
+ representation and the explicit list form of long names.
+
+ \end{description}%
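+
+ For illustration, the expected results on a concrete long name:
+ \begin{quote}
+ \verb|Long_Name.base_name "A.b.c";   (*"c"*)| \\
+ \verb|Long_Name.qualifier "A.b.c";   (*"A.b"*)| \\
+ \verb|Long_Name.explode "A.b.c";     (*["A", "b", "c"]*)|
+ \end{quote}%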
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Name spaces \label{sec:name-space}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \isa{name\ space} manages a collection of long names,
+ together with a mapping between partially qualified external names
+ and fully qualified internal names (in both directions). Note that
+ the corresponding \isa{intern} and \isa{extern} operations
+ are mostly used for parsing and printing only! The \isa{declare} operation augments a name space according to the accesses
+ determined by a given binding, and a naming policy from the context.
+
+ \medskip A \isa{binding} specifies details about the prospective
+ long name of a newly introduced formal entity. It consists of a
+ base name, prefixes for qualification (separate ones for system
+ infrastructure and user-space mechanisms), a slot for the original
+ source position, and some additional flags.
+
+ \medskip A \isa{naming} provides some additional details for
+ producing a long name from a binding. Normally, the naming is
+ implicit in the theory or proof context. The \isa{full}
+ operation (and its variants for different context types) produces a
+ fully qualified internal name to be entered into a name space. The
+ main equation of this ``chemical reaction'' when binding new
+ entities in a context is as follows:
+
+ \smallskip
+ \begin{tabular}{l}
+ \isa{binding\ {\isacharplus}\ naming\ {\isasymlongrightarrow}\ long\ name\ {\isacharplus}\ name\ space\ accesses}
+ \end{tabular}
+ \smallskip
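+
+ In Isabelle/ML (using the binding operations described below), such
+ a binding may be built up e.g.\ like this, with a hypothetical base
+ name and qualifier:
+ \begin{quote}
+ \verb,val b = Binding.name "assoc" |> Binding.qualify false "group";,
+ \end{quote}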
+
+ \medskip As a general principle, there is a separate name space for
+ each kind of formal entity, e.g.\ fact, logical constant, type
+ constructor, type class. It is usually clear from the occurrence in
+ concrete syntax (or from the scope) which kind of entity a name
+ refers to. For example, the very same name \isa{c} may be used
+ uniformly for a constant, type constructor, and type class.
+
+ There are common schemes to name derived entities systematically
+ according to the name of the main logical entity involved, e.g.\
+ fact \isa{c{\isachardot}intro} for a canonical introduction rule related to
+ constant \isa{c}. This technique of mapping names from one
+ space into another requires some care in order to avoid conflicts.
+ In particular, theorem names derived from a type constructor or type
+ class are better suffixed in addition to the usual qualification,
+ e.g.\ \isa{c{\isacharunderscore}type{\isachardot}intro} and \isa{c{\isacharunderscore}class{\isachardot}intro} for
+ theorems related to type \isa{c} and class \isa{c},
+ respectively.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{binding}\verb|type binding| \\
+ \indexdef{}{ML}{Binding.empty}\verb|Binding.empty: binding| \\
+ \indexdef{}{ML}{Binding.name}\verb|Binding.name: string -> binding| \\
+ \indexdef{}{ML}{Binding.qualify}\verb|Binding.qualify: bool -> string -> binding -> binding| \\
+ \indexdef{}{ML}{Binding.prefix}\verb|Binding.prefix: bool -> string -> binding -> binding| \\
+ \indexdef{}{ML}{Binding.conceal}\verb|Binding.conceal: binding -> binding| \\
+ \indexdef{}{ML}{Binding.str\_of}\verb|Binding.str_of: binding -> string| \\
+ \end{mldecls}
\begin{mldecls}
\indexdef{}{ML type}{Name\_Space.naming}\verb|type Name_Space.naming| \\
\indexdef{}{ML}{Name\_Space.default\_naming}\verb|Name_Space.default_naming: Name_Space.naming| \\
@@ -798,21 +1073,39 @@
\verb| string * Name_Space.T| \\
\indexdef{}{ML}{Name\_Space.intern}\verb|Name_Space.intern: Name_Space.T -> string -> string| \\
\indexdef{}{ML}{Name\_Space.extern}\verb|Name_Space.extern: Name_Space.T -> string -> string| \\
+ \indexdef{}{ML}{Name\_Space.is\_concealed}\verb|Name_Space.is_concealed: Name_Space.T -> string -> bool|
\end{mldecls}
\begin{description}
- \item \verb|Long_Name.base_name|~\isa{name} returns the base name of a
- qualified name.
+ \item \verb|binding| represents the abstract concept of name
+ bindings.
+
+ \item \verb|Binding.empty| is the empty binding.
- \item \verb|Long_Name.qualifier|~\isa{name} returns the qualifier
- of a qualified name.
+ \item \verb|Binding.name|~\isa{name} produces a binding with base
+ name \isa{name}.
+
+ \item \verb|Binding.qualify|~\isa{mandatory\ name\ binding}
+ prefixes qualifier \isa{name} to \isa{binding}. The \isa{mandatory} flag tells whether this name component always needs to be
+ given in name space accesses --- this is mostly \isa{false} in
+ practice. Note that this part of qualification is typically used in
+ derived specification mechanisms.
- \item \verb|Long_Name.append|~\isa{name\isactrlisub {\isadigit{1}}\ name\isactrlisub {\isadigit{2}}}
- appends two qualified names.
+ \item \verb|Binding.prefix| is similar to \verb|Binding.qualify|, but
+ affects the system prefix. This part of extra qualification is
+ typically used in the infrastructure for modular specifications,
+ notably ``local theory targets'' (see also \chref{ch:local-theory}).
- \item \verb|Long_Name.implode|~\isa{names} and \verb|Long_Name.explode|~\isa{name} convert between the packed string
- representation and the explicit list form of qualified names.
+ \item \verb|Binding.conceal|~\isa{binding} indicates that the
+ binding shall refer to an entity that serves foundational purposes
+ only. This flag helps to mark implementation details of
+ specification mechanisms etc. Other tools should not depend on the
+ particulars of concealed entities (cf.\ \verb|Name_Space.is_concealed|).
+
+ \item \verb|Binding.str_of|~\isa{binding} produces a string
+ representation for human-readable output, together with some formal
+ markup that might get used in GUI front-ends, for example.
\item \verb|Name_Space.naming| represents the abstract concept of
a naming policy.
@@ -853,6 +1146,10 @@
This operation is mostly for printing! User code should not rely on
the precise result too much.
+ \item \verb|Name_Space.is_concealed|~\isa{space\ name} indicates
+ whether \isa{name} refers to a strictly private entity that
+ other tools are supposed to ignore!
+
\end{description}%
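+
+ For illustration, a minimal sketch of lookup in both directions,
+ assuming a name space value \verb|space| taken from some context:
+ \begin{quote}
+ \verb|val c = Name_Space.intern space "bar.baz";| \\
+ \verb|val x = Name_Space.extern space c;|
+ \end{quote}%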
\end{isamarkuptext}%
\isamarkuptrue%
--- a/doc-src/IsarImplementation/Thy/document/Proof.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Proof.tex Tue Feb 09 17:06:05 2010 +0100
@@ -42,9 +42,10 @@
or outside the current scope by providing separate syntactic
categories for \emph{fixed variables} (e.g.\ \isa{x}) vs.\
\emph{schematic variables} (e.g.\ \isa{{\isacharquery}x}). Incidently, a
- universal result \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} has the HHF normal form \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}x{\isacharparenright}}, which represents its generality nicely without requiring
- an explicit quantifier. The same principle works for type
- variables: \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}{\isasymalpha}{\isacharparenright}} represents the idea of ``\isa{{\isasymturnstile}\ {\isasymforall}{\isasymalpha}{\isachardot}\ B{\isacharparenleft}{\isasymalpha}{\isacharparenright}}'' without demanding a truly polymorphic framework.
+ universal result \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} has the HHF normal form \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}x{\isacharparenright}}, which represents its generality without requiring an
+ explicit quantifier. The same principle works for type variables:
+ \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}{\isasymalpha}{\isacharparenright}} represents the idea of ``\isa{{\isasymturnstile}\ {\isasymforall}{\isasymalpha}{\isachardot}\ B{\isacharparenleft}{\isasymalpha}{\isacharparenright}}''
+ without demanding a truly polymorphic framework.
\medskip Additional care is required to treat type variables in a
way that facilitates type-inference. In principle, term variables
@@ -113,7 +114,8 @@
\indexdef{}{ML}{Variable.polymorphic}\verb|Variable.polymorphic: Proof.context -> term list -> term list| \\
\indexdef{}{ML}{Variable.import}\verb|Variable.import: bool -> thm list -> Proof.context ->|\isasep\isanewline%
\verb| (((ctyp * ctyp) list * (cterm * cterm) list) * thm list) * Proof.context| \\
- \indexdef{}{ML}{Variable.focus}\verb|Variable.focus: cterm -> Proof.context -> ((string * cterm) list * cterm) * Proof.context| \\
+ \indexdef{}{ML}{Variable.focus}\verb|Variable.focus: cterm -> Proof.context ->|\isasep\isanewline%
+\verb| ((string * cterm) list * cterm) * Proof.context| \\
\end{mldecls}
\begin{description}
@@ -167,6 +169,160 @@
%
\endisadelimmlref
%
+\isadelimmlex
+%
+\endisadelimmlex
+%
+\isatagmlex
+%
+\begin{isamarkuptext}%
+The following example (in theory \hyperlink{theory.Pure}{\mbox{\isa{Pure}}}) shows
+ how to work with fixed term and type parameters and with
+ type-inference.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlex
+{\isafoldmlex}%
+%
+\isadelimmlex
+%
+\endisadelimmlex
+\isacommand{typedecl}\isamarkupfalse%
+\ foo\ \ %
+\isamarkupcmt{some basic type for testing purposes%
+}
+\isanewline
+%
+\isadelimML
+\isanewline
+%
+\endisadelimML
+%
+\isatagML
+\isacommand{ML}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}static\ compile{\isacharminus}time\ context\ {\isacharminus}{\isacharminus}\ for\ testing\ only{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ ctxt{\isadigit{0}}\ {\isacharequal}\ %
+\isaantiq
+context%
+\endisaantiq
+{\isacharsemicolon}\isanewline
+\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}locally\ fixed\ parameters\ {\isacharminus}{\isacharminus}\ no\ type\ assignment\ yet{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ {\isacharparenleft}{\isacharbrackleft}x{\isacharcomma}\ y{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{1}}{\isacharparenright}\ {\isacharequal}\ ctxt{\isadigit{0}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}add{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}x{\isachardoublequote}{\isacharcomma}\ {\isachardoublequote}y{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}t{\isadigit{1}}{\isacharcolon}\ most\ general\ fixed\ type{\isacharsemicolon}\ t{\isadigit{1}}{\isacharprime}{\isacharcolon}\ most\ general\ arbitrary\ type{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ t{\isadigit{1}}\ {\isacharequal}\ Syntax{\isachardot}read{\isacharunderscore}term\ ctxt{\isadigit{1}}\ {\isachardoublequote}x{\isachardoublequote}{\isacharsemicolon}\isanewline
+\ \ val\ t{\isadigit{1}}{\isacharprime}\ {\isacharequal}\ singleton\ {\isacharparenleft}Variable{\isachardot}polymorphic\ ctxt{\isadigit{1}}{\isacharparenright}\ t{\isadigit{1}}{\isacharsemicolon}\isanewline
+\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}term\ u\ enforces\ specific\ type\ assignment{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ u\ {\isacharequal}\ Syntax{\isachardot}read{\isacharunderscore}term\ ctxt{\isadigit{1}}\ {\isachardoublequote}{\isacharparenleft}x{\isacharcolon}{\isacharcolon}foo{\isacharparenright}\ {\isasymequiv}\ y{\isachardoublequote}{\isacharsemicolon}\isanewline
+\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}official\ declaration\ of\ u\ {\isacharminus}{\isacharminus}\ propagates\ constraints\ etc{\isachardot}{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ ctxt{\isadigit{2}}\ {\isacharequal}\ ctxt{\isadigit{1}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}declare{\isacharunderscore}term\ u{\isacharsemicolon}\isanewline
+\ \ val\ t{\isadigit{2}}\ {\isacharequal}\ Syntax{\isachardot}read{\isacharunderscore}term\ ctxt{\isadigit{2}}\ {\isachardoublequote}x{\isachardoublequote}{\isacharsemicolon}\ \ {\isacharparenleft}{\isacharasterisk}x{\isacharcolon}{\isacharcolon}foo\ is\ enforced{\isacharasterisk}{\isacharparenright}\isanewline
+{\isacharverbatimclose}%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+In the above example, the starting context had been derived
+ from the toplevel theory, which means that fixed variables are
+ internalized literally: \verb|x| is mapped again to
+ \verb|x|, and attempting to fix it again in the subsequent
+ context is an error. Alternatively, fixed parameters can be renamed
+ explicitly as follows:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\isatagML
+\isacommand{ML}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ val\ ctxt{\isadigit{0}}\ {\isacharequal}\ %
+\isaantiq
+context%
+\endisaantiq
+{\isacharsemicolon}\isanewline
+\ \ val\ {\isacharparenleft}{\isacharbrackleft}x{\isadigit{1}}{\isacharcomma}\ x{\isadigit{2}}{\isacharcomma}\ x{\isadigit{3}}{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{1}}{\isacharparenright}\ {\isacharequal}\isanewline
+\ \ \ \ ctxt{\isadigit{0}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}variant{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}x{\isachardoublequote}{\isacharcomma}\ {\isachardoublequote}x{\isachardoublequote}{\isacharcomma}\ {\isachardoublequote}x{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+{\isacharverbatimclose}%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+\noindent Subsequent ML code can now work with the invented
+ names \verb|x1|, \verb|x2|, \verb|x3|, without
+ depending on the details of the system policy for introducing these
+ variants. Recall that within a proof body the system always invents
+ fresh ``skolem constants'', e.g.\ as follows:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{lemma}\isamarkupfalse%
+\ {\isachardoublequoteopen}PROP\ XXX{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+%
+\isadelimML
+\ \ %
+\endisadelimML
+%
+\isatagML
+\isacommand{ML{\isacharunderscore}prf}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ \ \ val\ ctxt{\isadigit{0}}\ {\isacharequal}\ %
+\isaantiq
+context%
+\endisaantiq
+{\isacharsemicolon}\isanewline
+\isanewline
+\ \ \ \ val\ {\isacharparenleft}{\isacharbrackleft}x{\isadigit{1}}{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{1}}{\isacharparenright}\ {\isacharequal}\ ctxt{\isadigit{0}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}add{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}x{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\ \ \ \ val\ {\isacharparenleft}{\isacharbrackleft}x{\isadigit{2}}{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{2}}{\isacharparenright}\ {\isacharequal}\ ctxt{\isadigit{1}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}add{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}x{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\ \ \ \ val\ {\isacharparenleft}{\isacharbrackleft}x{\isadigit{3}}{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{3}}{\isacharparenright}\ {\isacharequal}\ ctxt{\isadigit{2}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}add{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}x{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\isanewline
+\ \ \ \ val\ {\isacharparenleft}{\isacharbrackleft}y{\isadigit{1}}{\isacharcomma}\ y{\isadigit{2}}{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{4}}{\isacharparenright}\ {\isacharequal}\isanewline
+\ \ \ \ \ \ ctxt{\isadigit{3}}\ {\isacharbar}{\isachargreater}\ Variable{\isachardot}variant{\isacharunderscore}fixes\ {\isacharbrackleft}{\isachardoublequote}y{\isachardoublequote}{\isacharcomma}\ {\isachardoublequote}y{\isachardoublequote}{\isacharbrackright}{\isacharsemicolon}\isanewline
+\ \ {\isacharverbatimclose}\isanewline
+\ \ \isacommand{oops}\isamarkupfalse%
+%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+\noindent In this situation \verb|Variable.add_fixes| and \verb|Variable.variant_fixes| are very similar, but identical name
+ proposals given in a row are accepted only by the second version.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
\isamarkupsection{Assumptions \label{sec:assumptions}%
}
\isamarkuptrue%
@@ -192,21 +348,21 @@
context into a (smaller) outer context, by discharging the
difference of the assumptions as specified by the associated export
rules. Note that the discharged portion is determined by the
- difference contexts, not the facts being exported! There is a
+ difference of contexts, not the facts being exported! There is a
separate flag to indicate a goal context, where the result is meant
to refine an enclosing sub-goal of a structured proof state.
\medskip The most basic export rule discharges assumptions directly
by means of the \isa{{\isasymLongrightarrow}} introduction rule:
\[
- \infer[(\isa{{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \infer[(\isa{{\isasymLongrightarrow}{\isasymdash}intro})]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
\]
The variant for goal refinements marks the newly introduced
premises, which causes the canonical Isar goal refinement scheme to
enforce unification with local premises within the goal:
\[
- \infer[(\isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \infer[(\isa{{\isacharhash}{\isasymLongrightarrow}{\isasymdash}intro})]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
\]
\medskip Alternative versions of assumptions may perform arbitrary
@@ -215,7 +371,7 @@
definition works by fixing \isa{x} and assuming \isa{x\ {\isasymequiv}\ t},
with the following export rule to reverse the effect:
\[
- \infer[(\isa{{\isasymequiv}{\isacharminus}expand})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ x\ {\isasymequiv}\ t\ {\isasymturnstile}\ B\ t}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B\ x}}
+ \infer[(\isa{{\isasymequiv}{\isasymdash}expand})]{\isa{{\isasymGamma}\ {\isacharminus}\ {\isacharparenleft}x\ {\isasymequiv}\ t{\isacharparenright}\ {\isasymturnstile}\ B\ t}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B\ x}}
\]
This works, because the assumption \isa{x\ {\isasymequiv}\ t} was introduced in
a context with \isa{x} being fresh, so \isa{x} does not
@@ -247,8 +403,8 @@
where the \verb|bool| indicates goal mode, and the \verb|cterm list| the collection of assumptions to be discharged
simultaneously.
- \item \verb|Assumption.assume|~\isa{A} turns proposition \isa{A} into a raw assumption \isa{A\ {\isasymturnstile}\ A{\isacharprime}}, where the conclusion
- \isa{A{\isacharprime}} is in HHF normal form.
+ \item \verb|Assumption.assume|~\isa{A} turns proposition \isa{A} into a primitive assumption \isa{A\ {\isasymturnstile}\ A{\isacharprime}}, where the
+ conclusion \isa{A{\isacharprime}} is in HHF normal form.
\item \verb|Assumption.add_assms|~\isa{r\ As} augments the context
by assumptions \isa{As} with export rule \isa{r}. The
@@ -256,7 +412,8 @@
\verb|Assumption.assume|.
\item \verb|Assumption.add_assumes|~\isa{As} is a special case of
- \verb|Assumption.add_assms| where the export rule performs \isa{{\isasymLongrightarrow}{\isacharunderscore}intro} or \isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro}, depending on goal mode.
+ \verb|Assumption.add_assms| where the export rule performs \isa{{\isasymLongrightarrow}{\isasymdash}intro} or \isa{{\isacharhash}{\isasymLongrightarrow}{\isasymdash}intro}, depending on goal
+ mode.
\item \verb|Assumption.export|~\isa{is{\isacharunderscore}goal\ inner\ outer\ thm}
 exports result \isa{thm} from the \isa{inner} context
@@ -276,7 +433,68 @@
%
\endisadelimmlref
%
-\isamarkupsection{Results \label{sec:results}%
+\isadelimmlex
+%
+\endisadelimmlex
+%
+\isatagmlex
+%
+\begin{isamarkuptext}%
+The following example demonstrates how rules can be
+ derived by building up a context of assumptions first, and exporting
+ some local fact afterwards. We refer to \hyperlink{theory.Pure}{\mbox{\isa{Pure}}} equality
+ here for testing purposes.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlex
+{\isafoldmlex}%
+%
+\isadelimmlex
+%
+\endisadelimmlex
+%
+\isadelimML
+%
+\endisadelimML
+%
+\isatagML
+\isacommand{ML}\isamarkupfalse%
+\ {\isacharverbatimopen}\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}static\ compile{\isacharminus}time\ context\ {\isacharminus}{\isacharminus}\ for\ testing\ only{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ ctxt{\isadigit{0}}\ {\isacharequal}\ %
+\isaantiq
+context%
+\endisaantiq
+{\isacharsemicolon}\isanewline
+\isanewline
+\ \ val\ {\isacharparenleft}{\isacharbrackleft}eq{\isacharbrackright}{\isacharcomma}\ ctxt{\isadigit{1}}{\isacharparenright}\ {\isacharequal}\isanewline
+\ \ \ \ ctxt{\isadigit{0}}\ {\isacharbar}{\isachargreater}\ Assumption{\isachardot}add{\isacharunderscore}assumes\ {\isacharbrackleft}%
+\isaantiq
+cprop\ {\isachardoublequote}x\ {\isasymequiv}\ y{\isachardoublequote}%
+\endisaantiq
+{\isacharbrackright}{\isacharsemicolon}\isanewline
+\ \ val\ eq{\isacharprime}\ {\isacharequal}\ Thm{\isachardot}symmetric\ eq{\isacharsemicolon}\isanewline
+\isanewline
+\ \ {\isacharparenleft}{\isacharasterisk}back\ to\ original\ context\ {\isacharminus}{\isacharminus}\ discharges\ assumption{\isacharasterisk}{\isacharparenright}\isanewline
+\ \ val\ r\ {\isacharequal}\ Assumption{\isachardot}export\ false\ ctxt{\isadigit{1}}\ ctxt{\isadigit{0}}\ eq{\isacharprime}{\isacharsemicolon}\isanewline
+{\isacharverbatimclose}%
+\endisatagML
+{\isafoldML}%
+%
+\isadelimML
+%
+\endisadelimML
+%
+\begin{isamarkuptext}%
+\noindent Note that the variables of the resulting rule are
+  not generalized. This would have required fixing them properly in
+  the context beforehand, and exporting wrt.\ variables afterwards (cf.\
+ \verb|Variable.export| or the combined \verb|ProofContext.export|).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Structured goals and results \label{sec:struct-goals}%
}
\isamarkuptrue%
%
@@ -296,6 +514,9 @@
the tactic needs to solve the conclusion, but may use the premise as
a local fact, for locally fixed variables.
+  The family of \isa{FOCUS} combinators is similar to \isa{SUBPROOF}, but allows retaining schematic variables and pending
+ subgoals in the resulting goal state.
+
The \isa{prove} operation provides an interface for structured
backwards reasoning under program control, with some explicit sanity
checks of the result. The goal context can be augmented by
@@ -308,7 +529,8 @@
The \isa{obtain} operation produces results by eliminating
existing facts by means of a given tactic. This acts like a dual
conclusion: the proof demonstrates that the context may be augmented
- by certain fixed variables and assumptions. See also
+ by parameters and assumptions, without affecting any conclusions
+ that do not mention these parameters. See also
\cite{isabelle-isar-ref} for the user-level \isa{{\isasymOBTAIN}} and
\isa{{\isasymGUESS}} elements. Final results, which may not refer to
the parameters in the conclusion, need to be exported explicitly into
@@ -325,7 +547,11 @@
\begin{isamarkuptext}%
\begin{mldecls}
\indexdef{}{ML}{SUBPROOF}\verb|SUBPROOF: (Subgoal.focus -> tactic) -> Proof.context -> int -> tactic| \\
+ \indexdef{}{ML}{Subgoal.FOCUS}\verb|Subgoal.FOCUS: (Subgoal.focus -> tactic) -> Proof.context -> int -> tactic| \\
+ \indexdef{}{ML}{Subgoal.FOCUS\_PREMS}\verb|Subgoal.FOCUS_PREMS: (Subgoal.focus -> tactic) -> Proof.context -> int -> tactic| \\
+ \indexdef{}{ML}{Subgoal.FOCUS\_PARAMS}\verb|Subgoal.FOCUS_PARAMS: (Subgoal.focus -> tactic) -> Proof.context -> int -> tactic| \\
\end{mldecls}
+
\begin{mldecls}
\indexdef{}{ML}{Goal.prove}\verb|Goal.prove: Proof.context -> string list -> term list -> term ->|\isasep\isanewline%
\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm| \\
@@ -345,6 +571,11 @@
schematic parameters of the goal are imported into the context as
fixed ones, which may not be instantiated in the sub-proof.
+ \item \verb|Subgoal.FOCUS|, \verb|Subgoal.FOCUS_PREMS|, and \verb|Subgoal.FOCUS_PARAMS| are similar to \verb|SUBPROOF|, but are
+ slightly more flexible: only the specified parts of the subgoal are
+ imported into the context, and the body tactic may introduce new
+ subgoals and schematic variables.
+
\item \verb|Goal.prove|~\isa{ctxt\ xs\ As\ C\ tac} states goal \isa{C} in the context augmented by fixed variables \isa{xs} and
assumptions \isa{As}, and applies tactic \isa{tac} to solve
it. The latter may depend on the local assumptions being presented
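
As a rough illustration of the \verb|Goal.prove| interface described in the hunk above, the following Isabelle/ML fragment states a trivial Pure goal and solves it by reflexivity. It is a minimal sketch only: the statement, the fixed variable name, and the use of rtac with Drule.reflexive_thm are illustrative assumptions, not part of this changeset.

ML {*
  (*minimal sketch, not from this changeset: state a Pure goal for a
    locally fixed x and solve it by reflexivity*)
  val ctxt = @{context};
  val r =
    Goal.prove ctxt ["x"] [] @{prop "x == x"}
      (fn _ => rtac Drule.reflexive_thm 1);
*}

Under these assumptions, the result r should be the exported theorem with x generalized to a schematic variable, since Goal.prove discharges the given fixes and assumptions when producing the final thm.
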
--- a/doc-src/IsarImplementation/Thy/document/Syntax.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Syntax.tex Tue Feb 09 17:06:05 2010 +0100
@@ -18,7 +18,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Syntax and type-checking%
+\isamarkupchapter{Concrete syntax and type-checking%
}
\isamarkuptrue%
%
@@ -27,6 +27,19 @@
\end{isamarkuptext}%
\isamarkuptrue%
%
+\isamarkupsection{Parsing and printing%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Checking and unchecking \label{sec:term-check}%
+}
+\isamarkuptrue%
+%
\isadelimtheory
%
\endisadelimtheory
--- a/doc-src/IsarImplementation/Thy/document/Tactic.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/Thy/document/Tactic.tex Tue Feb 09 17:06:05 2010 +0100
@@ -23,7 +23,7 @@
\isamarkuptrue%
%
\begin{isamarkuptext}%
-Tactical reasoning works by refining the initial claim in a
+Tactical reasoning works by refining an initial claim in a
backwards fashion, until a solved form is reached. A \isa{goal}
consists of several subgoals that need to be solved in order to
achieve the main statement; zero subgoals means that the proof may
@@ -54,8 +54,8 @@
nesting rarely exceeds 1--2 in practice.
The main conclusion \isa{C} is internally marked as a protected
- proposition, which is represented explicitly by the notation \isa{{\isacharhash}C}. This ensures that the decomposition into subgoals and main
- conclusion is well-defined for arbitrarily structured claims.
+ proposition, which is represented explicitly by the notation \isa{{\isacharhash}C} here. This ensures that the decomposition into subgoals and
+ main conclusion is well-defined for arbitrarily structured claims.
\medskip Basic goal management is performed via the following
Isabelle/Pure rules:
@@ -129,26 +129,27 @@
supporting memoing.\footnote{The lack of memoing and the strict
nature of SML requires some care when working with low-level
sequence operations, to avoid duplicate or premature evaluation of
- results.}
+ results. It also means that modified runtime behavior, such as
+ timeout, is very hard to achieve for general tactics.}
An \emph{empty result sequence} means that the tactic has failed: in
- a compound tactic expressions other tactics might be tried instead,
+ a compound tactic expression other tactics might be tried instead,
or the whole refinement step might fail outright, producing a
- toplevel error message. When implementing tactics from scratch, one
- should take care to observe the basic protocol of mapping regular
- error conditions to an empty result; only serious faults should
- emerge as exceptions.
+ toplevel error message in the end. When implementing tactics from
+ scratch, one should take care to observe the basic protocol of
+ mapping regular error conditions to an empty result; only serious
+ faults should emerge as exceptions.
By enumerating \emph{multiple results}, a tactic can easily express
the potential outcome of an internal search process. There are also
combinators for building proof tools that involve search
systematically, see also \secref{sec:tacticals}.
- \medskip As explained in \secref{sec:tactical-goals}, a goal state
- essentially consists of a list of subgoals that imply the main goal
- (conclusion). Tactics may operate on all subgoals or on a
- particularly specified subgoal, but must not change the main
- conclusion (apart from instantiating schematic goal variables).
+ \medskip As explained before, a goal state essentially consists of a
+ list of subgoals that imply the main goal (conclusion). Tactics may
+ operate on all subgoals or on a particularly specified subgoal, but
+ must not change the main conclusion (apart from instantiating
+ schematic goal variables).
Tactics with explicit \emph{subgoal addressing} are of the form
\isa{int\ {\isasymrightarrow}\ tactic} and may be applied to a particular subgoal
@@ -170,7 +171,7 @@
Tactics with internal subgoal addressing should expose the subgoal
index as \isa{int} argument in full generality; a hardwired
- subgoal 1 inappropriate.
+ subgoal 1 is not acceptable.
\medskip The main well-formedness conditions for proper tactics are
summarized as follows.
@@ -192,11 +193,11 @@
\end{itemize}
Some of these conditions are checked by higher-level goal
- infrastructure (\secref{sec:results}); others are not checked
+ infrastructure (\secref{sec:struct-goals}); others are not checked
explicitly, and violating them merely results in ill-behaved tactics
  experienced by the user (e.g.\ tactics that insist on being
- applicable only to singleton goals, or disallow composition with
- basic tacticals).%
+ applicable only to singleton goals, or prevent composition via
+ standard tacticals).%
\end{isamarkuptext}%
\isamarkuptrue%
%
@@ -414,7 +415,7 @@
Type instantiations may be given as well, via pairs like \isa{{\isacharparenleft}{\isacharquery}{\isacharprime}a{\isacharcomma}\ {\isasymtau}{\isacharparenright}}. Type instantiations are distinguished from term
instantiations by the syntactic form of the schematic variable.
Types are instantiated before terms are. Since term instantiation
- already performs type-inference as expected, explicit type
+  already performs simple type-inference, explicit type
instantiations are seldom necessary.%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -452,7 +453,13 @@
\item \verb|rename_tac|~\isa{names\ i} renames the innermost
  parameters of subgoal \isa{i} according to the provided \isa{names} (which need to be distinct identifiers).
- \end{description}%
+ \end{description}
+
+ For historical reasons, the above instantiation tactics take
+ unparsed string arguments, which makes them hard to use in general
+ ML code. The slightly more advanced \verb|Subgoal.FOCUS| combinator
+  of \secref{sec:struct-goals} allows one to refer to internal goal
+ structure with explicit context management.%
\end{isamarkuptext}%
\isamarkuptrue%
%
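
To complement the remark above about string-based instantiation tactics, the following Isabelle/ML fragment sketches how \verb|Subgoal.FOCUS| can be used instead to reach the premises of a subgoal programmatically. The tactic shown is a hypothetical illustration under the signature documented above, not part of this changeset.

ML {*
  (*hedged sketch: focus on subgoal i and try to resolve its conclusion
    with one of the local premises; purely illustrative*)
  fun solve_from_prems ctxt i =
    Subgoal.FOCUS (fn {prems, ...} => resolve_tac prems 1) ctxt i;
*}

Such a tactic manipulates terms and theorems directly, with the focused context managed explicitly, instead of going through unparsed strings.
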
--- a/doc-src/IsarImplementation/implementation.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/implementation.tex Tue Feb 09 17:06:05 2010 +0100
@@ -25,7 +25,7 @@
\begin{document}
-\maketitle
+\maketitle
\begin{abstract}
We describe the key concepts underlying the Isabelle/Isar
@@ -37,7 +37,7 @@
\vspace*{2.5cm}
\begin{quote}
-
+
{\small\em Isabelle was not designed; it evolved. Not everyone
likes this idea. Specification experts rightly abhor
trial-and-error programming. They suggest that no one should
@@ -45,17 +45,28 @@
specification. But university departments are not software houses.
Programs like Isabelle are not products: when they have served
their purpose, they are discarded.}
-
+
Lawrence C. Paulson, ``Isabelle: The Next 700 Theorem Provers''
\vspace*{1cm}
-
+
{\small\em As I did 20 years ago, I still fervently believe that the
only way to make software secure, reliable, and fast is to make it
small. Fight features.}
-
+
Andrew S. Tanenbaum
+ \vspace*{1cm}
+
+ {\small\em One thing that UNIX does not need is more features. It is
+ successful in part because it has a small number of good ideas
+ that work well together. Merely adding features does not make it
+ easier for users to do things --- it just makes the manual
+ thicker. The right solution in the right place is always more
+ effective than haphazard hacking.}
+
+ Rob Pike and Brian W. Kernighan
+
\end{quote}
\thispagestyle{empty}\clearpage
@@ -89,7 +100,7 @@
\end{document}
-%%% Local Variables:
+%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
-%%% End:
+%%% End:
--- a/doc-src/IsarImplementation/style.sty Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/IsarImplementation/style.sty Tue Feb 09 17:06:05 2010 +0100
@@ -19,7 +19,6 @@
\pagestyle{headings}
\sloppy
\binperiod
-\underscoreon
\renewcommand{\isadigit}[1]{\isamath{#1}}
@@ -30,6 +29,13 @@
\renewcommand{\isatagmlref}{\subsection*{\makebox[0pt][r]{\fbox{\ML}~~}Reference}\begingroup\def\isastyletext{\rm}\small}
\renewcommand{\endisatagmlref}{\endgroup}
+\isakeeptag{mlex}
+\renewcommand{\isatagmlex}{\subsection*{\makebox[0pt][r]{\fbox{\ML}~~}Examples}}
+\renewcommand{\endisatagmlex}{}
+
+\renewcommand{\isatagML}{\begingroup\isabellestyle{default}\isastyle\def\isadigit##1{##1}}
+\renewcommand{\endisatagML}{\endgroup}
+
\newcommand{\minorcmd}[1]{{\sf #1}}
\newcommand{\isasymtype}{\minorcmd{type}}
\newcommand{\isasymval}{\minorcmd{val}}
@@ -49,9 +55,7 @@
\newcommand{\isasymDEFINITION}{\isakeyword{definition}}
\isabellestyle{it}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "implementation"
-%%% End:
+\underscoreon
+\renewcommand{\isacharunderscore}{\_}
+\renewcommand{\isacharunderscorekeyword}{\_}
+\newcommand{\isasymdash}{\mbox{-}}
--- a/doc-src/Main/Docs/Main_Doc.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/Main/Docs/Main_Doc.thy Tue Feb 09 17:06:05 2010 +0100
@@ -48,8 +48,8 @@
\smallskip
\begin{supertabular}{@ {} l @ {~::~} l l @ {}}
-@{const HOL.less_eq} & @{typeof HOL.less_eq} & (\verb$<=$)\\
-@{const HOL.less} & @{typeof HOL.less}\\
+@{const Algebras.less_eq} & @{typeof Algebras.less_eq} & (\verb$<=$)\\
+@{const Algebras.less} & @{typeof Algebras.less}\\
@{const Orderings.Least} & @{typeof Orderings.Least}\\
@{const Orderings.min} & @{typeof Orderings.min}\\
@{const Orderings.max} & @{typeof Orderings.max}\\
@@ -297,7 +297,7 @@
\section{Algebra}
-Theories @{theory OrderedGroup}, @{theory Ring_and_Field} and @{theory
+Theories @{theory Groups}, @{theory Rings}, @{theory Fields} and @{theory
Divides} define a large collection of classes describing common algebraic
structures from semigroups up to fields. Everything is done in terms of
overloaded operators:
--- a/doc-src/Main/Docs/document/Main_Doc.tex Tue Feb 09 16:07:51 2010 +0100
+++ b/doc-src/Main/Docs/document/Main_Doc.tex Tue Feb 09 17:06:05 2010 +0100
@@ -308,7 +308,7 @@
\section{Algebra}
-Theories \isa{OrderedGroup}, \isa{Ring{\isacharunderscore}and{\isacharunderscore}Field} and \isa{Divides} define a large collection of classes describing common algebraic
+Theories \isa{Groups}, \isa{Rings}, \isa{Fields} and \isa{Divides} define a large collection of classes describing common algebraic
structures from semigroups up to fields. Everything is done in terms of
overloaded operators:
--- a/etc/settings Tue Feb 09 16:07:51 2010 +0100
+++ b/etc/settings Tue Feb 09 17:06:05 2010 +0100
@@ -50,12 +50,6 @@
#ML_PLATFORM=$(eval $("$ML_HOME/.arch-n-opsys" 2>/dev/null); echo "$HEAP_SUFFIX")
#SMLNJ_CYGWIN_RUNTIME=1
-# Moscow ML 2.00 (experimental!)
-#ML_SYSTEM=mosml
-#ML_HOME="/usr/local/mosml/bin"
-#ML_OPTIONS=""
-#ML_PLATFORM=""
-
###
### JVM components (Scala or Java)
--- a/lib/Tools/keywords Tue Feb 09 16:07:51 2010 +0100
+++ b/lib/Tools/keywords Tue Feb 09 17:06:05 2010 +0100
@@ -66,5 +66,4 @@
gzip -dc "$LOG"
fi
echo
-done | \
-perl -w "$ISABELLE_HOME/lib/scripts/keywords.pl" "$KEYWORDS_NAME" "$SESSIONS"
+done | "$ISABELLE_HOME/lib/scripts/keywords" "$KEYWORDS_NAME" "$SESSIONS"
--- a/lib/Tools/unsymbolize Tue Feb 09 16:07:51 2010 +0100
+++ b/lib/Tools/unsymbolize Tue Feb 09 17:06:05 2010 +0100
@@ -34,4 +34,4 @@
## main
find $SPECS \( -name \*.ML -o -name \*.thy \) -print | \
- xargs perl -w "$ISABELLE_HOME/lib/scripts/unsymbolize.pl"
+ xargs "$ISABELLE_HOME/lib/scripts/unsymbolize"
--- a/lib/Tools/yxml Tue Feb 09 16:07:51 2010 +0100
+++ b/lib/Tools/yxml Tue Feb 09 17:06:05 2010 +0100
@@ -31,4 +31,4 @@
## main
-exec perl -w "$ISABELLE_HOME/lib/scripts/yxml.pl"
+exec "$ISABELLE_HOME/lib/scripts/yxml"
--- a/lib/fonts/IsabelleText.sfd Tue Feb 09 16:07:51 2010 +0100
+++ b/lib/fonts/IsabelleText.sfd Tue Feb 09 17:06:05 2010 +0100
@@ -13,13 +13,14 @@
LayerCount: 2
Layer: 0 1 "Back" 1
Layer: 1 1 "Fore" 0
+NeedsXUIDChange: 1
XUID: [1021 906 1711068302 4288927]
FSType: 4
OS2Version: 1
OS2_WeightWidthSlopeOnly: 0
OS2_UseTypoMetrics: 1
CreationTime: 1050361371
-ModificationTime: 1263327886
+ModificationTime: 1265397211
PfmFamily: 17
TTFWeight: 400
TTFWidth: 5
@@ -2240,7 +2241,7 @@
DisplaySize: -48
AntiAlias: 1
FitToEm: 1
-WinInfo: 160 16 10
+WinInfo: 8800 16 10
TeXData: 1 0 0 631296 315648 210432 572416 -1048576 210432 783286 444596 497025 792723 393216 433062 380633 303038 157286 324010 404750 52429 2506097 1059062 262144
BeginChars: 1114189 648
@@ -13869,61 +13870,67 @@
LayerCount: 2
Fore
SplineSet
-1401 1505 m 6,0,-1
- 402 -440 l 2,1,2
- 382 -480 382 -480 353 -480 c 0,3,4
- 332 -480 332 -480 320.5 -465 c 128,-1,5
- 309 -450 309 -450 309 -435 c 0,6,7
- 309 -426 309 -426 324 -395 c 2,8,-1
- 1323 1550 l 2,9,10
- 1343 1590 1343 1590 1372 1590 c 0,11,12
- 1393 1590 1393 1590 1404.5 1575 c 128,-1,13
- 1416 1560 1416 1560 1416 1545 c 0,14,15
- 1417 1536 1417 1536 1401 1505 c 6,0,-1
-1525 726 m 2,16,-1
- 200 726 l 2,17,18
- 186 726 186 726 178.5 726.5 c 128,-1,19
- 171 727 171 727 159 729 c 128,-1,20
- 147 731 147 731 140.5 735.5 c 128,-1,21
- 134 740 134 740 129 749 c 128,-1,22
- 124 758 124 758 124 770 c 0,23,24
- 124 786 124 786 130.5 796 c 128,-1,25
- 137 806 137 806 150 809.5 c 128,-1,26
- 163 813 163 813 172 814 c 128,-1,27
- 181 815 181 815 198 815 c 2,28,-1
- 1527 815 l 2,29,30
- 1543 815 1543 815 1552.5 814 c 128,-1,31
- 1562 813 1562 813 1575 809.5 c 128,-1,32
- 1588 806 1588 806 1594.5 796 c 128,-1,33
- 1601 786 1601 786 1601 770 c 0,34,35
- 1601 757 1601 757 1595.5 748.5 c 128,-1,36
- 1590 740 1590 740 1584 735.5 c 128,-1,37
- 1578 731 1578 731 1566 729 c 128,-1,38
- 1554 727 1554 727 1546.5 726.5 c 128,-1,39
- 1539 726 1539 726 1525 726 c 2,16,-1
-1527 295 m 2,40,-1
- 198 295 l 2,41,42
- 182 295 182 295 172.5 296 c 128,-1,43
- 163 297 163 297 150 300.5 c 128,-1,44
- 137 304 137 304 130.5 314 c 128,-1,45
- 124 324 124 324 124 340 c 0,46,47
- 124 353 124 353 129 361.5 c 128,-1,48
- 134 370 134 370 140.5 374.5 c 128,-1,49
- 147 379 147 379 159 381 c 128,-1,50
- 171 383 171 383 178.5 383.5 c 128,-1,51
- 186 384 186 384 200 384 c 2,52,-1
- 1525 384 l 2,53,54
- 1539 384 1539 384 1546.5 383.5 c 128,-1,55
- 1554 383 1554 383 1566 381 c 128,-1,56
- 1578 379 1578 379 1584 374.5 c 128,-1,57
- 1590 370 1590 370 1595.5 361 c 128,-1,58
- 1601 352 1601 352 1601 340 c 0,59,60
- 1601 324 1601 324 1594.5 314 c 128,-1,61
- 1588 304 1588 304 1575 300.5 c 128,-1,62
- 1562 297 1562 297 1552.5 296 c 128,-1,63
- 1543 295 1543 295 1527 295 c 2,40,-1
-EndSplineSet
-Validated: 5
+945 815 m 1,0,-1
+ 1323 1550 l 2,1,2
+ 1343 1590 1343 1590 1372 1590 c 0,3,4
+ 1393 1590 1393 1590 1404.5 1575 c 128,-1,5
+ 1416 1560 1416 1560 1416 1545 c 1,6,7
+ 1417 1536 1417 1536 1401 1505 c 2,8,-1
+ 1047 815 l 1,9,-1
+ 1527 815 l 2,10,11
+ 1543 815 1543 815 1552.5 814 c 128,-1,12
+ 1562 813 1562 813 1575 809.5 c 128,-1,13
+ 1588 806 1588 806 1594.5 796 c 128,-1,14
+ 1601 786 1601 786 1601 770 c 0,15,16
+ 1601 757 1601 757 1595.5 748.5 c 128,-1,17
+ 1590 740 1590 740 1584 735.5 c 128,-1,18
+ 1578 731 1578 731 1566 729 c 128,-1,19
+ 1554 727 1554 727 1546.5 726.5 c 128,-1,20
+ 1539 726 1539 726 1525 726 c 2,21,-1
+ 1001 726 l 1,22,-1
+ 825 384 l 1,23,-1
+ 1525 384 l 2,24,25
+ 1539 384 1539 384 1546.5 383.5 c 128,-1,26
+ 1554 383 1554 383 1566 381 c 128,-1,27
+ 1578 379 1578 379 1584 374.5 c 128,-1,28
+ 1590 370 1590 370 1595.5 361 c 128,-1,29
+ 1601 352 1601 352 1601 340 c 0,30,31
+ 1601 324 1601 324 1594.5 314 c 128,-1,32
+ 1588 304 1588 304 1575 300.5 c 128,-1,33
+ 1562 297 1562 297 1552.5 296 c 128,-1,34
+ 1543 295 1543 295 1527 295 c 2,35,-1
+ 780 295 l 1,36,-1
+ 402 -440 l 2,37,38
+ 382 -480 382 -480 353 -480 c 0,39,40
+ 332 -480 332 -480 320.5 -465 c 128,-1,41
+ 309 -450 309 -450 309 -435 c 0,42,43
+ 309 -426 309 -426 324 -395 c 2,44,-1
+ 678 295 l 1,45,-1
+ 198 295 l 2,46,47
+ 182 295 182 295 172.5 296 c 128,-1,48
+ 163 297 163 297 150 300.5 c 128,-1,49
+ 137 304 137 304 130.5 314 c 128,-1,50
+ 124 324 124 324 124 340 c 0,51,52
+ 124 353 124 353 129 361.5 c 128,-1,53
+ 134 370 134 370 140.5 374.5 c 128,-1,54
+ 147 379 147 379 159 381 c 128,-1,55
+ 171 383 171 383 178.5 383.5 c 128,-1,56
+ 186 384 186 384 200 384 c 2,57,-1
+ 724 384 l 1,58,-1
+ 900 726 l 1,59,-1
+ 200 726 l 2,60,61
+ 186 726 186 726 178.5 726.5 c 128,-1,62
+ 171 727 171 727 159 729 c 128,-1,63
+ 147 731 147 731 140.5 735.5 c 128,-1,64
+ 134 740 134 740 129 749 c 128,-1,65
+ 124 758 124 758 124 770 c 0,66,67
+ 124 786 124 786 130 796 c 0,68,69
+ 137 806 137 806 150 809.5 c 128,-1,70
+ 163 813 163 813 172 814 c 128,-1,71
+ 181 815 181 815 198 815 c 2,72,-1
+ 945 815 l 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: AE
@@ -14371,6 +14378,7 @@
1544 695 1544 695 1527 695 c 2,55,-1
908 695 l 1,0,-1
EndSplineSet
+Validated: 1
EndChar
StartChar: lessequal
@@ -17238,6 +17246,7 @@
163 598 163 598 172.5 599 c 128,-1,43
182 600 182 600 198 600 c 2,24,-1
EndSplineSet
+Validated: 33
EndChar
StartChar: lozenge
@@ -17930,6 +17939,7 @@
357 672 357 672 391.5 637.5 c 128,-1,0
426 603 426 603 426 555 c 128,-1,1
EndSplineSet
+Validated: 33
EndChar
StartChar: quotesinglbase
@@ -18068,7 +18078,7 @@
2098 665 2098 665 2080 634.5 c 128,-1,13
2062 604 2062 604 2049 563.5 c 128,-1,14
2036 523 2036 523 2027.5 460.5 c 128,-1,15
- 2019 398 2019 398 2019 322 c 0,16,17
+ 2019 398 2019 398 2019 322 c 128,-1,17
2019 246 2019 246 2027.5 184 c 128,-1,18
2036 122 2036 122 2049 81.5 c 128,-1,19
2062 41 2062 41 2080.5 11 c 128,-1,20
@@ -18145,7 +18155,7 @@
353 1560 353 1560 335 1529.5 c 128,-1,110
317 1499 317 1499 304 1458.5 c 128,-1,111
291 1418 291 1418 282.5 1355.5 c 128,-1,112
- 274 1293 274 1293 274 1217 c 0,113,114
+ 274 1293 274 1293 274 1217 c 128,-1,114
274 1141 274 1141 282.5 1079 c 128,-1,115
291 1017 291 1017 304 976.5 c 128,-1,116
317 936 317 936 335.5 905.5 c 128,-1,117
@@ -18166,7 +18176,7 @@
1337 665 1337 665 1318.5 634.5 c 128,-1,134
1300 604 1300 604 1287 563.5 c 128,-1,135
1274 523 1274 523 1266 460.5 c 128,-1,136
- 1258 398 1258 398 1258 322 c 0,137,138
+ 1258 398 1258 398 1258 322 c 128,-1,138
1258 246 1258 246 1266 184 c 128,-1,139
1274 122 1274 122 1287.5 81.5 c 128,-1,140
1301 41 1301 41 1319 11 c 128,-1,141
@@ -20091,6 +20101,7 @@
1389 1018 1389 1018 1370 999 c 2,41,-1
928 555 l 1,0,-1
EndSplineSet
+Validated: 1
EndChar
StartChar: uni00B9
@@ -28736,75 +28747,63 @@
LayerCount: 2
Fore
SplineSet
-213 511 m 5,0,-1
- 213 215 l 2,1,2
- 213 199 213 199 212 190 c 128,-1,3
- 211 181 211 181 207.5 168 c 128,-1,4
- 204 155 204 155 194 148.5 c 128,-1,5
- 184 142 184 142 168.5 142 c 128,-1,6
- 153 142 153 142 143 148.5 c 128,-1,7
- 133 155 133 155 129.5 168 c 128,-1,8
- 126 181 126 181 125 190 c 128,-1,9
- 124 199 124 199 124 215 c 2,10,-1
- 124 895 l 2,11,12
- 124 911 124 911 125 920 c 128,-1,13
- 126 929 126 929 129.5 942 c 128,-1,14
- 133 955 133 955 143 961.5 c 128,-1,15
- 153 968 153 968 168.5 968 c 128,-1,16
- 184 968 184 968 194 961.5 c 128,-1,17
- 204 955 204 955 207.5 942 c 128,-1,18
- 211 929 211 929 212 920 c 128,-1,19
- 213 911 213 911 213 895 c 2,20,-1
- 213 599 l 1,21,22
- 275 599 275 599 275 555 c 128,-1,23
- 275 511 275 511 213 511 c 5,0,-1
-1905 511 m 5,24,-1
- 198 511 l 2,25,26
- 184 511 184 511 177 511 c 128,-1,27
- 170 511 170 511 158 513.5 c 128,-1,28
- 146 516 146 516 140 520.5 c 128,-1,29
- 134 525 134 525 129 533.5 c 128,-1,30
- 124 542 124 542 124 555 c 128,-1,31
- 124 568 124 568 129 576.5 c 128,-1,32
- 134 585 134 585 140 589.5 c 128,-1,33
- 146 594 146 594 158 596.5 c 128,-1,34
- 170 599 170 599 177 599 c 128,-1,35
- 184 599 184 599 198 599 c 2,36,-1
- 1905 599 l 1,37,38
- 1847 641 1847 641 1798 698 c 128,-1,39
- 1749 755 1749 755 1722 802 c 128,-1,40
- 1695 849 1695 849 1680 882 c 128,-1,41
- 1665 915 1665 915 1665 924 c 0,42,43
- 1665 938 1665 938 1675 944 c 128,-1,44
- 1685 950 1685 950 1698 950 c 0,45,46
- 1715 950 1715 950 1721.5 943.5 c 128,-1,47
- 1728 937 1728 937 1736 919 c 0,48,49
- 1769 846 1769 846 1809.5 788.5 c 128,-1,50
- 1850 731 1850 731 1896.5 691.5 c 128,-1,51
- 1943 652 1943 652 1983.5 626.5 c 128,-1,52
- 2024 601 2024 601 2076 577 c 1,53,54
- 2079 577 2079 577 2085 571 c 128,-1,55
- 2091 565 2091 565 2091 555 c 0,56,57
- 2091 544 2091 544 2086 539 c 128,-1,58
- 2081 534 2081 534 2060 524 c 0,59,60
- 2014 502 2014 502 1973.5 475.5 c 128,-1,61
- 1933 449 1933 449 1904 424.5 c 128,-1,62
- 1875 400 1875 400 1848.5 369 c 128,-1,63
- 1822 338 1822 338 1806.5 316 c 128,-1,64
- 1791 294 1791 294 1774.5 264 c 128,-1,65
- 1758 234 1758 234 1751 220 c 128,-1,66
- 1744 206 1744 206 1734 182 c 0,67,68
- 1730 174 1730 174 1728.5 171.5 c 128,-1,69
- 1727 169 1727 169 1719 164.5 c 128,-1,70
- 1711 160 1711 160 1698 160 c 0,71,72
- 1684 160 1684 160 1674.5 166.5 c 128,-1,73
- 1665 173 1665 173 1665 186 c 0,74,75
- 1665 195 1665 195 1680 228 c 128,-1,76
- 1695 261 1695 261 1722 308 c 128,-1,77
- 1749 355 1749 355 1798 412 c 128,-1,78
- 1847 469 1847 469 1905 511 c 5,24,-1
-EndSplineSet
-Validated: 5
+213 599 m 1,0,-1
+ 1905 599 l 1,1,2
+ 1847 641 1847 641 1798 698 c 128,-1,3
+ 1749 755 1749 755 1722 802 c 128,-1,4
+ 1695 849 1695 849 1680 882 c 128,-1,5
+ 1665 915 1665 915 1665 924 c 0,6,7
+ 1665 938 1665 938 1675 944 c 128,-1,8
+ 1685 950 1685 950 1698 950 c 0,9,10
+ 1715 950 1715 950 1722 944 c 0,11,12
+ 1728 937 1728 937 1736 919 c 0,13,14
+ 1767 848 1767 848 1810 788 c 0,15,16
+ 1850 731 1850 731 1896.5 691.5 c 128,-1,17
+ 1943 652 1943 652 1983.5 626.5 c 128,-1,18
+ 2024 601 2024 601 2076 577 c 1,19,20
+ 2079 577 2079 577 2085 571 c 128,-1,21
+ 2091 565 2091 565 2091 555 c 0,22,23
+ 2091 544 2091 544 2086 539 c 128,-1,24
+ 2081 534 2081 534 2060 524 c 0,25,26
+ 2014 502 2014 502 1973.5 475.5 c 128,-1,27
+ 1933 449 1933 449 1904 424.5 c 128,-1,28
+ 1875 400 1875 400 1848.5 369 c 128,-1,29
+ 1822 338 1822 338 1806.5 316 c 128,-1,30
+ 1791 294 1791 294 1774.5 264 c 128,-1,31
+ 1758 234 1758 234 1751 220 c 128,-1,32
+ 1744 206 1744 206 1734 182 c 0,33,34
+ 1730 174 1730 174 1728.5 171.5 c 128,-1,35
+ 1727 169 1727 169 1719 164.5 c 128,-1,36
+ 1711 160 1711 160 1698 160 c 0,37,38
+ 1684 160 1684 160 1674 166 c 0,39,40
+ 1665 173 1665 173 1665 186 c 0,41,42
+ 1665 195 1665 195 1680 228 c 128,-1,43
+ 1695 261 1695 261 1722 308 c 128,-1,44
+ 1749 355 1749 355 1798 412 c 128,-1,45
+ 1847 469 1847 469 1905 511 c 1,46,-1
+ 213 511 l 1,47,-1
+ 213 215 l 2,48,49
+ 213 199 213 199 212 190 c 128,-1,50
+ 211 181 211 181 207.5 168 c 128,-1,51
+ 204 155 204 155 194 148.5 c 128,-1,52
+ 184 142 184 142 168.5 142 c 128,-1,53
+ 153 142 153 142 143 148.5 c 128,-1,54
+ 133 155 133 155 129.5 168 c 128,-1,55
+ 126 181 126 181 125 190 c 128,-1,56
+ 124 199 124 199 124 215 c 2,57,-1
+ 124 555 l 1,58,-1
+ 124 895 l 2,59,60
+ 124 911 124 911 125 920 c 128,-1,61
+ 126 929 126 929 129.5 942 c 128,-1,62
+ 133 955 133 955 143 961.5 c 128,-1,63
+ 153 968 153 968 168.5 968 c 128,-1,64
+ 184 968 184 968 194 961.5 c 128,-1,65
+ 204 955 204 955 207.5 942 c 128,-1,66
+ 211 929 211 929 212 920 c 128,-1,67
+ 213 911 213 911 213 895 c 2,68,-1
+ 213 599 l 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni21A9
@@ -28817,78 +28816,68 @@
LayerCount: 2
Fore
SplineSet
-2045 511 m 6,0,-1
- 337 511 l 1,1,2
- 395 469 395 469 444.5 412 c 128,-1,3
- 494 355 494 355 521 308 c 128,-1,4
- 548 261 548 261 562.5 228 c 128,-1,5
- 577 195 577 195 577 186 c 0,6,7
- 577 172 577 172 567 166 c 128,-1,8
- 557 160 557 160 544 160 c 0,9,10
- 527 160 527 160 520.5 166.5 c 128,-1,11
- 514 173 514 173 506 191 c 0,12,13
- 455 301 455 301 380 383.5 c 128,-1,14
- 305 466 305 466 178 526 c 0,15,16
- 172 529 172 529 167 532 c 128,-1,17
- 162 535 162 535 158.5 538 c 128,-1,18
- 155 541 155 541 153 545 c 128,-1,19
- 151 549 151 549 151 555 c 0,20,21
- 151 561 151 561 151.5 564 c 128,-1,22
- 152 567 152 567 158 571.5 c 128,-1,23
- 164 576 164 576 166 577 c 128,-1,24
- 168 578 168 578 182 586 c 0,25,26
- 245 616 245 616 298 655.5 c 128,-1,27
- 351 695 351 695 383 728.5 c 128,-1,28
- 415 762 415 762 443 805 c 128,-1,29
- 471 848 471 848 482.5 871.5 c 128,-1,30
- 494 895 494 895 508 928 c 0,31,32
- 512 936 512 936 514 938.5 c 128,-1,33
- 516 941 516 941 524 945.5 c 128,-1,34
- 532 950 532 950 544 950 c 0,35,36
- 558 950 558 950 567.5 943.5 c 128,-1,37
- 577 937 577 937 577 924 c 0,38,39
- 577 915 577 915 562.5 882 c 128,-1,40
- 548 849 548 849 521 802 c 128,-1,41
- 494 755 494 755 444.5 698.5 c 128,-1,42
- 395 642 395 642 337 599 c 1,43,-1
- 2045 599 l 2,44,45
- 2059 599 2059 599 2066 599 c 128,-1,46
- 2073 599 2073 599 2084.5 596.5 c 128,-1,47
- 2096 594 2096 594 2102 589.5 c 128,-1,48
- 2108 585 2108 585 2113 576.5 c 128,-1,49
- 2118 568 2118 568 2118 555 c 128,-1,50
- 2118 542 2118 542 2113 533.5 c 128,-1,51
- 2108 525 2108 525 2102 520.5 c 128,-1,52
- 2096 516 2096 516 2084.5 513.5 c 128,-1,53
- 2073 511 2073 511 2066 511 c 128,-1,54
- 2059 511 2059 511 2045 511 c 6,0,-1
-2402 770.5 m 132,-1,56
- 2402 720 2402 720 2382 676.5 c 128,-1,57
- 2362 633 2362 633 2329.5 603 c 128,-1,58
- 2297 573 2297 573 2255.5 551.5 c 128,-1,59
- 2214 530 2214 530 2171 520.5 c 128,-1,60
- 2128 511 2128 511 2087 511 c 0,61,62
- 2031 511 2031 511 2031 555 c 0,63,64
- 2031 558 2031 558 2032 563 c 128,-1,65
- 2033 568 2033 568 2037 577 c 128,-1,66
- 2041 586 2041 586 2051.5 592.5 c 128,-1,67
- 2062 599 2062 599 2078 599 c 0,68,69
- 2194 604 2194 604 2253.5 653.5 c 128,-1,70
- 2313 703 2313 703 2313 770 c 0,71,72
- 2313 798 2313 798 2302 824.5 c 128,-1,73
- 2291 851 2291 851 2266 877 c 128,-1,74
- 2241 903 2241 903 2192 920.5 c 128,-1,75
- 2143 938 2143 938 2076 941 c 0,76,77
- 2054 943 2054 943 2042.5 956 c 128,-1,78
- 2031 969 2031 969 2031 986 c 0,79,80
- 2031 1030 2031 1030 2087 1030 c 0,81,82
- 2128 1030 2128 1030 2171 1020 c 128,-1,83
- 2214 1010 2214 1010 2255.5 989 c 128,-1,84
- 2297 968 2297 968 2329.5 938 c 128,-1,85
- 2362 908 2362 908 2382 864.5 c 128,-1,55
- 2402 821 2402 821 2402 770.5 c 132,-1,56
-EndSplineSet
-Validated: 5
+2402 770.5 m 128,-1,1
+ 2402 720 2402 720 2382 676.5 c 128,-1,2
+ 2362 633 2362 633 2329.5 603 c 128,-1,3
+ 2297 573 2297 573 2256 552 c 0,4,5
+ 2211 528 2211 528 2171 520 c 0,6,7
+ 2126 511 2126 511 2087 511 c 0,8,9
+ 2080 511 2080 511 2074 512 c 1,10,11
+ 2070 511 2070 511 2066 511 c 2,12,-1
+ 2045 511 l 1,13,-1
+ 337 511 l 1,14,15
+ 395 469 395 469 444.5 412 c 128,-1,16
+ 494 355 494 355 521 308 c 128,-1,17
+ 548 261 548 261 562.5 228 c 128,-1,18
+ 577 195 577 195 577 186 c 0,19,20
+ 577 172 577 172 567 166 c 128,-1,21
+ 557 160 557 160 544 160 c 0,22,23
+ 527 160 527 160 520.5 166.5 c 128,-1,24
+ 514 173 514 173 506 191 c 0,25,26
+ 455 301 455 301 380 383.5 c 128,-1,27
+ 305 466 305 466 178 526 c 0,28,29
+ 172 529 172 529 167 532 c 128,-1,30
+ 162 535 162 535 158.5 538 c 128,-1,31
+ 155 541 155 541 153 545 c 128,-1,32
+ 151 549 151 549 151 555 c 128,-1,33
+ 151 561 151 561 151.5 564 c 128,-1,34
+ 152 567 152 567 158 571.5 c 128,-1,35
+ 164 576 164 576 166 577 c 128,-1,36
+ 168 578 168 578 182 586 c 1,37,38
+ 245 616 245 616 298 655.5 c 128,-1,39
+ 351 695 351 695 383 728 c 0,40,41
+ 415 762 415 762 443 805 c 128,-1,42
+ 471 848 471 848 482.5 871.5 c 128,-1,43
+ 494 895 494 895 508 928 c 0,44,45
+ 512 936 512 936 514 938.5 c 128,-1,46
+ 516 941 516 941 524 945.5 c 128,-1,47
+ 532 950 532 950 544 950 c 0,48,49
+ 558 950 558 950 567.5 943.5 c 128,-1,50
+ 577 937 577 937 577 924 c 0,51,52
+ 577 915 577 915 562.5 882 c 128,-1,53
+ 548 849 548 849 521 802 c 128,-1,54
+ 494 755 494 755 444.5 698.5 c 128,-1,55
+ 395 642 395 642 337 599 c 1,56,-1
+ 2045 599 l 1,57,-1
+ 2066 599 l 2,58,59
+ 2068 599 2068 599 2071 599 c 128,-1,60
+ 2074 599 2074 599 2078 599 c 0,61,62
+ 2188 599 2188 599 2254 654 c 0,63,64
+ 2313 703 2313 703 2313 770 c 0,65,66
+ 2313 798 2313 798 2302 824.5 c 128,-1,67
+ 2291 851 2291 851 2266 877 c 128,-1,68
+ 2241 903 2241 903 2192 920.5 c 128,-1,69
+ 2143 938 2143 938 2076 941 c 1,70,71
+ 2054 943 2054 943 2042 956 c 0,72,73
+ 2031 969 2031 969 2031 986 c 0,74,75
+ 2031 1030 2031 1030 2087 1030 c 0,76,77
+ 2128 1030 2128 1030 2171 1020 c 0,78,79
+ 2216 1010 2216 1010 2256 989 c 0,80,81
+ 2297 968 2297 968 2330 938 c 0,82,83
+ 2362 908 2362 908 2382 864.5 c 128,-1,0
+ 2402 821 2402 821 2402 770.5 c 128,-1,1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni21AA
@@ -28901,78 +28890,69 @@
LayerCount: 2
Fore
SplineSet
-2211 511 m 5,0,-1
- 504 511 l 2,1,2
- 490 511 490 511 483 511 c 128,-1,3
- 476 511 476 511 464.5 513.5 c 128,-1,4
- 453 516 453 516 446.5 520.5 c 128,-1,5
- 440 525 440 525 435.5 533.5 c 128,-1,6
- 431 542 431 542 431 555 c 128,-1,7
- 431 568 431 568 435.5 576.5 c 128,-1,8
- 440 585 440 585 446.5 589.5 c 128,-1,9
- 453 594 453 594 464.5 596.5 c 128,-1,10
- 476 599 476 599 483 599 c 128,-1,11
- 490 599 490 599 504 599 c 2,12,-1
- 2211 599 l 1,13,14
- 2153 641 2153 641 2104 698 c 128,-1,15
- 2055 755 2055 755 2028 802 c 128,-1,16
- 2001 849 2001 849 1986 882 c 128,-1,17
- 1971 915 1971 915 1971 924 c 0,18,19
- 1971 938 1971 938 1981 944 c 128,-1,20
- 1991 950 1991 950 2005 950 c 0,21,22
- 2022 950 2022 950 2028 943.5 c 128,-1,23
- 2034 937 2034 937 2042 919 c 0,24,25
- 2075 846 2075 846 2115.5 788.5 c 128,-1,26
- 2156 731 2156 731 2202.5 691.5 c 128,-1,27
- 2249 652 2249 652 2290 626.5 c 128,-1,28
- 2331 601 2331 601 2382 577 c 1,29,30
- 2385 577 2385 577 2391.5 571 c 128,-1,31
- 2398 565 2398 565 2398 555 c 0,32,33
- 2398 544 2398 544 2393 539 c 128,-1,34
- 2388 534 2388 534 2367 524 c 0,35,36
- 2321 502 2321 502 2280.5 475.5 c 128,-1,37
- 2240 449 2240 449 2211 424.5 c 128,-1,38
- 2182 400 2182 400 2155.5 369 c 128,-1,39
- 2129 338 2129 338 2113 316 c 128,-1,40
- 2097 294 2097 294 2080.5 264 c 128,-1,41
- 2064 234 2064 234 2057.5 220 c 128,-1,42
- 2051 206 2051 206 2040 182 c 0,43,44
- 2036 174 2036 174 2034.5 171.5 c 128,-1,45
- 2033 169 2033 169 2025 164.5 c 128,-1,46
- 2017 160 2017 160 2005 160 c 0,47,48
- 1991 160 1991 160 1981 166.5 c 128,-1,49
- 1971 173 1971 173 1971 186 c 0,50,51
- 1971 195 1971 195 1986 228 c 128,-1,52
- 2001 261 2001 261 2028 308 c 128,-1,53
- 2055 355 2055 355 2104 412 c 128,-1,54
- 2153 469 2153 469 2211 511 c 5,0,-1
-526 555 m 4,55,56
- 526 511 526 511 471 511 c 0,57,58
- 430 511 430 511 387 520.5 c 128,-1,59
- 344 530 344 530 302.5 551.5 c 128,-1,60
- 261 573 261 573 228 603 c 128,-1,61
- 195 633 195 633 175 676.5 c 128,-1,62
- 155 720 155 720 155 770.5 c 128,-1,63
- 155 821 155 821 175 864.5 c 128,-1,64
- 195 908 195 908 228 938 c 128,-1,65
- 261 968 261 968 302.5 989 c 128,-1,66
- 344 1010 344 1010 387 1020 c 128,-1,67
- 430 1030 430 1030 471 1030 c 0,68,69
- 527 1030 527 1030 526 986 c 0,70,71
- 526 983 526 983 525 978 c 128,-1,72
- 524 973 524 973 520.5 964 c 128,-1,73
- 517 955 517 955 506.5 948.5 c 128,-1,74
- 496 942 496 942 480 941 c 0,75,76
- 364 936 364 936 304 887 c 128,-1,77
- 244 838 244 838 244 770 c 0,78,79
- 244 742 244 742 255.5 716 c 128,-1,80
- 267 690 267 690 292 663.5 c 128,-1,81
- 317 637 317 637 366 620 c 128,-1,82
- 415 603 415 603 482 599 c 0,83,84
- 504 597 504 597 515 584.5 c 128,-1,85
- 526 572 526 572 526 555 c 4,55,56
-EndSplineSet
-Validated: 37
+2211 511 m 1,0,-1
+ 504 511 l 1,1,-1
+ 483 511 l 2,2,3
+ 481 511 481 511 478 511 c 128,-1,4
+ 475 511 475 511 471 511 c 0,5,6
+ 430 511 430 511 387 520.5 c 128,-1,7
+ 344 530 344 530 302.5 551.5 c 128,-1,8
+ 261 573 261 573 228 603 c 128,-1,9
+ 195 633 195 633 175 676.5 c 128,-1,10
+ 155 720 155 720 155 770.5 c 128,-1,11
+ 155 821 155 821 175 864.5 c 128,-1,12
+ 195 908 195 908 228 938 c 128,-1,13
+ 261 968 261 968 302.5 989 c 128,-1,14
+ 344 1010 344 1010 387 1020 c 128,-1,15
+ 430 1030 430 1030 471 1030 c 0,16,17
+ 527 1030 527 1030 526 986 c 0,18,19
+ 526 983 526 983 525 978 c 128,-1,20
+ 524 973 524 973 520.5 964 c 128,-1,21
+ 517 955 517 955 506.5 948.5 c 128,-1,22
+ 496 942 496 942 480 941 c 0,23,24
+ 364 936 364 936 304 887 c 128,-1,25
+ 244 838 244 838 244 770 c 0,26,27
+ 244 742 244 742 255.5 716 c 128,-1,28
+ 267 690 267 690 292 663.5 c 128,-1,29
+ 317 637 317 637 366 620 c 128,-1,30
+ 415 603 415 603 482 599 c 0,31,32
+ 483 599 483 599 483 599 c 2,33,-1
+ 504 599 l 1,34,-1
+ 2211 599 l 1,35,36
+ 2153 641 2153 641 2104 698 c 128,-1,37
+ 2055 755 2055 755 2028 802 c 128,-1,38
+ 2001 849 2001 849 1986 882 c 128,-1,39
+ 1971 915 1971 915 1971 924 c 0,40,41
+ 1971 938 1971 938 1981 944 c 128,-1,42
+ 1991 950 1991 950 2005 950 c 0,43,44
+ 2022 950 2022 950 2028 944 c 0,45,46
+ 2034 937 2034 937 2042 919 c 0,47,48
+ 2073 848 2073 848 2116 788 c 0,49,50
+ 2156 731 2156 731 2202.5 691.5 c 128,-1,51
+ 2249 652 2249 652 2290 626.5 c 128,-1,52
+ 2331 601 2331 601 2382 577 c 1,53,54
+ 2385 577 2385 577 2391.5 571 c 128,-1,55
+ 2398 565 2398 565 2398 555 c 0,56,57
+ 2398 544 2398 544 2393 539 c 128,-1,58
+ 2388 534 2388 534 2367 524 c 0,59,60
+ 2321 502 2321 502 2280.5 475.5 c 128,-1,61
+ 2240 449 2240 449 2211 424.5 c 128,-1,62
+ 2182 400 2182 400 2155.5 369 c 128,-1,63
+ 2129 338 2129 338 2113 316 c 128,-1,64
+ 2097 294 2097 294 2080.5 264 c 128,-1,65
+ 2064 234 2064 234 2058 220 c 2,66,-1
+ 2040 182 l 2,67,68
+ 2036 174 2036 174 2034.5 171.5 c 128,-1,69
+ 2033 169 2033 169 2025 164.5 c 128,-1,70
+ 2017 160 2017 160 2005 160 c 0,71,72
+ 1991 160 1991 160 1981 166.5 c 128,-1,73
+ 1971 173 1971 173 1971 186 c 0,74,75
+ 1971 195 1971 195 1986 228 c 128,-1,76
+ 2001 261 2001 261 2028 308 c 128,-1,77
+ 2055 355 2055 355 2104 412 c 128,-1,78
+ 2153 469 2153 469 2211 511 c 1,0,-1
+EndSplineSet
+Validated: 33
EndChar
StartChar: carriagereturn
@@ -30183,65 +30163,78 @@
LayerCount: 2
Fore
SplineSet
-1221 511 m 2,0,-1
- 275 511 l 1,1,2
- 288 365 288 365 370.5 247.5 c 128,-1,3
- 453 130 453 130 583.5 65 c 128,-1,4
- 714 0 714 0 868 0 c 2,5,-1
- 1221 0 l 2,6,7
- 1237 0 1237 0 1246 -1 c 128,-1,8
- 1255 -2 1255 -2 1268.5 -5.5 c 128,-1,9
- 1282 -9 1282 -9 1288 -19 c 128,-1,10
- 1294 -29 1294 -29 1294 -44.5 c 128,-1,11
- 1294 -60 1294 -60 1288 -70 c 128,-1,12
- 1282 -80 1282 -80 1268.5 -83.5 c 128,-1,13
- 1255 -87 1255 -87 1246 -88 c 128,-1,14
- 1237 -89 1237 -89 1221 -89 c 2,15,-1
- 861 -89 l 2,16,17
- 727 -89 727 -89 603 -39.5 c 128,-1,18
- 479 10 479 10 387 94.5 c 128,-1,19
- 295 179 295 179 239.5 299.5 c 128,-1,20
- 184 420 184 420 184 555 c 0,21,22
- 184 691 184 691 240 812 c 128,-1,23
- 296 933 296 933 388.5 1017 c 128,-1,24
- 481 1101 481 1101 604 1150 c 128,-1,25
- 727 1199 727 1199 859 1199 c 2,26,-1
- 1221 1199 l 2,27,28
- 1237 1199 1237 1199 1246 1198 c 128,-1,29
- 1255 1197 1255 1197 1268.5 1193.5 c 128,-1,30
- 1282 1190 1282 1190 1288 1180 c 128,-1,31
- 1294 1170 1294 1170 1294 1154.5 c 128,-1,32
- 1294 1139 1294 1139 1288 1129 c 128,-1,33
- 1282 1119 1282 1119 1268.5 1115.5 c 128,-1,34
- 1255 1112 1255 1112 1246 1111 c 128,-1,35
- 1237 1110 1237 1110 1221 1110 c 2,36,-1
- 866 1110 l 2,37,38
- 754 1110 754 1110 650.5 1072.5 c 128,-1,39
- 547 1035 547 1035 467.5 968.5 c 128,-1,40
- 388 902 388 902 336.5 806 c 128,-1,41
- 285 710 285 710 275 599 c 1,42,-1
- 1221 599 l 2,43,44
- 1237 599 1237 599 1246 598.5 c 128,-1,45
- 1255 598 1255 598 1268.5 594 c 128,-1,46
- 1282 590 1282 590 1288 580.5 c 128,-1,47
- 1294 571 1294 571 1294 555 c 128,-1,48
- 1294 539 1294 539 1288 529.5 c 128,-1,49
- 1282 520 1282 520 1268.5 516 c 128,-1,50
- 1255 512 1255 512 1246 511.5 c 128,-1,51
- 1237 511 1237 511 1221 511 c 2,0,-1
-1281 1505 m 2,52,-1
- 282 -440 l 2,53,54
- 262 -480 262 -480 233 -480 c 0,55,56
- 212 -480 212 -480 200.5 -465 c 128,-1,57
- 189 -450 189 -450 189 -435 c 0,58,59
- 189 -426 189 -426 204 -395 c 2,60,-1
+715 599 m 1,0,-1
+ 977 1110 l 1,1,-1
+ 866 1110 l 2,2,3
+ 754 1110 754 1110 650.5 1072.5 c 128,-1,4
+ 547 1035 547 1035 467.5 968.5 c 128,-1,5
+ 388 902 388 902 336.5 806 c 128,-1,6
+ 285 710 285 710 275 599 c 1,7,-1
+ 715 599 l 1,0,-1
+816 599 m 1,8,-1
+ 1221 599 l 2,9,10
+ 1237 599 1237 599 1246 598.5 c 128,-1,11
+ 1255 598 1255 598 1268.5 594 c 128,-1,12
+ 1282 590 1282 590 1288 580.5 c 128,-1,13
+ 1294 571 1294 571 1294 555 c 128,-1,14
+ 1294 539 1294 539 1288 529.5 c 128,-1,15
+ 1282 520 1282 520 1268 516 c 0,16,17
+ 1255 512 1255 512 1246 511.5 c 128,-1,18
+ 1237 511 1237 511 1221 511 c 2,19,-1
+ 770 511 l 1,20,-1
+ 550 83 l 1,21,22
+ 567 73 567 73 584 65 c 0,23,24
+ 718 0 718 0 868 0 c 2,25,-1
+ 1221 0 l 2,26,27
+ 1237 0 1237 0 1246 -1 c 128,-1,28
+ 1255 -2 1255 -2 1268.5 -5.5 c 128,-1,29
+ 1282 -9 1282 -9 1288 -19 c 128,-1,30
+ 1294 -29 1294 -29 1294 -44.5 c 128,-1,31
+ 1294 -60 1294 -60 1288 -70 c 128,-1,32
+ 1282 -80 1282 -80 1268.5 -83.5 c 128,-1,33
+ 1255 -87 1255 -87 1246 -88 c 128,-1,34
+ 1237 -89 1237 -89 1221 -89 c 2,35,-1
+ 861 -89 l 2,36,37
+ 723 -89 723 -89 603 -40 c 0,38,39
+ 554 -20 554 -20 510 5 c 1,40,-1
+ 282 -440 l 2,41,42
+ 262 -480 262 -480 233 -480 c 0,43,44
+ 212 -480 212 -480 200.5 -465 c 128,-1,45
+ 189 -450 189 -450 189 -435 c 0,46,47
+ 189 -426 189 -426 204 -395 c 2,48,-1
+ 435 55 l 1,49,50
+ 407 75 407 75 387 94 c 0,51,52
+ 294 182 294 182 240 300 c 0,53,54
+ 184 419 184 419 184 555 c 128,-1,55
+ 184 691 184 691 240 812 c 128,-1,56
+ 296 933 296 933 388.5 1017 c 128,-1,57
+ 481 1101 481 1101 604 1150 c 128,-1,58
+ 727 1199 727 1199 859 1199 c 2,59,-1
+ 1023 1199 l 1,60,-1
1203 1550 l 2,61,62
1223 1590 1223 1590 1252 1590 c 0,63,64
1273 1590 1273 1590 1284.5 1575 c 128,-1,65
- 1296 1560 1296 1560 1296 1545 c 0,66,67
- 1297 1536 1297 1536 1281 1505 c 2,52,-1
-EndSplineSet
-Validated: 5
+ 1296 1560 1296 1560 1296 1545 c 1,66,67
+ 1297 1536 1297 1536 1281 1505 c 2,68,-1
+ 1124 1199 l 1,69,-1
+ 1221 1199 l 2,70,71
+ 1237 1199 1237 1199 1246 1198 c 128,-1,72
+ 1255 1197 1255 1197 1268.5 1193.5 c 128,-1,73
+ 1282 1190 1282 1190 1288 1180 c 128,-1,74
+ 1294 1170 1294 1170 1294 1154.5 c 128,-1,75
+ 1294 1139 1294 1139 1288 1129 c 128,-1,76
+ 1282 1119 1282 1119 1268.5 1115.5 c 128,-1,77
+ 1255 1112 1255 1112 1246 1111 c 128,-1,78
+ 1237 1110 1237 1110 1221 1110 c 2,79,-1
+ 1078 1110 l 1,80,-1
+ 816 599 l 1,8,-1
+476 134 m 1,81,-1
+ 669 511 l 1,82,-1
+ 275 511 l 1,83,84
+ 288 364 288 364 370 248 c 0,85,86
+ 416 183 416 183 476 134 c 1,81,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni220D
@@ -32320,56 +32313,62 @@
LayerCount: 2
Fore
SplineSet
-1401 1505 m 2,0,-1
- 402 -440 l 2,1,2
- 382 -480 382 -480 353 -480 c 0,3,4
- 332 -480 332 -480 320.5 -465 c 128,-1,5
- 309 -450 309 -450 309 -435 c 0,6,7
- 309 -426 309 -426 324 -395 c 2,8,-1
- 1323 1550 l 2,9,10
- 1343 1590 1343 1590 1372 1590 c 0,11,12
- 1393 1590 1393 1590 1404.5 1575 c 128,-1,13
- 1416 1560 1416 1560 1416 1545 c 0,14,15
- 1417 1536 1417 1536 1401 1505 c 2,0,-1
-1467 1110 m 2,16,-1
- 866 1110 l 2,17,18
- 743 1110 743 1110 633 1066.5 c 128,-1,19
- 523 1023 523 1023 444 948.5 c 128,-1,20
- 365 874 365 874 319 771.5 c 128,-1,21
- 273 669 273 669 273 555 c 128,-1,22
- 273 441 273 441 319.5 338.5 c 128,-1,23
- 366 236 366 236 445 161.5 c 128,-1,24
- 524 87 524 87 634 43.5 c 128,-1,25
- 744 0 744 0 866 0 c 2,26,-1
- 1467 0 l 2,27,28
- 1483 0 1483 0 1492.5 -1 c 128,-1,29
- 1502 -2 1502 -2 1515 -5.5 c 128,-1,30
- 1528 -9 1528 -9 1534.5 -19 c 128,-1,31
- 1541 -29 1541 -29 1541 -44.5 c 128,-1,32
- 1541 -60 1541 -60 1534.5 -70 c 128,-1,33
- 1528 -80 1528 -80 1515 -83.5 c 128,-1,34
- 1502 -87 1502 -87 1493 -88 c 128,-1,35
- 1484 -89 1484 -89 1467 -89 c 2,36,-1
- 859 -89 l 2,37,38
- 727 -89 727 -89 604 -40 c 128,-1,39
- 481 9 481 9 388.5 93 c 128,-1,40
- 296 177 296 177 240 298 c 128,-1,41
- 184 419 184 419 184 555 c 128,-1,42
- 184 691 184 691 240 812 c 128,-1,43
- 296 933 296 933 388.5 1017 c 128,-1,44
- 481 1101 481 1101 604 1150 c 128,-1,45
- 727 1199 727 1199 859 1199 c 2,46,-1
- 1467 1199 l 2,47,48
- 1483 1199 1483 1199 1492.5 1198 c 128,-1,49
- 1502 1197 1502 1197 1515 1193.5 c 128,-1,50
- 1528 1190 1528 1190 1534.5 1180 c 128,-1,51
- 1541 1170 1541 1170 1541 1154.5 c 128,-1,52
- 1541 1139 1541 1139 1534.5 1129 c 128,-1,53
- 1528 1119 1528 1119 1515 1115.5 c 128,-1,54
- 1502 1112 1502 1112 1492.5 1111 c 128,-1,55
- 1483 1110 1483 1110 1467 1110 c 2,16,-1
-EndSplineSet
-Validated: 5
+1143 1199 m 1,0,-1
+ 1323 1550 l 2,1,2
+ 1343 1590 1343 1590 1372 1590 c 0,3,4
+ 1393 1590 1393 1590 1404.5 1575 c 128,-1,5
+ 1416 1560 1416 1560 1416 1545 c 1,6,7
+ 1417 1536 1417 1536 1401 1505 c 2,8,-1
+ 1244 1199 l 1,9,-1
+ 1467 1199 l 2,10,11
+ 1483 1199 1483 1199 1492.5 1198 c 128,-1,12
+ 1502 1197 1502 1197 1515 1193.5 c 128,-1,13
+ 1528 1190 1528 1190 1534.5 1180 c 128,-1,14
+ 1541 1170 1541 1170 1541 1154.5 c 128,-1,15
+ 1541 1139 1541 1139 1534.5 1129 c 128,-1,16
+ 1528 1119 1528 1119 1515 1115.5 c 128,-1,17
+ 1502 1112 1502 1112 1492.5 1111 c 128,-1,18
+ 1483 1110 1483 1110 1467 1110 c 2,19,-1
+ 1198 1110 l 1,20,-1
+ 648 38 l 1,21,22
+ 752 0 752 0 866 0 c 2,23,-1
+ 1467 0 l 2,24,25
+ 1483 0 1483 0 1492.5 -1 c 128,-1,26
+ 1502 -2 1502 -2 1515 -5.5 c 128,-1,27
+ 1528 -9 1528 -9 1534.5 -19 c 128,-1,28
+ 1541 -29 1541 -29 1541 -44.5 c 128,-1,29
+ 1541 -60 1541 -60 1534.5 -70 c 128,-1,30
+ 1528 -80 1528 -80 1515 -83.5 c 128,-1,31
+ 1502 -87 1502 -87 1493 -88 c 128,-1,32
+ 1484 -89 1484 -89 1467 -89 c 2,33,-1
+ 859 -89 l 2,34,35
+ 729 -89 729 -89 607 -41 c 1,36,-1
+ 402 -440 l 2,37,38
+ 382 -480 382 -480 353 -480 c 0,39,40
+ 332 -480 332 -480 320.5 -465 c 128,-1,41
+ 309 -450 309 -450 309 -435 c 0,42,43
+ 309 -426 309 -426 324 -395 c 2,44,-1
+ 525 -3 l 1,45,46
+ 450 37 450 37 388 93 c 0,47,48
+ 296 177 296 177 240 298 c 128,-1,49
+ 184 419 184 419 184 555 c 128,-1,50
+ 184 691 184 691 240 812 c 128,-1,51
+ 296 933 296 933 388.5 1017 c 128,-1,52
+ 481 1101 481 1101 604 1150 c 128,-1,53
+ 727 1199 727 1199 859 1199 c 2,54,-1
+ 1143 1199 l 1,0,-1
+566 75 m 1,55,-1
+ 1097 1110 l 1,56,-1
+ 866 1110 l 2,57,58
+ 743 1110 743 1110 633 1066.5 c 128,-1,59
+ 523 1023 523 1023 444 948.5 c 128,-1,60
+ 365 874 365 874 319 771.5 c 128,-1,61
+ 273 669 273 669 273 555 c 0,62,63
+ 273 443 273 443 320 338 c 0,64,65
+ 365 237 365 237 445 162 c 0,66,67
+ 499 110 499 110 566 75 c 1,55,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: reflexsubset
@@ -33613,77 +33612,60 @@
Width: 1971
VWidth: 2220
Flags: W
-HStem: 295 89<294 1679> 726 89<294 1679>
-LayerCount: 2
-Fore
-SplineSet
-1657 726 m 6,0,-1
- 332 726 l 2,1,2
- 318 726 318 726 310.5 726.5 c 128,-1,3
- 303 727 303 727 291 729 c 128,-1,4
- 279 731 279 731 273 735.5 c 128,-1,5
- 267 740 267 740 261.5 749 c 128,-1,6
- 256 758 256 758 256 770 c 0,7,8
- 256 786 256 786 262.5 796 c 128,-1,9
- 269 806 269 806 282 809.5 c 128,-1,10
- 295 813 295 813 304 814 c 128,-1,11
- 313 815 313 815 330 815 c 2,12,-1
- 1659 815 l 2,13,14
- 1675 815 1675 815 1684.5 814 c 128,-1,15
- 1694 813 1694 813 1707 809.5 c 128,-1,16
- 1720 806 1720 806 1726.5 796 c 128,-1,17
- 1733 786 1733 786 1733 770 c 0,18,19
- 1733 757 1733 757 1728 748.5 c 128,-1,20
- 1723 740 1723 740 1716.5 735.5 c 128,-1,21
- 1710 731 1710 731 1698 729 c 128,-1,22
- 1686 727 1686 727 1678.5 726.5 c 128,-1,23
- 1671 726 1671 726 1657 726 c 6,0,-1
-1659 295 m 6,24,-1
- 330 295 l 2,25,26
- 314 295 314 295 304.5 296 c 128,-1,27
- 295 297 295 297 282 300.5 c 128,-1,28
- 269 304 269 304 262.5 314 c 128,-1,29
- 256 324 256 324 256 340 c 0,30,31
- 256 353 256 353 261.5 361.5 c 128,-1,32
- 267 370 267 370 273 374.5 c 128,-1,33
- 279 379 279 379 291 381 c 128,-1,34
- 303 383 303 383 310.5 383.5 c 128,-1,35
- 318 384 318 384 332 384 c 2,36,-1
- 1657 384 l 2,37,38
- 1671 384 1671 384 1678.5 383.5 c 128,-1,39
- 1686 383 1686 383 1698 381 c 128,-1,40
- 1710 379 1710 379 1716.5 374.5 c 128,-1,41
- 1723 370 1723 370 1728 361 c 128,-1,42
- 1733 352 1733 352 1733 340 c 0,43,44
- 1733 324 1733 324 1726.5 314 c 128,-1,45
- 1720 304 1720 304 1707 300.5 c 128,-1,46
- 1694 297 1694 297 1684.5 296 c 128,-1,47
- 1675 295 1675 295 1659 295 c 6,24,-1
-374 1567 m 2,48,-1
- 374 -457 l 2,49,50
- 374 -472 374 -472 373.5 -481 c 128,-1,51
- 373 -490 373 -490 369.5 -505.5 c 128,-1,52
- 366 -521 366 -521 359.5 -530.5 c 128,-1,53
- 353 -540 353 -540 339 -547.5 c 128,-1,54
- 325 -555 325 -555 305 -555 c 0,55,56
- 286 -555 286 -555 272.5 -547.5 c 128,-1,57
- 259 -540 259 -540 252.5 -530.5 c 128,-1,58
- 246 -521 246 -521 243 -505 c 128,-1,59
- 240 -489 240 -489 239.5 -480 c 128,-1,60
- 239 -471 239 -471 239 -457 c 2,61,-1
- 239 1567 l 2,62,63
- 239 1582 239 1582 239.5 1591 c 128,-1,64
- 240 1600 240 1600 243.5 1615.5 c 128,-1,65
- 247 1631 247 1631 253.5 1640.5 c 128,-1,66
- 260 1650 260 1650 274 1657.5 c 128,-1,67
- 288 1665 288 1665 307 1665 c 0,68,69
- 326 1665 326 1665 339.5 1657.5 c 128,-1,70
- 353 1650 353 1650 359.5 1640.5 c 128,-1,71
- 366 1631 366 1631 369.5 1615 c 128,-1,72
- 373 1599 373 1599 373.5 1590 c 128,-1,73
- 374 1581 374 1581 374 1567 c 2,48,-1
-EndSplineSet
-Validated: 5
+HStem: 295 89<374 1726.3> 726 89<374 1726.3>
+VStem: 239 135<-551.614 295 384 726 815 1661.61>
+LayerCount: 2
+Fore
+SplineSet
+374 815 m 1,0,-1
+ 1659 815 l 2,1,2
+ 1675 815 1675 815 1684.5 814 c 128,-1,3
+ 1694 813 1694 813 1707 809.5 c 128,-1,4
+ 1720 806 1720 806 1726.5 796 c 128,-1,5
+ 1733 786 1733 786 1733 770 c 0,6,7
+ 1733 757 1733 757 1728 748.5 c 128,-1,8
+ 1723 740 1723 740 1716.5 735.5 c 128,-1,9
+ 1710 731 1710 731 1698 729 c 128,-1,10
+ 1686 727 1686 727 1678.5 726.5 c 128,-1,11
+ 1671 726 1671 726 1657 726 c 2,12,-1
+ 374 726 l 1,13,-1
+ 374 384 l 1,14,-1
+ 1657 384 l 2,15,16
+ 1671 384 1671 384 1678.5 383.5 c 128,-1,17
+ 1686 383 1686 383 1698 381 c 128,-1,18
+ 1710 379 1710 379 1716.5 374.5 c 128,-1,19
+ 1723 370 1723 370 1728 361 c 128,-1,20
+ 1733 352 1733 352 1733 340 c 0,21,22
+ 1733 324 1733 324 1726.5 314 c 128,-1,23
+ 1720 304 1720 304 1707 300.5 c 128,-1,24
+ 1694 297 1694 297 1684.5 296 c 128,-1,25
+ 1675 295 1675 295 1659 295 c 2,26,-1
+ 374 295 l 1,27,-1
+ 374 -457 l 2,28,29
+ 374 -472 374 -472 373.5 -481 c 128,-1,30
+ 373 -490 373 -490 369.5 -505.5 c 128,-1,31
+ 366 -521 366 -521 359.5 -530.5 c 128,-1,32
+ 353 -540 353 -540 339 -547.5 c 128,-1,33
+ 325 -555 325 -555 305 -555 c 0,34,35
+ 286 -555 286 -555 272.5 -547.5 c 128,-1,36
+ 259 -540 259 -540 252.5 -530.5 c 128,-1,37
+ 246 -521 246 -521 243 -505 c 128,-1,38
+ 240 -489 240 -489 239.5 -480 c 128,-1,39
+ 239 -471 239 -471 239 -457 c 2,40,-1
+ 239 1567 l 2,41,42
+ 239 1582 239 1582 239.5 1591 c 128,-1,43
+ 240 1600 240 1600 243.5 1615.5 c 128,-1,44
+ 247 1631 247 1631 254 1640 c 0,45,46
+ 260 1650 260 1650 274 1657.5 c 128,-1,47
+ 288 1665 288 1665 307 1665 c 128,-1,48
+ 326 1665 326 1665 339.5 1657.5 c 128,-1,49
+ 353 1650 353 1650 359.5 1640.5 c 128,-1,50
+ 366 1631 366 1631 369.5 1615 c 128,-1,51
+ 373 1599 373 1599 373.5 1590 c 128,-1,52
+ 374 1581 374 1581 374 1567 c 2,53,-1
+ 374 815 l 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni22A9
@@ -33691,73 +33673,67 @@
Width: 1445
VWidth: 2220
Flags: W
-HStem: 0 47G<147 191 369 413> 726 89<213 435 435 435 435 1431> 1496 44G<147 191 369 413>
-VStem: 124 89<22 726 815 1519> 346 89<22 726 815 1519>
-LayerCount: 2
-Fore
-SplineSet
-1157 726 m 6,0,-1
- 213 726 l 1,1,-1
- 213 73 l 2,2,3
- 213 57 213 57 212 48 c 128,-1,4
- 211 39 211 39 207.5 26 c 128,-1,5
- 204 13 204 13 194 6.5 c 128,-1,6
- 184 0 184 0 168.5 0 c 128,-1,7
- 153 0 153 0 143 6.5 c 128,-1,8
- 133 13 133 13 129.5 26.5 c 128,-1,9
- 126 40 126 40 125 49.5 c 128,-1,10
- 124 59 124 59 124 75 c 2,11,-1
- 124 1465 l 2,12,13
- 124 1481 124 1481 125 1490.5 c 128,-1,14
- 126 1500 126 1500 129.5 1513.5 c 128,-1,15
- 133 1527 133 1527 143 1534 c 128,-1,16
- 153 1541 153 1541 168.5 1541 c 128,-1,17
- 184 1541 184 1541 194 1534.5 c 128,-1,18
- 204 1528 204 1528 207.5 1515 c 128,-1,19
- 211 1502 211 1502 212 1493 c 128,-1,20
- 213 1484 213 1484 213 1467 c 2,21,-1
- 213 815 l 1,22,-1
- 1157 815 l 2,23,24
- 1173 815 1173 815 1182 814 c 128,-1,25
- 1191 813 1191 813 1204 809.5 c 128,-1,26
- 1217 806 1217 806 1223.5 796 c 128,-1,27
- 1230 786 1230 786 1230 770.5 c 128,-1,28
- 1230 755 1230 755 1223.5 745 c 128,-1,29
- 1217 735 1217 735 1204 731.5 c 128,-1,30
- 1191 728 1191 728 1182 727 c 128,-1,31
- 1173 726 1173 726 1157 726 c 6,0,-1
-1379 726 m 2,32,-1
- 435 726 l 1,33,-1
- 435 73 l 2,34,35
- 435 57 435 57 434 48 c 128,-1,36
- 433 39 433 39 429.5 26 c 128,-1,37
- 426 13 426 13 416 6.5 c 128,-1,38
- 406 0 406 0 390.5 0 c 128,-1,39
- 375 0 375 0 365 6.5 c 128,-1,40
- 355 13 355 13 351.5 26.5 c 128,-1,41
- 348 40 348 40 347 49.5 c 128,-1,42
- 346 59 346 59 346 75 c 2,43,-1
- 346 1465 l 2,44,45
- 346 1481 346 1481 347 1490.5 c 128,-1,46
- 348 1500 348 1500 351.5 1513.5 c 128,-1,47
- 355 1527 355 1527 365 1534 c 128,-1,48
- 375 1541 375 1541 390.5 1541 c 128,-1,49
- 406 1541 406 1541 416 1534.5 c 128,-1,50
- 426 1528 426 1528 429.5 1515 c 128,-1,51
- 433 1502 433 1502 434 1493 c 128,-1,52
- 435 1484 435 1484 435 1467 c 2,53,-1
- 435 815 l 1,54,-1
- 1379 815 l 2,55,56
- 1395 815 1395 815 1404 814 c 128,-1,57
- 1413 813 1413 813 1426 809.5 c 128,-1,58
- 1439 806 1439 806 1445.5 796 c 128,-1,59
- 1452 786 1452 786 1452 770.5 c 128,-1,60
- 1452 755 1452 755 1445.5 745 c 128,-1,61
- 1439 735 1439 735 1426 731.5 c 128,-1,62
- 1413 728 1413 728 1404 727 c 128,-1,63
- 1395 726 1395 726 1379 726 c 2,32,-1
-EndSplineSet
-Validated: 5
+HStem: 0 21G<160.75 176.25 382.75 398.25> 726 89<213 346 435 1452> 1521 20G<160.75 176.25 382.75 398.25>
+VStem: 124 89<6.70483 726 815 1533.78> 346 89<6.70483 726 815 1533.78>
+LayerCount: 2
+Fore
+SplineSet
+1157 815 m 1,0,-1
+ 1379 815 l 2,1,2
+ 1395 815 1395 815 1404 814 c 128,-1,3
+ 1413 813 1413 813 1426 809.5 c 128,-1,4
+ 1439 806 1439 806 1445.5 796 c 128,-1,5
+ 1452 786 1452 786 1452 770.5 c 128,-1,6
+ 1452 755 1452 755 1445.5 745 c 128,-1,7
+ 1439 735 1439 735 1426 731.5 c 128,-1,8
+ 1413 728 1413 728 1404 727 c 128,-1,9
+ 1395 726 1395 726 1379 726 c 2,10,-1
+ 1157 726 l 1,11,-1
+ 435 726 l 1,12,-1
+ 435 73 l 2,13,14
+ 435 57 435 57 434 48 c 0,15,16
+ 432 34 432 34 430 26 c 0,17,18
+ 426 13 426 13 416 6 c 0,19,20
+ 406 0 406 0 390.5 0 c 128,-1,21
+ 375 0 375 0 365 6 c 0,22,23
+ 355 13 355 13 351.5 26.5 c 128,-1,24
+ 348 40 348 40 347 49.5 c 128,-1,25
+ 346 59 346 59 346 75 c 2,26,-1
+ 346 726 l 1,27,-1
+ 213 726 l 1,28,-1
+ 213 73 l 2,29,30
+ 213 57 213 57 212 48 c 0,31,32
+ 210 34 210 34 208 26 c 0,33,34
+ 204 13 204 13 194 6 c 0,35,36
+ 184 0 184 0 168.5 0 c 128,-1,37
+ 153 0 153 0 143 6 c 0,38,39
+ 133 13 133 13 129.5 26.5 c 128,-1,40
+ 126 40 126 40 125 49.5 c 128,-1,41
+ 124 59 124 59 124 75 c 2,42,-1
+ 124 1465 l 2,43,44
+ 124 1481 124 1481 125 1490.5 c 128,-1,45
+ 126 1500 126 1500 129.5 1513.5 c 128,-1,46
+ 133 1527 133 1527 143 1534 c 128,-1,47
+ 153 1541 153 1541 168.5 1541 c 128,-1,48
+ 184 1541 184 1541 194 1534.5 c 128,-1,49
+ 204 1528 204 1528 207.5 1515 c 128,-1,50
+ 211 1502 211 1502 212 1493 c 128,-1,51
+ 213 1484 213 1484 213 1467 c 2,52,-1
+ 213 815 l 1,53,-1
+ 346 815 l 1,54,-1
+ 346 1465 l 2,55,56
+ 346 1481 346 1481 347 1490.5 c 128,-1,57
+ 348 1500 348 1500 351.5 1513.5 c 128,-1,58
+ 355 1527 355 1527 365 1534 c 128,-1,59
+ 375 1541 375 1541 390.5 1541 c 128,-1,60
+ 406 1541 406 1541 416 1534.5 c 128,-1,61
+ 426 1528 426 1528 429.5 1515 c 128,-1,62
+ 433 1502 433 1502 434 1493 c 128,-1,63
+ 435 1484 435 1484 435 1467 c 2,64,-1
+ 435 815 l 1,65,-1
+ 1157 815 l 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni22AB
@@ -33765,12 +33741,12 @@
Width: 2148
VWidth: 2220
Flags: W
-HStem: 295 89<565 1978> 726 89<565 1978>
-VStem: 264 89<-534 1644> 523 118<295 384 726 815>
-LayerCount: 2
-Fore
-SplineSet
-353 1592 m 6,0,-1
+HStem: 295 89<640 1992.3> 726 89<640 1992.3>
+VStem: 264 89<-554.998 1665> 505 135<-551.614 295 384 726 815 1661.61>
+LayerCount: 2
+Fore
+SplineSet
+353 1592 m 2,0,-1
353 -482 l 2,1,2
353 -498 353 -498 352 -507 c 128,-1,3
351 -516 351 -516 347.5 -529 c 128,-1,4
@@ -33788,74 +33764,56 @@
324 1665 324 1665 334 1658.5 c 128,-1,17
344 1652 344 1652 347.5 1639 c 128,-1,18
351 1626 351 1626 352 1617 c 128,-1,19
- 353 1608 353 1608 353 1592 c 6,0,-1
-1924 726 m 2,20,-1
- 598 726 l 2,21,22
- 584 726 584 726 577 726.5 c 128,-1,23
- 570 727 570 727 558 729 c 128,-1,24
- 546 731 546 731 539.5 735.5 c 128,-1,25
- 533 740 533 740 528 749 c 128,-1,26
- 523 758 523 758 523 770 c 0,27,28
- 523 786 523 786 529 796 c 128,-1,29
- 535 806 535 806 548.5 809.5 c 128,-1,30
- 562 813 562 813 571 814 c 128,-1,31
- 580 815 580 815 596 815 c 2,32,-1
- 1926 815 l 2,33,34
- 1942 815 1942 815 1951 814 c 128,-1,35
- 1960 813 1960 813 1973 809.5 c 128,-1,36
- 1986 806 1986 806 1992.5 796 c 128,-1,37
- 1999 786 1999 786 1999 770 c 0,38,39
- 1999 757 1999 757 1994 748.5 c 128,-1,40
- 1989 740 1989 740 1982.5 735.5 c 128,-1,41
- 1976 731 1976 731 1964 729 c 128,-1,42
- 1952 727 1952 727 1945 726.5 c 128,-1,43
- 1938 726 1938 726 1924 726 c 2,20,-1
-1926 295 m 2,44,-1
- 596 295 l 2,45,46
- 580 295 580 295 571 296 c 128,-1,47
- 562 297 562 297 548.5 300.5 c 128,-1,48
- 535 304 535 304 529 314 c 128,-1,49
- 523 324 523 324 523 340 c 0,50,51
- 523 353 523 353 528 361.5 c 128,-1,52
- 533 370 533 370 539.5 374.5 c 128,-1,53
- 546 379 546 379 558 381 c 128,-1,54
- 570 383 570 383 577.5 383.5 c 128,-1,55
- 585 384 585 384 598 384 c 2,56,-1
- 1924 384 l 2,57,58
- 1938 384 1938 384 1945 383.5 c 128,-1,59
- 1952 383 1952 383 1964 381 c 128,-1,60
- 1976 379 1976 379 1982.5 374.5 c 128,-1,61
- 1989 370 1989 370 1994 361 c 128,-1,62
- 1999 352 1999 352 1999 340 c 0,63,64
- 1999 324 1999 324 1992.5 314 c 128,-1,65
- 1986 304 1986 304 1973 300.5 c 128,-1,66
- 1960 297 1960 297 1951 296 c 128,-1,67
- 1942 295 1942 295 1926 295 c 2,44,-1
-640 1567 m 2,68,-1
- 640 -457 l 2,69,70
- 640 -472 640 -472 640 -481 c 128,-1,71
- 640 -490 640 -490 636 -505.5 c 128,-1,72
- 632 -521 632 -521 625.5 -530.5 c 128,-1,73
- 619 -540 619 -540 605 -547.5 c 128,-1,74
- 591 -555 591 -555 572 -555 c 0,75,76
- 553 -555 553 -555 539.5 -547.5 c 128,-1,77
- 526 -540 526 -540 519.5 -530.5 c 128,-1,78
- 513 -521 513 -521 509.5 -505 c 128,-1,79
- 506 -489 506 -489 505.5 -480 c 128,-1,80
- 505 -471 505 -471 505 -457 c 2,81,-1
- 505 1567 l 2,82,83
- 505 1582 505 1582 505.5 1591 c 128,-1,84
- 506 1600 506 1600 509.5 1615.5 c 128,-1,85
- 513 1631 513 1631 519.5 1640.5 c 128,-1,86
- 526 1650 526 1650 540 1657.5 c 128,-1,87
- 554 1665 554 1665 574 1665 c 0,88,89
- 593 1665 593 1665 606.5 1657.5 c 128,-1,90
- 620 1650 620 1650 626.5 1640.5 c 128,-1,91
- 633 1631 633 1631 636.5 1615 c 128,-1,92
- 640 1599 640 1599 640 1590 c 128,-1,93
- 640 1581 640 1581 640 1567 c 2,68,-1
-EndSplineSet
-Validated: 1029
+ 353 1608 353 1608 353 1592 c 2,0,-1
+640 384 m 1,20,-1
+ 1924 384 l 2,21,22
+ 1938 384 1938 384 1945 383.5 c 128,-1,23
+ 1952 383 1952 383 1964 381 c 128,-1,24
+ 1976 379 1976 379 1982.5 374.5 c 128,-1,25
+ 1989 370 1989 370 1994 361 c 128,-1,26
+ 1999 352 1999 352 1999 340 c 0,27,28
+ 1999 324 1999 324 1992.5 314 c 128,-1,29
+ 1986 304 1986 304 1973 300.5 c 128,-1,30
+ 1960 297 1960 297 1951 296 c 128,-1,31
+ 1942 295 1942 295 1926 295 c 2,32,-1
+ 640 295 l 1,33,-1
+ 640 -457 l 1,34,-1
+ 640 -481 l 2,35,36
+ 640 -490 640 -490 636 -505.5 c 128,-1,37
+ 632 -521 632 -521 625.5 -530.5 c 128,-1,38
+ 619 -540 619 -540 605 -547.5 c 128,-1,39
+ 591 -555 591 -555 572 -555 c 128,-1,40
+ 553 -555 553 -555 539.5 -547.5 c 128,-1,41
+ 526 -540 526 -540 519.5 -530.5 c 128,-1,42
+ 513 -521 513 -521 509.5 -505 c 128,-1,43
+ 506 -489 506 -489 505.5 -480 c 128,-1,44
+ 505 -471 505 -471 505 -457 c 2,45,-1
+ 505 1567 l 2,46,47
+ 505 1582 505 1582 505.5 1591 c 128,-1,48
+ 506 1600 506 1600 509.5 1615.5 c 128,-1,49
+ 513 1631 513 1631 519.5 1640.5 c 128,-1,50
+ 526 1650 526 1650 540 1657.5 c 128,-1,51
+ 554 1665 554 1665 574 1665 c 0,52,53
+ 593 1665 593 1665 606.5 1657.5 c 128,-1,54
+ 620 1650 620 1650 626.5 1640.5 c 128,-1,55
+ 633 1631 633 1631 636.5 1615 c 128,-1,56
+ 640 1599 640 1599 640 1590 c 2,57,-1
+ 640 1567 l 1,58,-1
+ 640 815 l 1,59,-1
+ 1926 815 l 2,60,61
+ 1942 815 1942 815 1951 814 c 128,-1,62
+ 1960 813 1960 813 1973 809.5 c 128,-1,63
+ 1986 806 1986 806 1992.5 796 c 128,-1,64
+ 1999 786 1999 786 1999 770 c 0,65,66
+ 1999 757 1999 757 1994 748.5 c 128,-1,67
+ 1989 740 1989 740 1982.5 735.5 c 128,-1,68
+ 1976 731 1976 731 1964 729 c 128,-1,69
+ 1952 727 1952 727 1945 726.5 c 128,-1,70
+ 1938 726 1938 726 1924 726 c 2,71,-1
+ 640 726 l 1,72,-1
+ 640 384 l 1,20,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni22B2
@@ -35915,72 +35873,61 @@
LayerCount: 2
Fore
SplineSet
-2101 511 m 6,0,-1
- 394 511 l 1,1,2
- 452 469 452 469 501 412 c 128,-1,3
- 550 355 550 355 577 308 c 128,-1,4
- 604 261 604 261 619 228 c 128,-1,5
- 634 195 634 195 634 186 c 0,6,7
- 634 172 634 172 624 166 c 128,-1,8
- 614 160 614 160 601 160 c 0,9,10
- 584 160 584 160 577.5 166.5 c 128,-1,11
- 571 173 571 173 563 191 c 0,12,13
- 512 301 512 301 437 383.5 c 128,-1,14
- 362 466 362 466 234 526 c 0,15,16
- 228 529 228 529 223.5 532 c 128,-1,17
- 219 535 219 535 215.5 538 c 128,-1,18
- 212 541 212 541 210 545 c 128,-1,19
- 208 549 208 549 208 555 c 0,20,21
- 208 561 208 561 208.5 564 c 128,-1,22
- 209 567 209 567 215 571.5 c 128,-1,23
- 221 576 221 576 222.5 577 c 128,-1,24
- 224 578 224 578 239 586 c 0,25,26
- 302 616 302 616 355 655.5 c 128,-1,27
- 408 695 408 695 440 728.5 c 128,-1,28
- 472 762 472 762 500 805 c 128,-1,29
- 528 848 528 848 539.5 871.5 c 128,-1,30
- 551 895 551 895 565 928 c 0,31,32
- 569 936 569 936 570.5 938.5 c 128,-1,33
- 572 941 572 941 580 945.5 c 128,-1,34
- 588 950 588 950 601 950 c 0,35,36
- 615 950 615 950 624.5 943.5 c 128,-1,37
- 634 937 634 937 634 924 c 0,38,39
- 634 915 634 915 619 882 c 128,-1,40
- 604 849 604 849 577 802 c 128,-1,41
- 550 755 550 755 501 698.5 c 128,-1,42
- 452 642 452 642 394 599 c 1,43,-1
- 2101 599 l 2,44,45
- 2115 599 2115 599 2122 599 c 128,-1,46
- 2129 599 2129 599 2141 596.5 c 128,-1,47
- 2153 594 2153 594 2159 589.5 c 128,-1,48
- 2165 585 2165 585 2169.5 576.5 c 128,-1,49
- 2174 568 2174 568 2174 555 c 128,-1,50
- 2174 542 2174 542 2169.5 533.5 c 128,-1,51
- 2165 525 2165 525 2159 520.5 c 128,-1,52
- 2153 516 2153 516 2141 513.5 c 128,-1,53
- 2129 511 2129 511 2122 511 c 128,-1,54
- 2115 511 2115 511 2101 511 c 6,0,-1
-3382 511 m 6,55,-1
- 2172 511 l 2,56,57
- 2156 511 2156 511 2147 511.5 c 128,-1,58
- 2138 512 2138 512 2125 516 c 128,-1,59
- 2112 520 2112 520 2105.5 529.5 c 128,-1,60
- 2099 539 2099 539 2099 555 c 128,-1,61
- 2099 571 2099 571 2105.5 580.5 c 128,-1,62
- 2112 590 2112 590 2125 594 c 128,-1,63
- 2138 598 2138 598 2147 598.5 c 128,-1,64
- 2156 599 2156 599 2172 599 c 2,65,-1
- 3382 599 l 2,66,67
- 3398 599 3398 599 3407.5 598.5 c 128,-1,68
- 3417 598 3417 598 3430 594 c 128,-1,69
- 3443 590 3443 590 3449 580.5 c 128,-1,70
- 3455 571 3455 571 3455 555 c 128,-1,71
- 3455 539 3455 539 3449 529.5 c 128,-1,72
- 3443 520 3443 520 3430 516 c 128,-1,73
- 3417 512 3417 512 3407.5 511.5 c 128,-1,74
- 3398 511 3398 511 3382 511 c 6,55,-1
-EndSplineSet
-Validated: 5
+3382 511 m 2,0,-1
+ 2172 511 l 2,1,2
+ 2156 511 2156 511 2147 512 c 0,3,4
+ 2143 512 2143 512 2137 513 c 1,5,6
+ 2128 511 2128 511 2122 511 c 2,7,-1
+ 2101 511 l 1,8,-1
+ 394 511 l 1,9,10
+ 452 469 452 469 501 412 c 128,-1,11
+ 550 355 550 355 577 308 c 128,-1,12
+ 604 261 604 261 619 228 c 128,-1,13
+ 634 195 634 195 634 186 c 0,14,15
+ 634 172 634 172 624 166 c 128,-1,16
+ 614 160 614 160 601 160 c 0,17,18
+ 584 160 584 160 577.5 166.5 c 128,-1,19
+ 571 173 571 173 563 191 c 0,20,21
+ 512 301 512 301 437 383.5 c 128,-1,22
+ 362 466 362 466 234 526 c 0,23,24
+ 228 529 228 529 223.5 532 c 128,-1,25
+ 219 535 219 535 215.5 538 c 128,-1,26
+ 212 541 212 541 210 545 c 128,-1,27
+ 208 549 208 549 208 555 c 128,-1,28
+ 208 561 208 561 208.5 564 c 128,-1,29
+ 209 567 209 567 215 571.5 c 128,-1,30
+ 221 576 221 576 222.5 577 c 128,-1,31
+ 224 578 224 578 239 586 c 0,32,33
+ 302 616 302 616 355 655.5 c 128,-1,34
+ 408 695 408 695 440 728 c 0,35,36
+ 472 762 472 762 500 805 c 128,-1,37
+ 528 848 528 848 539.5 871.5 c 128,-1,38
+ 551 895 551 895 565 928 c 0,39,40
+ 569 936 569 936 570.5 938.5 c 128,-1,41
+ 572 941 572 941 580 945.5 c 128,-1,42
+ 588 950 588 950 601 950 c 0,43,44
+ 615 950 615 950 624.5 943.5 c 128,-1,45
+ 634 937 634 937 634 924 c 0,46,47
+ 634 915 634 915 619 882 c 128,-1,48
+ 604 849 604 849 577 802 c 128,-1,49
+ 550 755 550 755 501 698.5 c 128,-1,50
+ 452 642 452 642 394 599 c 1,51,-1
+ 2101 599 l 1,52,-1
+ 2122 599 l 2,53,54
+ 2128 599 2128 599 2137 597 c 1,55,56
+ 2143 598 2143 598 2147 598.5 c 128,-1,57
+ 2151 599 2151 599 2172 599 c 2,58,-1
+ 3382 599 l 2,59,60
+ 3398 599 3398 599 3407.5 598.5 c 128,-1,61
+ 3417 598 3417 598 3430 594 c 128,-1,62
+ 3443 590 3443 590 3449 580.5 c 128,-1,63
+ 3455 571 3455 571 3455 555 c 128,-1,64
+ 3455 539 3455 539 3449 529.5 c 128,-1,65
+ 3443 520 3443 520 3430 516 c 128,-1,66
+ 3417 512 3417 512 3407.5 511.5 c 128,-1,67
+ 3398 511 3398 511 3382 511 c 2,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni27F6
@@ -35988,77 +35935,66 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 511 89<233 1525 1525 1525 1525 3265>
-VStem: 1484 84<511 599>
-LayerCount: 2
-Fore
-SplineSet
-3265 511 m 5,0,-1
- 1557 511 l 2,1,2
- 1543 511 1543 511 1536 511 c 128,-1,3
- 1529 511 1529 511 1517.5 513.5 c 128,-1,4
- 1506 516 1506 516 1500 520.5 c 128,-1,5
- 1494 525 1494 525 1489 533.5 c 128,-1,6
- 1484 542 1484 542 1484 555 c 128,-1,7
- 1484 568 1484 568 1489 576.5 c 128,-1,8
- 1494 585 1494 585 1500 589.5 c 128,-1,9
- 1506 594 1506 594 1517.5 596.5 c 128,-1,10
- 1529 599 1529 599 1536.5 599 c 128,-1,11
- 1544 599 1544 599 1557 599 c 2,12,-1
- 3265 599 l 1,13,14
- 3207 641 3207 641 3157.5 698 c 128,-1,15
- 3108 755 3108 755 3081 802 c 128,-1,16
- 3054 849 3054 849 3039.5 882 c 128,-1,17
- 3025 915 3025 915 3025 924 c 0,18,19
- 3025 938 3025 938 3035 944 c 128,-1,20
- 3045 950 3045 950 3058 950 c 0,21,22
- 3075 950 3075 950 3081.5 943.5 c 128,-1,23
- 3088 937 3088 937 3096 919 c 0,24,25
- 3129 846 3129 846 3169.5 788.5 c 128,-1,26
- 3210 731 3210 731 3256 691.5 c 128,-1,27
- 3302 652 3302 652 3343 626.5 c 128,-1,28
- 3384 601 3384 601 3435 577 c 1,29,30
- 3438 577 3438 577 3444.5 571 c 128,-1,31
- 3451 565 3451 565 3451 555 c 0,32,33
- 3451 544 3451 544 3446 539 c 128,-1,34
- 3441 534 3441 534 3420 524 c 0,35,36
- 3374 502 3374 502 3333.5 475.5 c 128,-1,37
- 3293 449 3293 449 3264 424.5 c 128,-1,38
- 3235 400 3235 400 3208.5 369 c 128,-1,39
- 3182 338 3182 338 3166 316 c 128,-1,40
- 3150 294 3150 294 3133.5 264 c 128,-1,41
- 3117 234 3117 234 3110.5 220 c 128,-1,42
- 3104 206 3104 206 3094 182 c 0,43,44
- 3090 174 3090 174 3088 171.5 c 128,-1,45
- 3086 169 3086 169 3078 164.5 c 128,-1,46
- 3070 160 3070 160 3058 160 c 0,47,48
- 3044 160 3044 160 3034.5 166.5 c 128,-1,49
- 3025 173 3025 173 3025 186 c 0,50,51
- 3025 195 3025 195 3039.5 228 c 128,-1,52
- 3054 261 3054 261 3081 308 c 128,-1,53
- 3108 355 3108 355 3157.5 412 c 128,-1,54
- 3207 469 3207 469 3265 511 c 5,0,-1
-1495 511 m 6,55,-1
- 285 511 l 2,56,57
- 269 511 269 511 260 511.5 c 128,-1,58
- 251 512 251 512 238 516 c 128,-1,59
- 225 520 225 520 218.5 529.5 c 128,-1,60
- 212 539 212 539 212 555 c 128,-1,61
- 212 571 212 571 218.5 580.5 c 128,-1,62
- 225 590 225 590 238 594 c 128,-1,63
- 251 598 251 598 260 598.5 c 128,-1,64
- 269 599 269 599 285 599 c 2,65,-1
- 1495 599 l 2,66,67
- 1511 599 1511 599 1520.5 598.5 c 128,-1,68
- 1530 598 1530 598 1543 594 c 128,-1,69
- 1556 590 1556 590 1562 580.5 c 128,-1,70
- 1568 571 1568 571 1568 555 c 128,-1,71
- 1568 539 1568 539 1562 529.5 c 128,-1,72
- 1556 520 1556 520 1543 516 c 128,-1,73
- 1530 512 1530 512 1520.5 511.5 c 128,-1,74
- 1511 511 1511 511 1495 511 c 6,55,-1
-EndSplineSet
-Validated: 5
+HStem: 511 88<218.705 3265>
+VStem: 3025 71<849.875 943.302>
+LayerCount: 2
+Fore
+SplineSet
+1495 511 m 2,0,-1
+ 285 511 l 2,1,2
+ 269 511 269 511 260 511.5 c 128,-1,3
+ 251 512 251 512 238 516 c 128,-1,4
+ 225 520 225 520 218.5 529.5 c 128,-1,5
+ 212 539 212 539 212 555 c 128,-1,6
+ 212 571 212 571 218.5 580.5 c 128,-1,7
+ 225 590 225 590 238 594 c 128,-1,8
+ 251 598 251 598 260 598.5 c 128,-1,9
+ 269 599 269 599 285 599 c 2,10,-1
+ 1495 599 l 2,11,12
+ 1511 599 1511 599 1520 598 c 0,13,14
+ 1526 598 1526 598 1526 598 c 0,15,16
+ 1532 599 1532 599 1536 599 c 2,17,-1
+ 1557 599 l 1,18,-1
+ 3265 599 l 1,19,20
+ 3207 641 3207 641 3157.5 698 c 128,-1,21
+ 3108 755 3108 755 3081 802 c 128,-1,22
+ 3054 849 3054 849 3039.5 882 c 128,-1,23
+ 3025 915 3025 915 3025 924 c 0,24,25
+ 3025 938 3025 938 3035 944 c 128,-1,26
+ 3045 950 3045 950 3058 950 c 0,27,28
+ 3075 950 3075 950 3082 944 c 0,29,30
+ 3088 937 3088 937 3096 919 c 0,31,32
+ 3127 848 3127 848 3170 788 c 0,33,34
+ 3210 731 3210 731 3256 691.5 c 128,-1,35
+ 3302 652 3302 652 3343 626.5 c 128,-1,36
+ 3384 601 3384 601 3435 577 c 1,37,38
+ 3438 577 3438 577 3444.5 571 c 128,-1,39
+ 3451 565 3451 565 3451 555 c 0,40,41
+ 3451 544 3451 544 3446 539 c 128,-1,42
+ 3441 534 3441 534 3420 524 c 0,43,44
+ 3374 502 3374 502 3333.5 475.5 c 128,-1,45
+ 3293 449 3293 449 3264 424.5 c 128,-1,46
+ 3235 400 3235 400 3208.5 369 c 128,-1,47
+ 3182 338 3182 338 3166 316 c 128,-1,48
+ 3150 294 3150 294 3133.5 264 c 128,-1,49
+ 3117 234 3117 234 3110.5 220 c 128,-1,50
+ 3104 206 3104 206 3094 182 c 0,51,52
+ 3090 174 3090 174 3088 171.5 c 128,-1,53
+ 3086 169 3086 169 3078 164 c 0,54,55
+ 3070 160 3070 160 3058 160 c 0,56,57
+ 3044 160 3044 160 3034.5 166.5 c 128,-1,58
+ 3025 173 3025 173 3025 186 c 0,59,60
+ 3025 195 3025 195 3039.5 228 c 128,-1,61
+ 3054 261 3054 261 3081 308 c 128,-1,62
+ 3108 355 3108 355 3157.5 412 c 128,-1,63
+ 3207 469 3207 469 3265 511 c 1,64,-1
+ 1557 511 l 1,65,-1
+ 1536 511 l 2,66,67
+ 1532 511 1532 511 1526 512 c 0,68,69
+ 1523 512 1523 512 1520.5 511.5 c 128,-1,71
+ 1518 511 1518 511 1495 511 c 2,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni27F7
@@ -36066,102 +36002,88 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 511 89<313 2047 1583 3326>
-LayerCount: 2
-Fore
-SplineSet
-2020 511 m 6,0,-1
- 313 511 l 1,1,2
- 371 469 371 469 420 412 c 128,-1,3
- 469 355 469 355 496 308 c 128,-1,4
- 523 261 523 261 538 228 c 128,-1,5
- 553 195 553 195 553 186 c 0,6,7
- 553 172 553 172 543 166 c 128,-1,8
- 533 160 533 160 519 160 c 0,9,10
- 502 160 502 160 496 166.5 c 128,-1,11
- 490 173 490 173 482 191 c 0,12,13
- 431 301 431 301 356 383.5 c 128,-1,14
- 281 466 281 466 153 526 c 0,15,16
- 147 529 147 529 142.5 532 c 128,-1,17
- 138 535 138 535 134 538 c 128,-1,18
- 130 541 130 541 128.5 545 c 128,-1,19
- 127 549 127 549 127 555 c 0,20,21
- 127 561 127 561 127.5 564 c 128,-1,22
- 128 567 128 567 134 571.5 c 128,-1,23
- 140 576 140 576 141.5 577 c 128,-1,24
- 143 578 143 578 158 586 c 0,25,26
- 221 616 221 616 274 655.5 c 128,-1,27
- 327 695 327 695 359 728.5 c 128,-1,28
- 391 762 391 762 419 805 c 128,-1,29
- 447 848 447 848 458.5 871.5 c 128,-1,30
- 470 895 470 895 484 928 c 0,31,32
- 488 936 488 936 489.5 938.5 c 128,-1,33
- 491 941 491 941 499 945.5 c 128,-1,34
- 507 950 507 950 519 950 c 0,35,36
- 533 950 533 950 543 943.5 c 128,-1,37
- 553 937 553 937 553 924 c 0,38,39
- 553 915 553 915 538 882 c 128,-1,40
- 523 849 523 849 496 802 c 128,-1,41
- 469 755 469 755 420 698.5 c 128,-1,42
- 371 642 371 642 313 599 c 1,43,-1
- 2020 599 l 2,44,45
- 2034 599 2034 599 2041 599 c 128,-1,46
- 2048 599 2048 599 2060 596.5 c 128,-1,47
- 2072 594 2072 594 2078 589.5 c 128,-1,48
- 2084 585 2084 585 2088.5 576.5 c 128,-1,49
- 2093 568 2093 568 2093 555 c 128,-1,50
- 2093 542 2093 542 2088.5 533.5 c 128,-1,51
- 2084 525 2084 525 2078 520.5 c 128,-1,52
- 2072 516 2072 516 2060 513.5 c 128,-1,53
- 2048 511 2048 511 2041 511 c 128,-1,54
- 2034 511 2034 511 2020 511 c 6,0,-1
-3328 511 m 5,55,-1
- 1621 511 l 2,56,57
- 1607 511 1607 511 1600 511 c 128,-1,58
- 1593 511 1593 511 1581 513.5 c 128,-1,59
- 1569 516 1569 516 1563 520.5 c 128,-1,60
- 1557 525 1557 525 1552 533.5 c 128,-1,61
- 1547 542 1547 542 1547 555 c 128,-1,62
- 1547 568 1547 568 1552 576.5 c 128,-1,63
- 1557 585 1557 585 1563 589.5 c 128,-1,64
- 1569 594 1569 594 1581 596.5 c 128,-1,65
- 1593 599 1593 599 1600 599 c 128,-1,66
- 1607 599 1607 599 1621 599 c 2,67,-1
- 3328 599 l 1,68,69
- 3270 641 3270 641 3221 698 c 128,-1,70
- 3172 755 3172 755 3145 802 c 128,-1,71
- 3118 849 3118 849 3103 882 c 128,-1,72
- 3088 915 3088 915 3088 924 c 0,73,74
- 3088 938 3088 938 3098 944 c 128,-1,75
- 3108 950 3108 950 3121 950 c 0,76,77
- 3138 950 3138 950 3144.5 943.5 c 128,-1,78
- 3151 937 3151 937 3159 919 c 0,79,80
- 3192 846 3192 846 3232.5 788.5 c 128,-1,81
- 3273 731 3273 731 3319.5 691.5 c 128,-1,82
- 3366 652 3366 652 3406.5 626.5 c 128,-1,83
- 3447 601 3447 601 3499 577 c 1,84,85
- 3502 577 3502 577 3508 571 c 128,-1,86
- 3514 565 3514 565 3514 555 c 0,87,88
- 3514 544 3514 544 3509 539 c 128,-1,89
- 3504 534 3504 534 3483 524 c 0,90,91
- 3437 502 3437 502 3396.5 475.5 c 128,-1,92
- 3356 449 3356 449 3327 424.5 c 128,-1,93
- 3298 400 3298 400 3271.5 369 c 128,-1,94
- 3245 338 3245 338 3229.5 316 c 128,-1,95
- 3214 294 3214 294 3197.5 264 c 128,-1,96
- 3181 234 3181 234 3174 220 c 128,-1,97
- 3167 206 3167 206 3157 182 c 0,98,99
- 3153 174 3153 174 3151.5 171.5 c 128,-1,100
- 3150 169 3150 169 3142 164.5 c 128,-1,101
- 3134 160 3134 160 3121 160 c 0,102,103
- 3107 160 3107 160 3097.5 166.5 c 128,-1,104
- 3088 173 3088 173 3088 186 c 0,105,106
- 3088 195 3088 195 3103 228 c 128,-1,107
- 3118 261 3118 261 3145 308 c 128,-1,108
- 3172 355 3172 355 3221 412 c 128,-1,109
- 3270 469 3270 469 3328 511 c 5,55,-1
-EndSplineSet
-Validated: 1029
+HStem: 511 88<313 3328>
+VStem: 482 71<166.706 257.064> 3088 71<849.875 943.302>
+LayerCount: 2
+Fore
+SplineSet
+2041 511 m 1,0,-1
+ 2020 511 l 1,1,-1
+ 1621 511 l 1,2,-1
+ 1600 511 l 1,3,-1
+ 313 511 l 1,4,5
+ 371 469 371 469 420 412 c 128,-1,6
+ 469 355 469 355 496 308 c 128,-1,7
+ 523 261 523 261 538 228 c 128,-1,8
+ 553 195 553 195 553 186 c 0,9,10
+ 553 172 553 172 543 166 c 128,-1,11
+ 533 160 533 160 519 160 c 0,12,13
+ 502 160 502 160 496 166.5 c 128,-1,14
+ 490 173 490 173 482 191 c 0,15,16
+ 431 301 431 301 356 383.5 c 128,-1,17
+ 281 466 281 466 153 526 c 0,18,19
+ 147 529 147 529 142.5 532 c 128,-1,20
+ 138 535 138 535 134 538 c 128,-1,21
+ 130 541 130 541 128.5 545 c 128,-1,22
+ 127 549 127 549 127 555 c 128,-1,23
+ 127 561 127 561 127.5 564 c 128,-1,24
+ 128 567 128 567 134 571.5 c 128,-1,25
+ 140 576 140 576 141.5 577 c 128,-1,26
+ 143 578 143 578 158 586 c 0,27,28
+ 221 616 221 616 274 655.5 c 128,-1,29
+ 327 695 327 695 359 728 c 0,30,31
+ 391 762 391 762 419 805 c 128,-1,32
+ 447 848 447 848 458.5 871.5 c 128,-1,33
+ 470 895 470 895 484 928 c 0,34,35
+ 488 936 488 936 489.5 938.5 c 128,-1,36
+ 491 941 491 941 499 945.5 c 128,-1,37
+ 507 950 507 950 519 950 c 0,38,39
+ 533 950 533 950 543 943.5 c 128,-1,40
+ 553 937 553 937 553 924 c 0,41,42
+ 553 915 553 915 538 882 c 128,-1,43
+ 523 849 523 849 496 802 c 128,-1,44
+ 469 755 469 755 420 698.5 c 128,-1,45
+ 371 642 371 642 313 599 c 1,46,-1
+ 1600 599 l 1,47,-1
+ 1621 599 l 1,48,-1
+ 2020 599 l 1,49,-1
+ 2041 599 l 1,50,-1
+ 3328 599 l 1,51,52
+ 3270 641 3270 641 3221 698 c 128,-1,53
+ 3172 755 3172 755 3145 802 c 128,-1,54
+ 3118 849 3118 849 3103 882 c 128,-1,55
+ 3088 915 3088 915 3088 924 c 0,56,57
+ 3088 938 3088 938 3098 944 c 128,-1,58
+ 3108 950 3108 950 3121 950 c 0,59,60
+ 3138 950 3138 950 3144 944 c 0,61,62
+ 3151 937 3151 937 3159 919 c 0,63,64
+ 3190 848 3190 848 3232 788 c 0,65,66
+ 3273 731 3273 731 3319.5 691.5 c 128,-1,67
+ 3366 652 3366 652 3406.5 626.5 c 128,-1,68
+ 3447 601 3447 601 3499 577 c 1,69,70
+ 3502 577 3502 577 3508 571 c 128,-1,71
+ 3514 565 3514 565 3514 555 c 0,72,73
+ 3514 544 3514 544 3509 539 c 128,-1,74
+ 3504 534 3504 534 3483 524 c 0,75,76
+ 3437 502 3437 502 3396.5 475.5 c 128,-1,77
+ 3356 449 3356 449 3327 424.5 c 128,-1,78
+ 3298 400 3298 400 3271.5 369 c 128,-1,79
+ 3245 338 3245 338 3229.5 316 c 128,-1,80
+ 3214 294 3214 294 3197.5 264 c 128,-1,81
+ 3181 234 3181 234 3174 220 c 128,-1,82
+ 3167 206 3167 206 3157 182 c 0,83,84
+ 3153 174 3153 174 3151.5 171.5 c 128,-1,85
+ 3150 169 3150 169 3142 164.5 c 128,-1,86
+ 3134 160 3134 160 3121 160 c 0,87,88
+ 3107 160 3107 160 3097.5 166.5 c 128,-1,89
+ 3088 173 3088 173 3088 186 c 0,90,91
+ 3088 195 3088 195 3103 228 c 128,-1,92
+ 3118 261 3118 261 3145 308 c 128,-1,93
+ 3172 355 3172 355 3221 412 c 128,-1,94
+ 3270 469 3270 469 3328 511 c 1,95,-1
+ 2041 511 l 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni27F8
@@ -36169,117 +36091,95 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 295 89<677 2073 2071 3457> 726 89<737 2073 2071 3457>
-LayerCount: 2
-Fore
-SplineSet
-2047 295 m 2,0,-1
- 737 295 l 1,1,2
- 799 227 799 227 846.5 163 c 128,-1,3
- 894 99 894 99 916 59.5 c 128,-1,4
- 938 20 938 20 948.5 -2.5 c 128,-1,5
- 959 -25 959 -25 959 -29 c 0,6,7
- 959 -41 959 -41 951 -47.5 c 128,-1,8
- 943 -54 943 -54 935.5 -55 c 128,-1,9
- 928 -56 928 -56 915 -56 c 0,10,11
- 891 -56 891 -56 881 -49 c 0,12,13
- 879 -48 879 -48 878 -46 c 128,-1,14
- 877 -44 877 -44 875 -40 c 128,-1,15
- 873 -36 873 -36 866.5 -24.5 c 128,-1,16
- 860 -13 860 -13 852 0 c 0,17,18
- 817 64 817 64 775.5 123 c 128,-1,19
- 734 182 734 182 674 243.5 c 128,-1,20
- 614 305 614 305 544.5 356 c 128,-1,21
- 475 407 475 407 383.5 452 c 128,-1,22
- 292 497 292 497 189 526 c 0,23,24
- 169 532 169 532 161 537.5 c 128,-1,25
- 153 543 153 543 153 555.5 c 128,-1,26
- 153 568 153 568 161 573 c 128,-1,27
- 169 578 169 578 191 584 c 0,28,29
- 318 620 318 620 426.5 679 c 128,-1,30
- 535 738 535 738 615.5 811.5 c 128,-1,31
- 696 885 696 885 755 961 c 128,-1,32
- 814 1037 814 1037 859 1123 c 0,33,34
- 876 1154 876 1154 883 1160 c 128,-1,35
- 890 1166 890 1166 915 1166 c 0,36,37
- 929 1166 929 1166 936 1165 c 128,-1,38
- 943 1164 943 1164 951 1157.5 c 128,-1,39
- 959 1151 959 1151 959 1139 c 0,40,41
- 959 1134 959 1134 948.5 1111.5 c 128,-1,42
- 938 1089 938 1089 916 1049 c 128,-1,43
- 894 1009 894 1009 846.5 945.5 c 128,-1,44
- 799 882 799 882 737 815 c 1,45,-1
- 2047 815 l 2,46,47
- 2063 815 2063 815 2072 814 c 128,-1,48
- 2081 813 2081 813 2094 809.5 c 128,-1,49
- 2107 806 2107 806 2113.5 796 c 128,-1,50
- 2120 786 2120 786 2120 770 c 0,51,52
- 2120 757 2120 757 2115 748.5 c 128,-1,53
- 2110 740 2110 740 2103.5 735.5 c 128,-1,54
- 2097 731 2097 731 2085 729 c 128,-1,55
- 2073 727 2073 727 2065.5 726.5 c 128,-1,56
- 2058 726 2058 726 2045 726 c 2,57,-1
- 677 726 l 2,58,59
- 649 726 649 726 639.5 723 c 128,-1,60
- 630 720 630 720 610 704 c 0,61,62
- 479 609 479 609 355 555 c 1,63,64
- 490 494 490 494 608 406 c 0,65,66
- 629 390 629 390 639 387 c 128,-1,67
- 649 384 649 384 677 384 c 2,68,-1
- 2045 384 l 2,69,70
- 2059 384 2059 384 2066 383.5 c 128,-1,71
- 2073 383 2073 383 2085 381 c 128,-1,72
- 2097 379 2097 379 2103.5 374.5 c 128,-1,73
- 2110 370 2110 370 2115 361 c 128,-1,74
- 2120 352 2120 352 2120 340 c 0,75,76
- 2120 324 2120 324 2113.5 314 c 128,-1,77
- 2107 304 2107 304 2094 300.5 c 128,-1,78
- 2081 297 2081 297 2072 296 c 128,-1,79
- 2063 295 2063 295 2047 295 c 2,0,-1
-3434 726 m 2,80,-1
+HStem: 295 89<737 3503.3> 726 89<737 3503.3>
+LayerCount: 2
+Fore
+SplineSet
+3437 295 m 2,0,-1
+ 2107 295 l 2,1,2
+ 2091 295 2091 295 2082 296 c 0,3,4
+ 2079 296 2079 296 2077 297 c 0,5,6
+ 2074 296 2074 296 2072 296 c 0,7,8
+ 2063 295 2063 295 2047 295 c 2,9,-1
+ 737 295 l 1,10,11
+ 793 233 793 233 846 163 c 0,12,13
+ 894 99 894 99 916 59 c 0,14,15
+ 938 20 938 20 948.5 -2.5 c 128,-1,16
+ 959 -25 959 -25 959 -29 c 0,17,18
+ 959 -41 959 -41 951 -47.5 c 128,-1,19
+ 943 -54 943 -54 935.5 -55 c 128,-1,20
+ 928 -56 928 -56 915 -56 c 0,21,22
+ 891 -56 891 -56 881 -49 c 0,23,24
+ 879 -48 879 -48 878 -46 c 2,25,-1
+ 875 -40 l 2,26,27
+ 873 -36 873 -36 866 -24 c 2,28,-1
+ 852 0 l 1,29,30
+ 817 64 817 64 775.5 123 c 128,-1,31
+ 734 182 734 182 674 243.5 c 128,-1,32
+ 614 305 614 305 544.5 356 c 128,-1,33
+ 475 407 475 407 383.5 452 c 128,-1,34
+ 292 497 292 497 189 526 c 0,35,36
+ 169 532 169 532 161 537.5 c 128,-1,37
+ 153 543 153 543 153 555.5 c 128,-1,38
+ 153 568 153 568 161 573 c 0,39,40
+ 168 578 168 578 191 584 c 0,41,42
+ 318 620 318 620 426.5 679 c 128,-1,43
+ 535 738 535 738 615.5 811.5 c 128,-1,44
+ 696 885 696 885 755 961 c 128,-1,45
+ 814 1037 814 1037 859 1123 c 0,46,47
+ 876 1154 876 1154 883 1160 c 128,-1,48
+ 890 1166 890 1166 915 1166 c 0,49,50
+ 929 1166 929 1166 936 1165 c 128,-1,51
+ 943 1164 943 1164 951 1157.5 c 128,-1,52
+ 959 1151 959 1151 959 1139 c 0,53,54
+ 959 1134 959 1134 948.5 1111.5 c 128,-1,55
+ 938 1089 938 1089 916 1049 c 128,-1,56
+ 894 1009 894 1009 846.5 945.5 c 128,-1,57
+ 799 882 799 882 737 815 c 1,58,-1
+ 2047 815 l 2,59,60
+ 2063 815 2063 815 2072 814 c 0,61,62
+ 2074 814 2074 814 2077 813 c 0,63,64
+ 2079 814 2079 814 2082 814 c 0,65,66
+ 2091 815 2091 815 2107 815 c 2,67,-1
+ 3437 815 l 2,68,69
+ 3453 815 3453 815 3462 814 c 128,-1,70
+ 3471 813 3471 813 3484 809.5 c 128,-1,71
+ 3497 806 3497 806 3503.5 796 c 128,-1,72
+ 3510 786 3510 786 3510 770 c 0,73,74
+ 3510 757 3510 757 3505 748.5 c 128,-1,75
+ 3500 740 3500 740 3494 736 c 0,76,77
+ 3487 731 3487 731 3475 729 c 128,-1,78
+ 3463 727 3463 727 3455.5 726.5 c 128,-1,79
+ 3448 726 3448 726 3434 726 c 2,80,-1
2109 726 l 2,81,82
- 2095 726 2095 726 2088 726.5 c 128,-1,83
- 2081 727 2081 727 2068.5 729 c 128,-1,84
- 2056 731 2056 731 2050 735.5 c 128,-1,85
- 2044 740 2044 740 2039 749 c 128,-1,86
- 2034 758 2034 758 2034 770 c 0,87,88
- 2034 786 2034 786 2040 796 c 128,-1,89
- 2046 806 2046 806 2059 809.5 c 128,-1,90
- 2072 813 2072 813 2081.5 814 c 128,-1,91
- 2091 815 2091 815 2107 815 c 2,92,-1
- 3437 815 l 2,93,94
- 3453 815 3453 815 3462 814 c 128,-1,95
- 3471 813 3471 813 3484 809.5 c 128,-1,96
- 3497 806 3497 806 3503.5 796 c 128,-1,97
- 3510 786 3510 786 3510 770 c 0,98,99
- 3510 757 3510 757 3505 748.5 c 128,-1,100
- 3500 740 3500 740 3493.5 735.5 c 128,-1,101
- 3487 731 3487 731 3475 729 c 128,-1,102
- 3463 727 3463 727 3455.5 726.5 c 128,-1,103
- 3448 726 3448 726 3434 726 c 2,80,-1
-3437 295 m 2,104,-1
- 2107 295 l 2,105,106
- 2091 295 2091 295 2081.5 296 c 128,-1,107
- 2072 297 2072 297 2059 300.5 c 128,-1,108
- 2046 304 2046 304 2040 314 c 128,-1,109
- 2034 324 2034 324 2034 340 c 0,110,111
- 2034 353 2034 353 2039 361.5 c 128,-1,112
- 2044 370 2044 370 2050 374.5 c 128,-1,113
- 2056 379 2056 379 2068.5 381 c 128,-1,114
- 2081 383 2081 383 2088 383.5 c 128,-1,115
- 2095 384 2095 384 2109 384 c 2,116,-1
- 3434 384 l 2,117,118
- 3448 384 3448 384 3455.5 383.5 c 128,-1,119
- 3463 383 3463 383 3475 381 c 128,-1,120
- 3487 379 3487 379 3493.5 374.5 c 128,-1,121
- 3500 370 3500 370 3505 361 c 128,-1,122
- 3510 352 3510 352 3510 340 c 0,123,124
- 3510 324 3510 324 3503.5 314 c 128,-1,125
- 3497 304 3497 304 3484 300.5 c 128,-1,126
- 3471 297 3471 297 3462 296 c 128,-1,127
- 3453 295 3453 295 3437 295 c 2,104,-1
-EndSplineSet
-Validated: 1029
+ 2088 726 2088 726 2088 726 c 0,83,84
+ 2084 727 2084 727 2077 728 c 1,85,86
+ 2070 727 2070 727 2065.5 726.5 c 128,-1,87
+ 2061 726 2061 726 2045 726 c 2,88,-1
+ 677 726 l 2,89,90
+ 649 726 649 726 639.5 723 c 128,-1,91
+ 630 720 630 720 610 704 c 1,92,93
+ 479 609 479 609 355 555 c 1,94,95
+ 490 494 490 494 608 406 c 0,96,97
+ 630 390 630 390 639 387 c 0,98,99
+ 649 384 649 384 677 384 c 2,100,-1
+ 2045 384 l 2,101,102
+ 2066 384 2066 384 2066 384 c 0,103,104
+ 2070 383 2070 383 2077 382 c 1,105,106
+ 2084 383 2084 383 2088 383.5 c 128,-1,108
+ 2092 384 2092 384 2109 384 c 2,109,-1
+ 3434 384 l 2,110,111
+ 3448 384 3448 384 3455.5 383.5 c 128,-1,112
+ 3463 383 3463 383 3475 381 c 128,-1,113
+ 3487 379 3487 379 3493.5 374.5 c 128,-1,114
+ 3500 370 3500 370 3505 361 c 128,-1,115
+ 3510 352 3510 352 3510 340 c 0,116,117
+ 3510 324 3510 324 3503.5 314 c 128,-1,118
+ 3497 304 3497 304 3484 300.5 c 128,-1,119
+ 3471 297 3471 297 3462 296 c 128,-1,120
+ 3453 295 3453 295 3437 295 c 2,0,-1
+EndSplineSet
+Validated: 1025
EndChar
StartChar: uni27F9
@@ -36287,117 +36187,91 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 295 89<195 1581 1576 2922> 726 89<195 1581 1576 2922>
-LayerCount: 2
-Fore
-SplineSet
-2981 726 m 6,0,-1
- 1614 726 l 2,1,2
- 1600 726 1600 726 1592.5 726.5 c 128,-1,3
- 1585 727 1585 727 1573 729 c 128,-1,4
- 1561 731 1561 731 1555 735.5 c 128,-1,5
- 1549 740 1549 740 1543.5 749 c 128,-1,6
- 1538 758 1538 758 1538 770 c 0,7,8
- 1538 786 1538 786 1544.5 796 c 128,-1,9
- 1551 806 1551 806 1564 809.5 c 128,-1,10
- 1577 813 1577 813 1586 814 c 128,-1,11
- 1595 815 1595 815 1612 815 c 2,12,-1
- 2922 815 l 1,13,14
- 2860 883 2860 883 2812 947 c 128,-1,15
- 2764 1011 2764 1011 2742 1050.5 c 128,-1,16
- 2720 1090 2720 1090 2710 1112.5 c 128,-1,17
- 2700 1135 2700 1135 2700 1139 c 0,18,19
- 2700 1151 2700 1151 2707.5 1157.5 c 128,-1,20
- 2715 1164 2715 1164 2722.5 1165 c 128,-1,21
- 2730 1166 2730 1166 2744 1166 c 0,22,23
- 2768 1166 2768 1166 2777 1159 c 0,24,25
- 2779 1158 2779 1158 2780.5 1156 c 128,-1,26
- 2782 1154 2782 1154 2784 1150 c 128,-1,27
- 2786 1146 2786 1146 2792 1134.5 c 128,-1,28
- 2798 1123 2798 1123 2806 1110 c 0,29,30
- 2841 1046 2841 1046 2883 987 c 128,-1,31
- 2925 928 2925 928 2985 866.5 c 128,-1,32
- 3045 805 3045 805 3114 754 c 128,-1,33
- 3183 703 3183 703 3275 658 c 128,-1,34
- 3367 613 3367 613 3470 584 c 0,35,36
- 3490 578 3490 578 3497.5 572.5 c 128,-1,37
- 3505 567 3505 567 3505 554.5 c 128,-1,38
- 3505 542 3505 542 3497 537 c 128,-1,39
- 3489 532 3489 532 3468 526 c 0,40,41
- 3341 490 3341 490 3232.5 431 c 128,-1,42
- 3124 372 3124 372 3043 298.5 c 128,-1,43
- 2962 225 2962 225 2903.5 149 c 128,-1,44
- 2845 73 2845 73 2799 -13 c 0,45,46
- 2782 -44 2782 -44 2775 -50 c 128,-1,47
- 2768 -56 2768 -56 2744 -56 c 0,48,49
- 2730 -56 2730 -56 2722.5 -55 c 128,-1,50
- 2715 -54 2715 -54 2707.5 -47.5 c 128,-1,51
- 2700 -41 2700 -41 2700 -29 c 0,52,53
- 2700 -24 2700 -24 2710 -1.5 c 128,-1,54
- 2720 21 2720 21 2742 61 c 128,-1,55
- 2764 101 2764 101 2811.5 164.5 c 128,-1,56
- 2859 228 2859 228 2922 295 c 1,57,-1
- 1612 295 l 2,58,59
- 1596 295 1596 295 1586.5 296 c 128,-1,60
- 1577 297 1577 297 1564 300.5 c 128,-1,61
- 1551 304 1551 304 1544.5 314 c 128,-1,62
- 1538 324 1538 324 1538 340 c 0,63,64
- 1538 353 1538 353 1543.5 361.5 c 128,-1,65
- 1549 370 1549 370 1555 374.5 c 128,-1,66
- 1561 379 1561 379 1573 381 c 128,-1,67
- 1585 383 1585 383 1592.5 383.5 c 128,-1,68
- 1600 384 1600 384 1614 384 c 2,69,-1
- 2981 384 l 2,70,71
- 3009 384 3009 384 3019 387 c 128,-1,72
- 3029 390 3029 390 3048 406 c 0,73,74
- 3179 501 3179 501 3303 555 c 1,75,76
- 3168 616 3168 616 3050 704 c 0,77,78
- 3029 720 3029 720 3019 723 c 128,-1,79
- 3009 726 3009 726 2981 726 c 6,0,-1
-1558 726 m 6,80,-1
- 233 726 l 2,81,82
- 219 726 219 726 212 726.5 c 128,-1,83
- 205 727 205 727 193 729 c 128,-1,84
- 181 731 181 731 174.5 735.5 c 128,-1,85
- 168 740 168 740 163 749 c 128,-1,86
- 158 758 158 758 158 770 c 0,87,88
- 158 786 158 786 164 796 c 128,-1,89
- 170 806 170 806 183 809.5 c 128,-1,90
- 196 813 196 813 205.5 814 c 128,-1,91
- 215 815 215 815 231 815 c 2,92,-1
- 1561 815 l 2,93,94
- 1577 815 1577 815 1586 814 c 128,-1,95
- 1595 813 1595 813 1608 809.5 c 128,-1,96
- 1621 806 1621 806 1627.5 796 c 128,-1,97
- 1634 786 1634 786 1634 770 c 0,98,99
- 1634 757 1634 757 1629 748.5 c 128,-1,100
- 1624 740 1624 740 1617.5 735.5 c 128,-1,101
- 1611 731 1611 731 1599 729 c 128,-1,102
- 1587 727 1587 727 1579.5 726.5 c 128,-1,103
- 1572 726 1572 726 1558 726 c 6,80,-1
-1561 295 m 6,104,-1
- 231 295 l 2,105,106
- 215 295 215 295 205.5 296 c 128,-1,107
- 196 297 196 297 183 300.5 c 128,-1,108
- 170 304 170 304 164 314 c 128,-1,109
- 158 324 158 324 158 340 c 0,110,111
- 158 353 158 353 163 361.5 c 128,-1,112
- 168 370 168 370 174.5 374.5 c 128,-1,113
- 181 379 181 379 193 381 c 128,-1,114
- 205 383 205 383 212 383.5 c 128,-1,115
- 219 384 219 384 233 384 c 2,116,-1
- 1558 384 l 2,117,118
- 1572 384 1572 384 1579.5 383.5 c 128,-1,119
- 1587 383 1587 383 1599 381 c 128,-1,120
- 1611 379 1611 379 1617.5 374.5 c 128,-1,121
- 1624 370 1624 370 1629 361 c 128,-1,122
- 1634 352 1634 352 1634 340 c 0,123,124
- 1634 324 1634 324 1627.5 314 c 128,-1,125
- 1621 304 1621 304 1608 300.5 c 128,-1,126
- 1595 297 1595 297 1586 296 c 128,-1,127
- 1577 295 1577 295 1561 295 c 6,104,-1
-EndSplineSet
-Validated: 1029
+HStem: 295 89<164.189 2922> 726.5 88.5<164.189 2922>
+LayerCount: 2
+Fore
+SplineSet
+1561 295 m 2,0,-1
+ 231 295 l 2,1,2
+ 215 295 215 295 205.5 296 c 128,-1,3
+ 196 297 196 297 183 300.5 c 128,-1,4
+ 170 304 170 304 164 314 c 128,-1,5
+ 158 324 158 324 158 340 c 0,6,7
+ 158 353 158 353 163 361.5 c 128,-1,8
+ 168 370 168 370 174.5 374.5 c 128,-1,9
+ 181 379 181 379 193 381 c 128,-1,10
+ 205 383 205 383 212 383.5 c 128,-1,11
+ 219 384 219 384 233 384 c 2,12,-1
+ 1558 384 l 1,13,-1
+ 1580 384 l 1,14,15
+ 1582 383 1582 383 1586 383 c 128,-1,16
+ 1590 383 1590 383 1592 384 c 0,17,18
+ 1595 384 1595 384 1614 384 c 2,19,-1
+ 2981 384 l 2,20,21
+ 3009 384 3009 384 3019 387 c 128,-1,22
+ 3029 390 3029 390 3048 406 c 1,23,24
+ 3179 501 3179 501 3303 555 c 1,25,26
+ 3168 616 3168 616 3050 704 c 0,27,28
+ 3029 720 3029 720 3019 723 c 128,-1,29
+ 3009 726 3009 726 2981 726 c 2,30,-1
+ 1614 726 l 1,31,-1
+ 1592 726 l 1,32,33
+ 1590 727 1590 727 1586 727 c 128,-1,34
+ 1582 727 1582 727 1579.5 726.5 c 128,-1,35
+ 1577 726 1577 726 1558 726 c 2,36,-1
+ 233 726 l 2,37,38
+ 219 726 219 726 212 726.5 c 128,-1,39
+ 205 727 205 727 193 729 c 128,-1,40
+ 181 731 181 731 174.5 735.5 c 128,-1,41
+ 168 740 168 740 163 749 c 128,-1,42
+ 158 758 158 758 158 770 c 0,43,44
+ 158 786 158 786 164 796 c 128,-1,45
+ 170 806 170 806 183 809.5 c 128,-1,46
+ 196 813 196 813 205.5 814 c 128,-1,47
+ 215 815 215 815 231 815 c 2,48,-1
+ 1561 815 l 2,49,50
+ 1577 815 1577 815 1586 814 c 1,51,52
+ 1595 815 1595 815 1612 815 c 2,53,-1
+ 2922 815 l 1,54,55
+ 2860 883 2860 883 2812 947 c 128,-1,56
+ 2764 1011 2764 1011 2742 1050.5 c 128,-1,57
+ 2720 1090 2720 1090 2710 1112.5 c 128,-1,58
+ 2700 1135 2700 1135 2700 1139 c 0,59,60
+ 2700 1151 2700 1151 2707.5 1157.5 c 128,-1,61
+ 2715 1164 2715 1164 2722.5 1165 c 128,-1,62
+ 2730 1166 2730 1166 2744 1166 c 0,63,64
+ 2768 1166 2768 1166 2777 1159 c 0,65,66
+ 2779 1158 2779 1158 2780.5 1156 c 128,-1,67
+ 2782 1154 2782 1154 2784 1150 c 2,68,-1
+ 2792 1134 l 2,69,70
+ 2798 1123 2798 1123 2806 1110 c 0,71,72
+ 2873 1001 2873 1001 2883 987 c 0,73,74
+ 2926 927 2926 927 2985 866 c 0,75,76
+ 3045 805 3045 805 3114 754 c 128,-1,77
+ 3183 703 3183 703 3275 658 c 128,-1,78
+ 3367 613 3367 613 3470 584 c 0,79,80
+ 3490 578 3490 578 3497.5 572.5 c 128,-1,81
+ 3505 567 3505 567 3505 554.5 c 128,-1,82
+ 3505 542 3505 542 3497 537 c 128,-1,83
+ 3489 532 3489 532 3468 526 c 0,84,85
+ 3340 489 3340 489 3232 431 c 0,86,87
+ 3129 375 3129 375 3043 298 c 0,88,89
+ 2961 224 2961 224 2904 149 c 0,90,91
+ 2845 73 2845 73 2799 -13 c 0,92,93
+ 2783 -43 2783 -43 2775 -50 c 0,94,95
+ 2768 -56 2768 -56 2744 -56 c 0,96,97
+ 2730 -56 2730 -56 2722.5 -55 c 128,-1,98
+ 2715 -54 2715 -54 2707.5 -47.5 c 128,-1,99
+ 2700 -41 2700 -41 2700 -29 c 0,100,101
+ 2700 -24 2700 -24 2710 -1.5 c 128,-1,102
+ 2720 21 2720 21 2742 61 c 128,-1,103
+ 2764 101 2764 101 2811.5 164.5 c 128,-1,104
+ 2859 228 2859 228 2922 295 c 1,105,-1
+ 1612 295 l 2,106,107
+ 1596 295 1596 295 1586 296 c 1,108,109
+ 1577 295 1577 295 1561 295 c 2,0,-1
+EndSplineSet
+Validated: 1025
EndChar
StartChar: uni27FA
@@ -36405,138 +36279,106 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 295 89<665 2061 1593 2938> 726 89<725 2061 1593 2938>
-LayerCount: 2
-Fore
-SplineSet
-2035 295 m 6,0,-1
- 725 295 l 1,1,2
- 787 227 787 227 834.5 163 c 128,-1,3
- 882 99 882 99 904 59.5 c 128,-1,4
- 926 20 926 20 936.5 -2.5 c 128,-1,5
- 947 -25 947 -25 947 -29 c 0,6,7
- 947 -41 947 -41 939 -47.5 c 128,-1,8
- 931 -54 931 -54 923.5 -55 c 128,-1,9
- 916 -56 916 -56 902 -56 c 0,10,11
- 878 -56 878 -56 869 -49 c 0,12,13
- 867 -48 867 -48 865.5 -46 c 128,-1,14
- 864 -44 864 -44 862 -40 c 128,-1,15
- 860 -36 860 -36 854 -24.5 c 128,-1,16
- 848 -13 848 -13 840 0 c 0,17,18
- 805 64 805 64 763.5 123 c 128,-1,19
- 722 182 722 182 662 243.5 c 128,-1,20
- 602 305 602 305 532.5 356 c 128,-1,21
- 463 407 463 407 371.5 452 c 128,-1,22
- 280 497 280 497 176 526 c 0,23,24
- 156 532 156 532 148.5 537.5 c 128,-1,25
- 141 543 141 543 141 555.5 c 128,-1,26
- 141 568 141 568 149 573 c 128,-1,27
- 157 578 157 578 179 584 c 0,28,29
- 306 620 306 620 414 679 c 128,-1,30
- 522 738 522 738 603 811.5 c 128,-1,31
- 684 885 684 885 743 961 c 128,-1,32
- 802 1037 802 1037 847 1123 c 0,33,34
- 864 1154 864 1154 871 1160 c 128,-1,35
- 878 1166 878 1166 902 1166 c 0,36,37
- 916 1166 916 1166 923.5 1165 c 128,-1,38
- 931 1164 931 1164 939 1157.5 c 128,-1,39
- 947 1151 947 1151 947 1139 c 0,40,41
- 947 1134 947 1134 936.5 1111.5 c 128,-1,42
- 926 1089 926 1089 904 1049 c 128,-1,43
- 882 1009 882 1009 834.5 945.5 c 128,-1,44
- 787 882 787 882 725 815 c 1,45,-1
- 2035 815 l 2,46,47
- 2051 815 2051 815 2060 814 c 128,-1,48
- 2069 813 2069 813 2082 809.5 c 128,-1,49
- 2095 806 2095 806 2101.5 796 c 128,-1,50
- 2108 786 2108 786 2108 770 c 0,51,52
- 2108 757 2108 757 2103 748.5 c 128,-1,53
- 2098 740 2098 740 2091.5 735.5 c 128,-1,54
- 2085 731 2085 731 2073 729 c 128,-1,55
- 2061 727 2061 727 2053.5 726.5 c 128,-1,56
- 2046 726 2046 726 2032 726 c 2,57,-1
- 665 726 l 2,58,59
- 637 726 637 726 627.5 723 c 128,-1,60
- 618 720 618 720 598 704 c 0,61,62
- 467 609 467 609 343 555 c 1,63,64
- 478 494 478 494 596 406 c 0,65,66
- 617 390 617 390 627 387 c 128,-1,67
- 637 384 637 384 665 384 c 2,68,-1
- 2032 384 l 2,69,70
- 2046 384 2046 384 2053.5 383.5 c 128,-1,71
- 2061 383 2061 383 2073 381 c 128,-1,72
- 2085 379 2085 379 2091.5 374.5 c 128,-1,73
- 2098 370 2098 370 2103 361 c 128,-1,74
- 2108 352 2108 352 2108 340 c 0,75,76
- 2108 324 2108 324 2101.5 314 c 128,-1,77
- 2095 304 2095 304 2082 300.5 c 128,-1,78
- 2069 297 2069 297 2060 296 c 128,-1,79
- 2051 295 2051 295 2035 295 c 6,0,-1
-2998 726 m 6,80,-1
- 1631 726 l 2,81,82
- 1617 726 1617 726 1609.5 726.5 c 128,-1,83
- 1602 727 1602 727 1590 729 c 128,-1,84
- 1578 731 1578 731 1571.5 735.5 c 128,-1,85
- 1565 740 1565 740 1560 749 c 128,-1,86
- 1555 758 1555 758 1555 770 c 0,87,88
- 1555 786 1555 786 1561.5 796 c 128,-1,89
- 1568 806 1568 806 1581 809.5 c 128,-1,90
- 1594 813 1594 813 1603 814 c 128,-1,91
- 1612 815 1612 815 1628 815 c 2,92,-1
- 2938 815 l 1,93,94
- 2876 883 2876 883 2828.5 947 c 128,-1,95
- 2781 1011 2781 1011 2759 1050.5 c 128,-1,96
- 2737 1090 2737 1090 2726.5 1112.5 c 128,-1,97
- 2716 1135 2716 1135 2716 1139 c 0,98,99
- 2716 1151 2716 1151 2724 1157.5 c 128,-1,100
- 2732 1164 2732 1164 2739.5 1165 c 128,-1,101
- 2747 1166 2747 1166 2761 1166 c 0,102,103
- 2785 1166 2785 1166 2794 1159 c 0,104,105
- 2796 1158 2796 1158 2797.5 1156 c 128,-1,106
- 2799 1154 2799 1154 2801 1150 c 128,-1,107
- 2803 1146 2803 1146 2809 1134.5 c 128,-1,108
- 2815 1123 2815 1123 2823 1110 c 0,109,110
- 2858 1046 2858 1046 2899.5 987 c 128,-1,111
- 2941 928 2941 928 3001 866.5 c 128,-1,112
- 3061 805 3061 805 3130.5 754 c 128,-1,113
- 3200 703 3200 703 3291.5 658 c 128,-1,114
- 3383 613 3383 613 3487 584 c 0,115,116
- 3507 578 3507 578 3514.5 572.5 c 128,-1,117
- 3522 567 3522 567 3522 554.5 c 128,-1,118
- 3522 542 3522 542 3514 537 c 128,-1,119
- 3506 532 3506 532 3484 526 c 0,120,121
- 3357 490 3357 490 3249 431 c 128,-1,122
- 3141 372 3141 372 3060 298.5 c 128,-1,123
- 2979 225 2979 225 2920 149 c 128,-1,124
- 2861 73 2861 73 2816 -13 c 0,125,126
- 2799 -44 2799 -44 2792 -50 c 128,-1,127
- 2785 -56 2785 -56 2761 -56 c 0,128,129
- 2747 -56 2747 -56 2739.5 -55 c 128,-1,130
- 2732 -54 2732 -54 2724 -47.5 c 128,-1,131
- 2716 -41 2716 -41 2716 -29 c 0,132,133
- 2716 -24 2716 -24 2726.5 -1.5 c 128,-1,134
- 2737 21 2737 21 2759 61 c 128,-1,135
- 2781 101 2781 101 2828.5 164.5 c 128,-1,136
- 2876 228 2876 228 2938 295 c 1,137,-1
- 1628 295 l 2,138,139
- 1612 295 1612 295 1603 296 c 128,-1,140
- 1594 297 1594 297 1581 300.5 c 128,-1,141
- 1568 304 1568 304 1561.5 314 c 128,-1,142
- 1555 324 1555 324 1555 340 c 0,143,144
- 1555 353 1555 353 1560 361.5 c 128,-1,145
- 1565 370 1565 370 1571.5 374.5 c 128,-1,146
- 1578 379 1578 379 1590 381 c 128,-1,147
- 1602 383 1602 383 1609.5 383.5 c 128,-1,148
- 1617 384 1617 384 1631 384 c 2,149,-1
- 2998 384 l 2,150,151
- 3026 384 3026 384 3035.5 387 c 128,-1,152
- 3045 390 3045 390 3065 406 c 0,153,154
- 3196 501 3196 501 3320 555 c 1,155,156
- 3185 616 3185 616 3067 704 c 0,157,158
- 3046 720 3046 720 3036 723 c 128,-1,159
- 3026 726 3026 726 2998 726 c 6,80,-1
-EndSplineSet
-Validated: 1029
+HStem: 295 89<725 2938> 726 89<725 2938>
+LayerCount: 2
+Fore
+SplineSet
+2032 384 m 1,0,-1
+ 2998 384 l 2,1,2
+ 3026 384 3026 384 3035.5 387 c 128,-1,3
+ 3045 390 3045 390 3065 406 c 1,4,5
+ 3196 501 3196 501 3320 555 c 1,6,7
+ 3185 616 3185 616 3067 704 c 0,8,9
+ 3046 720 3046 720 3036 723 c 128,-1,10
+ 3026 726 3026 726 2998 726 c 2,11,-1
+ 2032 726 l 1,12,-1
+ 1631 726 l 1,13,-1
+ 665 726 l 2,14,15
+ 637 726 637 726 627.5 723 c 128,-1,16
+ 618 720 618 720 598 704 c 1,17,18
+ 467 609 467 609 343 555 c 1,19,20
+ 478 494 478 494 596 406 c 0,21,22
+ 618 390 618 390 627 387 c 0,23,24
+ 637 384 637 384 665 384 c 2,25,-1
+ 1631 384 l 1,26,-1
+ 2032 384 l 1,0,-1
+2035 815 m 1,27,-1
+ 2938 815 l 1,28,29
+ 2876 883 2876 883 2828.5 947 c 128,-1,30
+ 2781 1011 2781 1011 2759 1050.5 c 128,-1,31
+ 2737 1090 2737 1090 2726.5 1112.5 c 128,-1,32
+ 2716 1135 2716 1135 2716 1139 c 0,33,34
+ 2716 1151 2716 1151 2724 1157.5 c 128,-1,35
+ 2732 1164 2732 1164 2739.5 1165 c 128,-1,36
+ 2747 1166 2747 1166 2761 1166 c 0,37,38
+ 2785 1166 2785 1166 2794 1159 c 0,39,40
+ 2796 1158 2796 1158 2797.5 1156 c 128,-1,41
+ 2799 1154 2799 1154 2801 1150 c 2,42,-1
+ 2809 1134 l 2,43,44
+ 2815 1123 2815 1123 2823 1110 c 0,45,46
+ 2893 996 2893 996 2900 987 c 0,47,48
+ 2941 928 2941 928 3001 866.5 c 128,-1,49
+ 3061 805 3061 805 3130.5 754 c 128,-1,50
+ 3200 703 3200 703 3291.5 658 c 128,-1,51
+ 3383 613 3383 613 3487 584 c 0,52,53
+ 3507 578 3507 578 3514.5 572.5 c 128,-1,54
+ 3522 567 3522 567 3522 554.5 c 128,-1,55
+ 3522 542 3522 542 3514 537 c 0,56,57
+ 3507 532 3507 532 3484 526 c 0,58,59
+ 3357 490 3357 490 3249 431 c 128,-1,60
+ 3141 372 3141 372 3060 298.5 c 128,-1,61
+ 2979 225 2979 225 2920 149 c 128,-1,62
+ 2861 73 2861 73 2816 -13 c 0,63,64
+ 2800 -43 2800 -43 2792 -50 c 0,65,66
+ 2785 -56 2785 -56 2761 -56 c 0,67,68
+ 2747 -56 2747 -56 2739.5 -55 c 128,-1,69
+ 2732 -54 2732 -54 2724 -47.5 c 128,-1,70
+ 2716 -41 2716 -41 2716 -29 c 0,71,72
+ 2716 -24 2716 -24 2726.5 -1.5 c 128,-1,73
+ 2737 21 2737 21 2759 61 c 128,-1,74
+ 2781 101 2781 101 2828.5 164.5 c 128,-1,75
+ 2876 228 2876 228 2938 295 c 1,76,-1
+ 2035 295 l 1,77,-1
+ 1628 295 l 1,78,-1
+ 725 295 l 1,79,80
+ 781 233 781 233 834 163 c 0,81,82
+ 882 99 882 99 904 59 c 0,83,84
+ 926 20 926 20 936.5 -2.5 c 128,-1,85
+ 947 -25 947 -25 947 -29 c 0,86,87
+ 947 -41 947 -41 939 -47.5 c 128,-1,88
+ 931 -54 931 -54 923.5 -55 c 128,-1,89
+ 916 -56 916 -56 902 -56 c 0,90,91
+ 878 -56 878 -56 869 -49 c 0,92,93
+ 867 -48 867 -48 865.5 -46 c 128,-1,94
+ 864 -44 864 -44 862 -40 c 2,95,-1
+ 854 -24 l 2,96,97
+ 848 -13 848 -13 840 0 c 0,98,99
+ 770 114 770 114 764 123 c 0,100,101
+ 722 182 722 182 662 243.5 c 128,-1,102
+ 602 305 602 305 532.5 356 c 128,-1,103
+ 463 407 463 407 371.5 452 c 128,-1,104
+ 280 497 280 497 176 526 c 0,105,106
+ 156 532 156 532 148.5 537.5 c 128,-1,107
+ 141 543 141 543 141 555.5 c 128,-1,108
+ 141 568 141 568 149 573 c 0,109,110
+ 156 578 156 578 179 584 c 0,111,112
+ 306 620 306 620 414 679 c 128,-1,113
+ 522 738 522 738 603 811.5 c 128,-1,114
+ 684 885 684 885 743 961 c 128,-1,115
+ 802 1037 802 1037 847 1123 c 0,116,117
+ 864 1154 864 1154 871 1160 c 128,-1,118
+ 878 1166 878 1166 902 1166 c 0,119,120
+ 916 1166 916 1166 923.5 1165 c 128,-1,121
+ 931 1164 931 1164 939 1157.5 c 128,-1,122
+ 947 1151 947 1151 947 1139 c 0,123,124
+ 947 1134 947 1134 936.5 1111.5 c 128,-1,125
+ 926 1089 926 1089 904 1049 c 128,-1,126
+ 882 1009 882 1009 834.5 945.5 c 128,-1,127
+ 787 882 787 882 725 815 c 1,128,-1
+ 1628 815 l 1,129,-1
+ 2035 815 l 1,27,-1
+EndSplineSet
+Validated: 1025
EndChar
StartChar: uni27FC
@@ -36544,99 +36386,83 @@
Width: 3663
VWidth: 2220
Flags: W
-HStem: 511 89<257 273 320 1569 1569 1569 1569 3308> 924 44G<190 235>
-VStem: 168 89<163 511 599 947> 256 63<511 599> 1528 84<511 599>
-LayerCount: 2
-Fore
-SplineSet
-1539 511 m 6,0,-1
- 329 511 l 2,1,2
- 313 511 313 511 304 511.5 c 128,-1,3
- 295 512 295 512 281.5 516 c 128,-1,4
- 268 520 268 520 262 529.5 c 128,-1,5
- 256 539 256 539 256 555 c 128,-1,6
- 256 571 256 571 262 580.5 c 128,-1,7
- 268 590 268 590 281.5 594 c 128,-1,8
- 295 598 295 598 304 598.5 c 128,-1,9
- 313 599 313 599 329 599 c 2,10,-1
- 1539 599 l 2,11,12
- 1555 599 1555 599 1564 598.5 c 128,-1,13
- 1573 598 1573 598 1586.5 594 c 128,-1,14
- 1600 590 1600 590 1606 580.5 c 128,-1,15
- 1612 571 1612 571 1612 555 c 128,-1,16
- 1612 539 1612 539 1606 529.5 c 128,-1,17
- 1600 520 1600 520 1586.5 516 c 128,-1,18
- 1573 512 1573 512 1564 511.5 c 128,-1,19
- 1555 511 1555 511 1539 511 c 6,0,-1
-3308 511 m 5,20,-1
- 1601 511 l 2,21,22
- 1587 511 1587 511 1580 511 c 128,-1,23
- 1573 511 1573 511 1561.5 513.5 c 128,-1,24
- 1550 516 1550 516 1544 520.5 c 128,-1,25
- 1538 525 1538 525 1533 533.5 c 128,-1,26
- 1528 542 1528 542 1528 555 c 128,-1,27
- 1528 568 1528 568 1533 576.5 c 128,-1,28
- 1538 585 1538 585 1544 589.5 c 128,-1,29
- 1550 594 1550 594 1561.5 596.5 c 128,-1,30
- 1573 599 1573 599 1580.5 599 c 128,-1,31
- 1588 599 1588 599 1601 599 c 2,32,-1
- 3308 599 l 1,33,34
- 3250 641 3250 641 3201 698 c 128,-1,35
- 3152 755 3152 755 3125 802 c 128,-1,36
- 3098 849 3098 849 3083.5 882 c 128,-1,37
- 3069 915 3069 915 3069 924 c 0,38,39
- 3069 938 3069 938 3078.5 944 c 128,-1,40
- 3088 950 3088 950 3102 950 c 0,41,42
- 3119 950 3119 950 3125.5 943.5 c 128,-1,43
- 3132 937 3132 937 3140 919 c 0,44,45
- 3173 846 3173 846 3213.5 788.5 c 128,-1,46
- 3254 731 3254 731 3300 691.5 c 128,-1,47
- 3346 652 3346 652 3387 626.5 c 128,-1,48
- 3428 601 3428 601 3479 577 c 1,49,50
- 3482 577 3482 577 3488.5 571 c 128,-1,51
- 3495 565 3495 565 3495 555 c 0,52,53
- 3495 544 3495 544 3490 539 c 128,-1,54
- 3485 534 3485 534 3464 524 c 0,55,56
- 3418 502 3418 502 3377.5 475.5 c 128,-1,57
- 3337 449 3337 449 3308 424.5 c 128,-1,58
- 3279 400 3279 400 3252.5 369 c 128,-1,59
- 3226 338 3226 338 3210 316 c 128,-1,60
- 3194 294 3194 294 3177.5 264 c 128,-1,61
- 3161 234 3161 234 3154.5 220 c 128,-1,62
- 3148 206 3148 206 3137 182 c 0,63,64
- 3133 174 3133 174 3131.5 171.5 c 128,-1,65
- 3130 169 3130 169 3122 164.5 c 128,-1,66
- 3114 160 3114 160 3102 160 c 0,67,68
- 3088 160 3088 160 3078.5 166.5 c 128,-1,69
- 3069 173 3069 173 3069 186 c 0,70,71
- 3069 195 3069 195 3083.5 228 c 128,-1,72
- 3098 261 3098 261 3125 308 c 128,-1,73
- 3152 355 3152 355 3201 412 c 128,-1,74
- 3250 469 3250 469 3308 511 c 5,20,-1
-257 511 m 5,75,-1
- 257 215 l 2,76,77
- 257 199 257 199 256 190 c 128,-1,78
- 255 181 255 181 251.5 168 c 128,-1,79
- 248 155 248 155 238 148.5 c 128,-1,80
- 228 142 228 142 212.5 142 c 128,-1,81
- 197 142 197 142 187 148.5 c 128,-1,82
- 177 155 177 155 173.5 168 c 128,-1,83
- 170 181 170 181 169 190 c 128,-1,84
- 168 199 168 199 168 215 c 2,85,-1
- 168 895 l 2,86,87
- 168 911 168 911 169 920 c 128,-1,88
- 170 929 170 929 173.5 942 c 128,-1,89
- 177 955 177 955 187 961.5 c 128,-1,90
- 197 968 197 968 212.5 968 c 128,-1,91
- 228 968 228 968 238 961.5 c 128,-1,92
- 248 955 248 955 251.5 942 c 128,-1,93
- 255 929 255 929 256 920 c 128,-1,94
- 257 911 257 911 257 895 c 2,95,-1
- 257 599 l 1,96,97
- 319 599 319 599 319 555 c 128,-1,98
- 319 511 319 511 257 511 c 5,75,-1
-EndSplineSet
-Validated: 1029
+HStem: 511 88<257 286.567 287.269 3308>
+VStem: 168 89<142.002 511 599 967.998> 3069 71<849.875 943.302>
+LayerCount: 2
+Fore
+SplineSet
+257 511 m 1,0,-1
+ 257 215 l 2,1,2
+ 257 199 257 199 256 190 c 128,-1,3
+ 255 181 255 181 251.5 168 c 128,-1,4
+ 248 155 248 155 238 148.5 c 128,-1,5
+ 228 142 228 142 212.5 142 c 128,-1,6
+ 197 142 197 142 187 148.5 c 128,-1,7
+ 177 155 177 155 173.5 168 c 128,-1,8
+ 170 181 170 181 169 190 c 128,-1,9
+ 168 199 168 199 168 215 c 2,10,-1
+ 168 895 l 2,11,12
+ 168 911 168 911 169 920 c 128,-1,13
+ 170 929 170 929 173.5 942 c 128,-1,14
+ 177 955 177 955 187 961.5 c 128,-1,15
+ 197 968 197 968 212.5 968 c 128,-1,16
+ 228 968 228 968 238 961.5 c 128,-1,17
+ 248 955 248 955 251.5 942 c 128,-1,18
+ 255 929 255 929 256 920 c 128,-1,19
+ 257 911 257 911 257 895 c 2,20,-1
+ 257 599 l 1,21,22
+ 274 599 274 599 287 596 c 1,23,24
+ 297 598 297 598 304 598.5 c 128,-1,25
+ 311 599 311 599 329 599 c 2,26,-1
+ 1539 599 l 2,27,28
+ 1555 599 1555 599 1564 598 c 0,29,30
+ 1569 598 1569 598 1569 598 c 0,31,32
+ 1576 599 1576 599 1580 599 c 2,33,-1
+ 1601 599 l 1,34,-1
+ 3308 599 l 1,35,36
+ 3250 641 3250 641 3201 698 c 128,-1,37
+ 3152 755 3152 755 3125 802 c 128,-1,38
+ 3098 849 3098 849 3083.5 882 c 128,-1,39
+ 3069 915 3069 915 3069 924 c 0,40,41
+ 3069 938 3069 938 3078.5 944 c 128,-1,42
+ 3088 950 3088 950 3102 950 c 0,43,44
+ 3119 950 3119 950 3126 944 c 0,45,46
+ 3132 937 3132 937 3140 919 c 0,47,48
+ 3171 848 3171 848 3214 788 c 0,49,50
+ 3254 731 3254 731 3300 691.5 c 128,-1,51
+ 3346 652 3346 652 3387 626.5 c 128,-1,52
+ 3428 601 3428 601 3479 577 c 1,53,54
+ 3482 577 3482 577 3488.5 571 c 128,-1,55
+ 3495 565 3495 565 3495 555 c 0,56,57
+ 3495 544 3495 544 3490 539 c 128,-1,58
+ 3485 534 3485 534 3464 524 c 0,59,60
+ 3418 502 3418 502 3377.5 475.5 c 128,-1,61
+ 3337 449 3337 449 3308 424.5 c 128,-1,62
+ 3279 400 3279 400 3252.5 369 c 128,-1,63
+ 3226 338 3226 338 3210 316 c 128,-1,64
+ 3194 294 3194 294 3177.5 264 c 128,-1,65
+ 3161 234 3161 234 3154 220 c 2,66,-1
+ 3137 182 l 2,67,68
+ 3133 174 3133 174 3131.5 171.5 c 128,-1,69
+ 3130 169 3130 169 3122 164.5 c 128,-1,70
+ 3114 160 3114 160 3102 160 c 0,71,72
+ 3088 160 3088 160 3078.5 166.5 c 128,-1,73
+ 3069 173 3069 173 3069 186 c 0,74,75
+ 3069 195 3069 195 3083.5 228 c 128,-1,76
+ 3098 261 3098 261 3125 308 c 128,-1,77
+ 3152 355 3152 355 3201 412 c 128,-1,78
+ 3250 469 3250 469 3308 511 c 1,79,-1
+ 1601 511 l 1,80,-1
+ 1580 511 l 2,81,82
+ 1576 511 1576 511 1569 512 c 0,83,84
+ 1566 512 1566 512 1564 511.5 c 128,-1,85
+ 1562 511 1562 511 1539 511 c 2,86,-1
+ 329 511 l 2,87,88
+ 313 511 313 511 304 512 c 0,89,90
+ 297 512 297 512 287 514 c 1,91,92
+ 274 511 274 511 257 511 c 1,0,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni2983
@@ -37269,61 +37095,54 @@
Width: 2220
VWidth: 2220
Flags: W
-HStem: -11 47G<211 236 1981 2009>
+HStem: -11 21G<1982 1993.5>
VStem: 189 89<111 999> 1942 89<111 999>
LayerCount: 2
Fore
SplineSet
-1143 506 m 6,0,-1
- 273 4 l 2,1,2
- 246 -12 246 -12 233 -11 c 0,3,4
- 217 -11 217 -11 207.5 -4 c 128,-1,5
- 198 3 198 3 194 17.5 c 128,-1,6
- 190 32 190 32 189.5 41.5 c 128,-1,7
- 189 51 189 51 189 69 c 2,8,-1
- 189 1041 l 2,9,10
- 189 1058 189 1058 189.5 1068 c 128,-1,11
- 190 1078 190 1078 194 1092.5 c 128,-1,12
- 198 1107 198 1107 207.5 1114 c 128,-1,13
- 217 1121 217 1121 233 1121 c 0,14,15
- 250 1121 250 1121 278 1103 c 2,16,-1
- 1143 604 l 2,17,18
- 1160 595 1160 595 1169 585 c 128,-1,19
- 1178 575 1178 575 1179.5 569 c 128,-1,20
- 1181 563 1181 563 1181 555 c 128,-1,21
- 1181 547 1181 547 1179.5 541 c 128,-1,22
- 1178 535 1178 535 1169 525 c 128,-1,23
- 1160 515 1160 515 1143 506 c 6,0,-1
-1048 555 m 5,24,-1
- 278 999 l 1,25,-1
- 278 111 l 1,26,-1
- 1048 555 l 5,24,-1
-1086 606 m 2,27,-1
- 1949 1106 l 2,28,29
- 1976 1122 1976 1122 1987 1121 c 0,30,31
- 2000 1121 2000 1121 2008.5 1116 c 128,-1,32
- 2017 1111 2017 1111 2021.5 1105 c 128,-1,33
- 2026 1099 2026 1099 2028.5 1086 c 128,-1,34
- 2031 1073 2031 1073 2031 1065 c 128,-1,35
- 2031 1057 2031 1057 2031 1041 c 2,36,-1
- 2031 69 l 2,37,38
- 2031 53 2031 53 2031 45 c 128,-1,39
- 2031 37 2031 37 2028.5 24 c 128,-1,40
- 2026 11 2026 11 2021.5 5 c 128,-1,41
- 2017 -1 2017 -1 2008.5 -6 c 128,-1,42
- 2000 -11 2000 -11 1987 -11 c 0,43,44
- 1977 -11 1977 -11 1945 7 c 2,45,-1
- 1081 504 l 2,46,47
- 1039 526 1039 526 1039 555 c 0,48,49
- 1039 567 1039 567 1045 576 c 128,-1,50
- 1051 585 1051 585 1059 590 c 128,-1,51
- 1067 595 1067 595 1086 606 c 2,27,-1
-1172 555 m 1,52,-1
- 1942 111 l 1,53,-1
- 1942 999 l 1,54,-1
- 1172 555 l 1,52,-1
-EndSplineSet
-Validated: 5
+1039 550 m 1,0,1
+ 1039 552 1039 552 1039 555 c 128,-1,2
+ 1039 558 1039 558 1039 560 c 1,3,-1
+ 278 999 l 1,4,-1
+ 278 111 l 1,5,-1
+ 1039 550 l 1,0,1
+1181 550 m 1,6,-1
+ 1942 111 l 1,7,-1
+ 1942 999 l 1,8,-1
+ 1181 560 l 1,9,10
+ 1181 558 1181 558 1181 555 c 128,-1,11
+ 1181 552 1181 552 1181 550 c 1,6,-1
+1113 621 m 1,12,-1
+ 1949 1106 l 2,13,14
+ 1977 1122 1977 1122 1987 1121 c 1,15,16
+ 2000 1121 2000 1121 2008.5 1116 c 128,-1,17
+ 2017 1111 2017 1111 2021.5 1105 c 128,-1,18
+ 2026 1099 2026 1099 2028.5 1086 c 128,-1,19
+ 2031 1073 2031 1073 2031 1065 c 2,20,-1
+ 2031 1041 l 1,21,-1
+ 2031 69 l 1,22,-1
+ 2031 45 l 2,23,24
+ 2031 37 2031 37 2028.5 24 c 128,-1,25
+ 2026 11 2026 11 2021.5 5 c 128,-1,26
+ 2017 -1 2017 -1 2008.5 -6 c 128,-1,27
+ 2000 -11 2000 -11 1987 -11 c 0,28,29
+ 1977 -11 1977 -11 1945 7 c 2,30,-1
+ 1110 487 l 1,31,-1
+ 273 4 l 2,32,33
+ 246 -12 246 -12 233 -11 c 1,34,35
+ 217 -11 217 -11 208 -4 c 0,36,37
+ 198 3 198 3 194 17.5 c 128,-1,38
+ 190 32 190 32 189.5 41.5 c 128,-1,39
+ 189 51 189 51 189 69 c 2,40,-1
+ 189 1041 l 2,41,42
+ 189 1058 189 1058 189.5 1068 c 128,-1,43
+ 190 1078 190 1078 194 1092.5 c 128,-1,44
+ 198 1107 198 1107 207.5 1114 c 128,-1,45
+ 217 1121 217 1121 233 1121 c 0,46,47
+ 250 1121 250 1121 278 1103 c 1,48,-1
+ 1113 621 l 1,12,-1
+EndSplineSet
+Validated: 1
EndChar
StartChar: uni2A3F
Binary file lib/fonts/IsabelleText.ttf has changed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/scripts/bash Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,31 @@
+#!/usr/bin/env perl
+#
+# Author: Makarius
+#
+# bash - invoke shell command line (with robust signal handling)
+#
+
+use warnings;
+use strict;
+
+
+# args
+
+my ($group, $script_name, $pid_name, $output_name) = @ARGV;
+
+
+# process id
+
+if ($group eq "group") {
+ use POSIX "setsid";
+ POSIX::setsid || die $!;
+}
+
+open (PID_FILE, ">", $pid_name) || die $!;
+print PID_FILE "$$";
+close PID_FILE;
+
+
+# exec script
+
+exec qq/exec bash '$script_name' > '$output_name'/;
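
For orientation, a minimal usage sketch (not part of the changeset) of how the new
lib/scripts/bash wrapper might be driven from Perl. The temp-file paths and the
"nogroup" placeholder are hypothetical; only the argument order (group flag, script,
pid file, output file) comes from the wrapper above.

  use warnings;
  use strict;

  # Hypothetical inputs; only the argument order matches lib/scripts/bash.
  my $script   = "/tmp/demo-script.bash";
  my $pid_file = "/tmp/demo-script.pid";
  my $out_file = "/tmp/demo-script.out";

  open(my $fh, ">", $script) or die $!;
  print $fh "echo hello\n";
  close $fh;

  # Any first argument other than "group" keeps the caller's session;
  # "group" would make the wrapper call setsid before exec'ing bash.
  system("lib/scripts/bash", "nogroup", $script, $pid_file, $out_file) == 0
    or die "wrapper failed: $?";

  # The wrapper writes its own pid to $pid_file before exec'ing bash, so an
  # asynchronous caller could signal the whole job via that pid.
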
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/scripts/keywords Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,124 @@
+#!/usr/bin/env perl
+#
+# Author: Makarius
+#
+# keywords - generate outer syntax keyword files from session logs
+#
+
+use warnings;
+use strict;
+
+
+## arguments
+
+my ($keywords_name, $sessions) = @ARGV;
+
+
+## keywords
+
+my %keywords;
+
+sub set_keyword {
+ my ($name, $kind) = @_;
+ if (defined $keywords{$name} and $keywords{$name} ne $kind and $keywords{$name} ne "minor") {
+ if ($kind ne "minor") {
+ print STDERR "### Inconsistent declaration of keyword \"${name}\": $keywords{$name} vs ${kind}\n";
+ $keywords{$name} = $kind;
+ }
+ } else {
+ $keywords{$name} = $kind;
+ }
+}
+
+sub collect_keywords {
+ while(<STDIN>) {
+ if (m/^Outer syntax keyword:\s*"(.*)"/) {
+ my $name = $1;
+ &set_keyword($name, "minor");
+ }
+ elsif (m/^Outer syntax command:\s*"(.*)"\s*\((.*)\)/) {
+ my $name = $1;
+ my $kind = $2;
+ &set_keyword($name, $kind);
+ }
+ }
+}
+
+
+## Emacs output
+
+sub emacs_output {
+ my @kinds = (
+ "major",
+ "minor",
+ "control",
+ "diag",
+ "theory-begin",
+ "theory-switch",
+ "theory-end",
+ "theory-heading",
+ "theory-decl",
+ "theory-script",
+ "theory-goal",
+ "qed",
+ "qed-block",
+ "qed-global",
+ "proof-heading",
+ "proof-goal",
+ "proof-block",
+ "proof-open",
+ "proof-close",
+ "proof-chain",
+ "proof-decl",
+ "proof-asm",
+ "proof-asm-goal",
+ "proof-script"
+ );
+ my $file = $keywords_name eq "" ? "isar-keywords.el" : "isar-keywords-${keywords_name}.el";
+ open (OUTPUT, "> ${file}") || die "$!";
+ select OUTPUT;
+
+ print ";;\n";
+ print ";; Keyword classification tables for Isabelle/Isar.\n";
+ print ";; Generated from ${sessions}.\n";
+ print ";; *** DO NOT EDIT *** DO NOT EDIT *** DO NOT EDIT ***\n";
+ print ";;\n";
+
+ for my $kind (@kinds) {
+ my @names;
+ for my $name (keys(%keywords)) {
+ if ($kind eq "major" ? $keywords{$name} ne "minor" : $keywords{$name} eq $kind) {
+ if ($kind ne "minor" or $name =~ m/^[A-Za-z0-9_]+$/) {
+ push @names, $name;
+ }
+ }
+ }
+ @names = sort(@names);
+
+ print "\n(defconst isar-keywords-${kind}";
+ print "\n '(";
+ my $first = 1;
+ for my $name (@names) {
+ $name =~ s/([\.\*\+\?\[\]\^\$])/\\\\$1/g;
+ if ($first) {
+ print "\"${name}\"";
+ $first = 0;
+ }
+ else {
+ print "\n \"${name}\"";
+ }
+ }
+ print "))\n";
+ }
+ print "\n(provide 'isar-keywords)\n";
+
+ close OUTPUT;
+ select;
+ print STDERR "${file}\n";
+}
+
+
+## main
+
+&collect_keywords();
+&emacs_output();
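
For orientation, a minimal sketch (not part of the changeset) of the session-log lines
the generator above consumes. The sample keywords and the "demo session" label are
illustrative only; the two command-line arguments are the optional keyword-file name
and the text quoted in the generated header.

  use warnings;
  use strict;

  # Illustrative log lines in the format matched by collect_keywords.
  my $log = join "",
    "Outer syntax keyword: \"infixl\"\n",
    "Outer syntax command: \"theorem\" (theory-goal)\n",
    "Outer syntax command: \"apply\" (proof-script)\n";

  # Feeding such lines to lib/scripts/keywords classifies "infixl" as a minor
  # keyword and the commands by the kind given in parentheses, then writes
  # isar-keywords.el (or isar-keywords-NAME.el) in the current directory.
  open(my $in, "|-", "lib/scripts/keywords", "", "demo session") or die $!;
  print $in $log;
  close $in or die "keywords failed: $?";
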
--- a/lib/scripts/keywords.pl Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-#
-# Author: Makarius
-#
-# keywords.pl - generate outer syntax keyword files from session logs
-#
-
-## arguments
-
-my ($keywords_name, $sessions) = @ARGV;
-
-
-## keywords
-
-my %keywords;
-
-sub set_keyword {
- my ($name, $kind) = @_;
- if (defined $keywords{$name} and $keywords{$name} ne $kind and $keywords{$name} ne "minor") {
- if ($kind ne "minor") {
- print STDERR "### Inconsistent declaration of keyword \"${name}\": $keywords{$name} vs ${kind}\n";
- $keywords{$name} = $kind;
- }
- } else {
- $keywords{$name} = $kind;
- }
-}
-
-sub collect_keywords {
- while(<STDIN>) {
- if (m/^Outer syntax keyword:\s*"(.*)"/) {
- my $name = $1;
- &set_keyword($name, "minor");
- }
- elsif (m/^Outer syntax command:\s*"(.*)"\s*\((.*)\)/) {
- my $name = $1;
- my $kind = $2;
- &set_keyword($name, $kind);
- }
- }
-}
-
-
-## Emacs output
-
-sub emacs_output {
- my @kinds = (
- "major",
- "minor",
- "control",
- "diag",
- "theory-begin",
- "theory-switch",
- "theory-end",
- "theory-heading",
- "theory-decl",
- "theory-script",
- "theory-goal",
- "qed",
- "qed-block",
- "qed-global",
- "proof-heading",
- "proof-goal",
- "proof-block",
- "proof-open",
- "proof-close",
- "proof-chain",
- "proof-decl",
- "proof-asm",
- "proof-asm-goal",
- "proof-script"
- );
- my $file = $keywords_name eq "" ? "isar-keywords.el" : "isar-keywords-${keywords_name}.el";
- open (OUTPUT, "> ${file}") || die "$!";
- select OUTPUT;
-
- print ";;\n";
- print ";; Keyword classification tables for Isabelle/Isar.\n";
- print ";; Generated from ${sessions}.\n";
- print ";; *** DO NOT EDIT *** DO NOT EDIT *** DO NOT EDIT ***\n";
- print ";;\n";
-
- for my $kind (@kinds) {
- my @names;
- for my $name (keys(%keywords)) {
- if ($kind eq "major" ? $keywords{$name} ne "minor" : $keywords{$name} eq $kind) {
- if ($kind ne "minor" or $name =~ m/^[A-Za-z0-9_]+$/) {
- push @names, $name;
- }
- }
- }
- @names = sort(@names);
-
- print "\n(defconst isar-keywords-${kind}";
- print "\n '(";
- my $first = 1;
- for my $name (@names) {
- $name =~ s/([\.\*\+\?\[\]\^\$])/\\\\$1/g;
- if ($first) {
- print "\"${name}\"";
- $first = 0;
- }
- else {
- print "\n \"${name}\"";
- }
- }
- print "))\n";
- }
- print "\n(provide 'isar-keywords)\n";
-
- close OUTPUT;
- select;
- print STDERR "${file}\n";
-}
-
-
-## main
-
-&collect_keywords();
-&emacs_output();
-
--- a/lib/scripts/run-mosml Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,57 +0,0 @@
-#!/usr/bin/env bash
-#
-# Author: Markus Wenzel, TU Muenchen
-#
-# Moscow ML 2.00 startup script
-
-export -n INFILE OUTFILE MLTEXT TERMINATE NOWRITE
-
-
-## diagnostics
-
-function fail_out()
-{
- echo "Unable to create output heap file: \"$OUTFILE\"" >&2
- exit 2
-}
-
-
-## prepare databases
-
-MOSML="mosml -P sml90"
-
-if [ -z "$INFILE" ]; then
- EXIT='load "OS"; fun exit 0 = (OS.Process.exit OS.Process.success): unit | exit _ = OS.Process.exit OS.Process.failure;'
-else
- EXIT=""
- echo "Cannot load images yet!" >&2
- exit 2
-fi
-
-if [ -z "$OUTFILE" ]; then
- COMMIT='fun commit () = (TextIO.output (TextIO.stdErr, "Error - Database is not opened for writing.\n"); false);'
-else
- COMMIT="fun commit () = true;"
- echo "WARNING: cannot save images yet!" >&2
- [ -f "$OUTFILE" ] && { chmod +w "$OUTFILE" || fail_out; }
-fi
-
-
-## run it!
-
-MLTEXT="$EXIT $COMMIT $MLTEXT"
-MLEXIT="commit();"
-
-if [ -z "$TERMINATE" ]; then
- FEEDER_OPTS=""
-else
- FEEDER_OPTS="-q"
-fi
-
-"$ISABELLE_HOME/lib/scripts/feeder" -p -h "$MLTEXT" -t "$MLEXIT" $FEEDER_OPTS | \
- { read FPID; "$ML_HOME"/$MOSML $ML_OPTIONS 2>&1; RC="$?"; kill -HUP "$FPID"; exit "$RC"; }
-RC="$?"
-
-[ -n "$OUTFILE" -a -f "$OUTFILE" -a -n "$NOWRITE" ] && chmod -w "$OUTFILE"
-
-exit "$RC"
--- a/lib/scripts/system.pl Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-#
-# Author: Makarius
-#
-# system.pl - invoke shell command line (with robust signal handling)
-#
-
-# args
-
-($group, $script_name, $pid_name, $output_name) = @ARGV;
-
-
-# process id
-
-if ($group eq "group") {
- use POSIX "setsid";
- POSIX::setsid || die $!;
-}
-
-open (PID_FILE, ">", $pid_name) || die $!;
-print PID_FILE "$$";
-close PID_FILE;
-
-
-# exec script
-
-$SIG{'INT'} = "DEFAULT"; #paranoia setting, required for Cygwin
-exec qq/exec bash '$script_name' > '$output_name'/;
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/scripts/unsymbolize Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,65 @@
+#!/usr/bin/env perl
+#
+# Author: Markus Wenzel, TU Muenchen
+#
+# unsymbolize - remove unreadable symbol names from sources
+#
+
+use warnings;
+use strict;
+
+sub unsymbolize {
+ my ($file) = @_;
+
+ open (FILE, $file) || die $!;
+ undef $/; my $text = <FILE>; $/ = "\n"; # slurp whole file
+ close FILE || die $!;
+
+ $_ = $text;
+
+ # Pure
+ s/\\?\\<And>/!!/g;
+ s/\\?\\<Colon>/::/g;
+ s/\\?\\<Longrightarrow>/==>/g;
+ s/\\?\\<Midarrow>\\?\\<Rightarrow>/==>/g;
+ s/\\?\\<Rightarrow>/=>/g;
+ s/\\?\\<equiv>/==/g;
+ s/\\?\\<dots>/.../g;
+ s/\\?\\<lbrakk> ?/[| /g;
+ s/\\?\\ ?<rbrakk>/ |]/g;
+ s/\\?\\<lparr> ?/(| /g;
+ s/\\?\\ ?<rparr>/ |)/g;
+ # HOL
+ s/\\?\\<longleftrightarrow>/<->/g;
+ s/\\?\\<longrightarrow>/-->/g;
+ s/\\?\\<midarrow>\\?\\<rightarrow>/-->/g;
+ s/\\?\\<rightarrow>/->/g;
+ s/\\?\\<not>/~/g;
+ s/\\?\\<notin>/~:/g;
+ s/\\?\\<noteq>/~=/g;
+ s/\\?\\<some> ?/SOME /g;
+ # outer syntax
+ s/\\?\\<rightleftharpoons>/==/g;
+ s/\\?\\<rightharpoonup>/=>/g;
+ s/\\?\\<leftharpoondown>/<=/g;
+
+ my $result = $_;
+
+ if ($text ne $result) {
+ print STDERR "fixing $file\n";
+ if (! -f "$file~~") {
+    rename ($file, "$file~~") || die $!;
+ }
+ open (FILE, "> $file") || die $!;
+ print FILE $result;
+ close FILE || die $!;
+ }
+}
+
+
+## main
+
+foreach my $file (@ARGV) {
+ eval { &unsymbolize($file); };
+ if ($@) { print STDERR "*** unsymbolize $file: ", $@, "\n"; }
+}
--- a/lib/scripts/unsymbolize.pl Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-#
-# Author: Markus Wenzel, TU Muenchen
-#
-# unsymbolize.pl - remove unreadable symbol names from sources
-#
-
-sub unsymbolize {
- my ($file) = @_;
-
- open (FILE, $file) || die $!;
- undef $/; $text = <FILE>; $/ = "\n"; # slurp whole file
- close FILE || die $!;
-
- $_ = $text;
-
- # Pure
- s/\\?\\<And>/!!/g;
- s/\\?\\<Colon>/::/g;
- s/\\?\\<Longrightarrow>/==>/g;
- s/\\?\\<Midarrow>\\?\\<Rightarrow>/==>/g;
- s/\\?\\<Rightarrow>/=>/g;
- s/\\?\\<equiv>/==/g;
- s/\\?\\<dots>/.../g;
- s/\\?\\<lbrakk> ?/[| /g;
- s/\\?\\ ?<rbrakk>/ |]/g;
- s/\\?\\<lparr> ?/(| /g;
- s/\\?\\ ?<rparr>/ |)/g;
- # HOL
- s/\\?\\<longleftrightarrow>/<->/g;
- s/\\?\\<longrightarrow>/-->/g;
- s/\\?\\<midarrow>\\?\\<rightarrow>/-->/g;
- s/\\?\\<rightarrow>/->/g;
- s/\\?\\<not>/~/g;
- s/\\?\\<notin>/~:/g;
- s/\\?\\<noteq>/~=/g;
- s/\\?\\<some> ?/SOME /g;
- # outer syntax
- s/\\?\\<rightleftharpoons>/==/g;
- s/\\?\\<rightharpoonup>/=>/g;
- s/\\?\\<leftharpoondown>/<=/g;
-
- $result = $_;
-
- if ($text ne $result) {
- print STDERR "fixing $file\n";
- if (! -f "$file~~") {
- rename $file, "$file~~" || die $!;
- }
- open (FILE, "> $file") || die $!;
- print FILE $result;
- close FILE || die $!;
- }
-}
-
-
-## main
-
-foreach $file (@ARGV) {
- eval { &unsymbolize($file); };
- if ($@) { print STDERR "*** unsymbolize $file: ", $@, "\n"; }
-}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/scripts/yxml Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,37 @@
+#!/usr/bin/env perl
+#
+# Author: Makarius
+#
+# yxml - simple XML to YXML converter
+#
+
+use warnings;
+use strict;
+
+use XML::Parser;
+
+binmode(STDOUT, ":utf8");
+
+sub handle_start {
+ print chr(5), chr(6), $_[1];
+ for (my $i = 2; $i <= $#_; $i++) {
+ print ($i % 2 == 0 ? chr(6) : "=");
+ print $_[$i];
+ }
+ print chr(5);
+}
+
+sub handle_end {
+ print chr(5), chr(6), chr(5);
+}
+
+sub handle_char {
+ print $_[1];
+}
+
+my $parser = XML::Parser->new(Handlers =>
+ {Start => \&handle_start,
+ End => \&handle_end,
+ Char => \&handle_char});
+
+$parser->parse(*STDIN) or die $!;
--- a/lib/scripts/yxml.pl Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-#
-# Author: Makarius
-#
-# yxml.pl - simple XML to YXML converter
-#
-
-use strict;
-use XML::Parser;
-
-binmode(STDOUT, ":utf8");
-
-sub handle_start {
- print chr(5), chr(6), $_[1];
- for (my $i = 2; $i <= $#_; $i++) {
- print ($i % 2 == 0 ? chr(6) : "=");
- print $_[$i];
- }
- print chr(5);
-}
-
-sub handle_end {
- print chr(5), chr(6), chr(5);
-}
-
-sub handle_char {
- print $_[1];
-}
-
-my $parser = new XML::Parser(Handlers =>
- {Start => \&handle_start,
- End => \&handle_end,
- Char => \&handle_char});
-
-$parser->parse(*STDIN) or die $!;
-
--- a/src/CCL/Set.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/CCL/Set.thy Tue Feb 09 17:06:05 2010 +0100
@@ -40,11 +40,11 @@
"@Bex" :: "[idt, 'a set, o] => o" ("(EX _:_./ _)" [0, 0, 0] 10)
translations
- "{x. P}" == "Collect(%x. P)"
- "INT x:A. B" == "INTER(A, %x. B)"
- "UN x:A. B" == "UNION(A, %x. B)"
- "ALL x:A. P" == "Ball(A, %x. P)"
- "EX x:A. P" == "Bex(A, %x. P)"
+ "{x. P}" == "CONST Collect(%x. P)"
+ "INT x:A. B" == "CONST INTER(A, %x. B)"
+ "UN x:A. B" == "CONST UNION(A, %x. B)"
+ "ALL x:A. P" == "CONST Ball(A, %x. P)"
+ "EX x:A. P" == "CONST Bex(A, %x. P)"
local
--- a/src/CCL/Type.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/CCL/Type.thy Tue Feb 09 17:06:05 2010 +0100
@@ -39,15 +39,15 @@
"@Subtype" :: "[idt, 'a set, o] => 'a set" ("(1{_: _ ./ _})")
translations
- "PROD x:A. B" => "Pi(A, %x. B)"
- "A -> B" => "Pi(A, %_. B)"
- "SUM x:A. B" => "Sigma(A, %x. B)"
- "A * B" => "Sigma(A, %_. B)"
- "{x: A. B}" == "Subtype(A, %x. B)"
+ "PROD x:A. B" => "CONST Pi(A, %x. B)"
+ "A -> B" => "CONST Pi(A, %_. B)"
+ "SUM x:A. B" => "CONST Sigma(A, %x. B)"
+ "A * B" => "CONST Sigma(A, %_. B)"
+ "{x: A. B}" == "CONST Subtype(A, %x. B)"
print_translation {*
- [("Pi", dependent_tr' ("@Pi", "@->")),
- ("Sigma", dependent_tr' ("@Sigma", "@*"))] *}
+ [(@{const_syntax Pi}, dependent_tr' ("@Pi", "@->")),
+ (@{const_syntax Sigma}, dependent_tr' ("@Sigma", "@*"))] *}
axioms
Subtype_def: "{x:A. P(x)} == {x. x:A & P(x)}"
--- a/src/CTT/CTT.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/CTT/CTT.thy Tue Feb 09 17:06:05 2010 +0100
@@ -63,8 +63,8 @@
"_PROD" :: "[idt,t,t]=>t" ("(3PROD _:_./ _)" 10)
"_SUM" :: "[idt,t,t]=>t" ("(3SUM _:_./ _)" 10)
translations
- "PROD x:A. B" == "Prod(A, %x. B)"
- "SUM x:A. B" == "Sum(A, %x. B)"
+ "PROD x:A. B" == "CONST Prod(A, %x. B)"
+ "SUM x:A. B" == "CONST Sum(A, %x. B)"
abbreviation
Arrow :: "[t,t]=>t" (infixr "-->" 30) where
--- a/src/Cube/Cube.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Cube/Cube.thy Tue Feb 09 17:06:05 2010 +0100
@@ -43,9 +43,9 @@
translations
("prop") "x:X" == ("prop") "|- x:X"
- "Lam x:A. B" == "Abs(A, %x. B)"
- "Pi x:A. B" => "Prod(A, %x. B)"
- "A -> B" => "Prod(A, %_. B)"
+ "Lam x:A. B" == "CONST Abs(A, %x. B)"
+ "Pi x:A. B" => "CONST Prod(A, %x. B)"
+ "A -> B" => "CONST Prod(A, %_. B)"
syntax (xsymbols)
Trueprop :: "[context', typing'] => prop" ("(_/ \<turnstile> _)")
@@ -54,7 +54,7 @@
Pi :: "[idt, term, term] => term" ("(3\<Pi> _:_./ _)" [0, 0] 10)
arrow :: "[term, term] => term" (infixr "\<rightarrow>" 10)
-print_translation {* [("Prod", dependent_tr' ("Pi", "arrow"))] *}
+print_translation {* [(@{const_syntax Prod}, dependent_tr' ("Pi", "arrow"))] *}
axioms
s_b: "*: []"
--- a/src/FOL/IFOL.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/FOL/IFOL.thy Tue Feb 09 17:06:05 2010 +0100
@@ -772,7 +772,7 @@
translations
"_Let(_binds(b, bs), e)" == "_Let(b, _Let(bs, e))"
- "let x = a in e" == "Let(a, %x. e)"
+ "let x = a in e" == "CONST Let(a, %x. e)"
lemma LetI:
--- a/src/FOL/simpdata.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/FOL/simpdata.ML Tue Feb 09 17:06:05 2010 +0100
@@ -27,9 +27,9 @@
(*Congruence rules for = or <-> (instead of ==)*)
fun mk_meta_cong rl =
- Drule.standard (mk_meta_eq (mk_meta_prems rl))
- handle THM _ =>
- error("Premises and conclusion of congruence rules must use =-equality or <->");
+ Drule.export_without_context (mk_meta_eq (mk_meta_prems rl))
+ handle THM _ =>
+ error("Premises and conclusion of congruence rules must use =-equality or <->");
val mksimps_pairs =
[("op -->", [@{thm mp}]), ("op &", [@{thm conjunct1}, @{thm conjunct2}]),
--- a/src/FOLP/simp.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/FOLP/simp.ML Tue Feb 09 17:06:05 2010 +0100
@@ -519,7 +519,7 @@
(* Compute Congruence rules for individual constants using the substition
rules *)
-val subst_thms = map Drule.standard subst_thms;
+val subst_thms = map Drule.export_without_context subst_thms;
fun exp_app(0,t) = t
--- a/src/HOL/Algebra/FiniteProduct.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Algebra/FiniteProduct.thy Tue Feb 09 17:06:05 2010 +0100
@@ -302,7 +302,7 @@
"_finprod" :: "index => idt => 'a set => 'b => 'b"
("(3\<Otimes>__\<in>_. _)" [1000, 0, 51, 10] 10)
translations
- "\<Otimes>\<index>i:A. b" == "finprod \<struct>\<index> (%i. b) A"
+ "\<Otimes>\<index>i:A. b" == "CONST finprod \<struct>\<index> (%i. b) A"
-- {* Beware of argument permutation! *}
lemma (in comm_monoid) finprod_empty [simp]:
--- a/src/HOL/Algebra/Ring.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Algebra/Ring.thy Tue Feb 09 17:06:05 2010 +0100
@@ -213,7 +213,7 @@
"_finsum" :: "index => idt => 'a set => 'b => 'b"
("(3\<Oplus>__\<in>_. _)" [1000, 0, 51, 10] 10)
translations
- "\<Oplus>\<index>i:A. b" == "finsum \<struct>\<index> (%i. b) A"
+ "\<Oplus>\<index>i:A. b" == "CONST finsum \<struct>\<index> (%i. b) A"
-- {* Beware of argument permutation! *}
context abelian_monoid begin
--- a/src/HOL/Algebra/poly/UnivPoly2.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Algebra/poly/UnivPoly2.thy Tue Feb 09 17:06:05 2010 +0100
@@ -139,7 +139,7 @@
end
-instance up :: ("{times, comm_monoid_add}") Ring_and_Field.dvd ..
+instance up :: ("{times, comm_monoid_add}") Rings.dvd ..
instantiation up :: ("{times, one, comm_monoid_add, uminus, minus}") inverse
begin
--- a/src/HOL/Algebras.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Algebras.thy Tue Feb 09 17:06:05 2010 +0100
@@ -10,15 +10,30 @@
subsection {* Generic algebraic structures *}
+text {*
+  These locales provide basic structures for interpretation into
+  bigger structures; extend them only with care, since interpretation
+  may otherwise have undesired effects.
+*}
+
+ML {*
+structure Ac_Simps = Named_Thms(
+ val name = "ac_simps"
+ val description = "associativity and commutativity simplification rules"
+)
+*}
+
+setup Ac_Simps.setup
+
locale semigroup =
fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "*" 70)
- assumes assoc: "a * b * c = a * (b * c)"
+ assumes assoc [ac_simps]: "a * b * c = a * (b * c)"
locale abel_semigroup = semigroup +
- assumes commute: "a * b = b * a"
+ assumes commute [ac_simps]: "a * b = b * a"
begin
-lemma left_commute:
+lemma left_commute [ac_simps]:
"b * (a * c) = a * (b * c)"
proof -
have "(b * a) * c = (a * b) * c"
@@ -39,25 +54,6 @@
end
-locale lattice = inf!: abel_semigroup inf + sup!: abel_semigroup sup
- for inf (infixl "\<sqinter>" 70) and sup (infixl "\<squnion>" 70) +
- assumes sup_inf_absorb: "a \<squnion> (a \<sqinter> b) = a"
- and inf_sup_absorb: "a \<sqinter> (a \<squnion> b) = a"
-
-sublocale lattice < inf!: semilattice inf
-proof
- fix a
- have "a \<sqinter> (a \<squnion> (a \<sqinter> a)) = a" by (fact inf_sup_absorb)
- then show "a \<sqinter> a = a" by (simp add: sup_inf_absorb)
-qed
-
-sublocale lattice < sup!: semilattice sup
-proof
- fix a
- have "a \<squnion> (a \<sqinter> (a \<squnion> a)) = a" by (fact sup_inf_absorb)
- then show "a \<squnion> a = a" by (simp add: inf_sup_absorb)
-qed
-
subsection {* Generic algebraic operations *}
@@ -158,9 +154,4 @@
end
-syntax
- "_index1" :: index ("\<^sub>1")
-translations
- (index) "\<^sub>1" => (index) "\<^bsub>\<struct>\<^esub>"
-
end
\ No newline at end of file
--- a/src/HOL/Archimedean_Field.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Archimedean_Field.thy Tue Feb 09 17:06:05 2010 +0100
@@ -12,7 +12,7 @@
text {* Archimedean fields have no infinite elements. *}
-class archimedean_field = ordered_field + number_ring +
+class archimedean_field = linordered_field + number_ring +
assumes ex_le_of_int: "\<exists>z. x \<le> of_int z"
lemma ex_less_of_int:
--- a/src/HOL/Auth/Message.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Auth/Message.thy Tue Feb 09 17:06:05 2010 +0100
@@ -58,7 +58,7 @@
translations
"{|x, y, z|}" == "{|x, {|y, z|}|}"
- "{|x, y|}" == "MPair x y"
+ "{|x, y|}" == "CONST MPair x y"
constdefs
--- a/src/HOL/Code_Numeral.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Code_Numeral.thy Tue Feb 09 17:06:05 2010 +0100
@@ -144,7 +144,7 @@
subsection {* Basic arithmetic *}
-instantiation code_numeral :: "{minus, ordered_semidom, semiring_div, linorder}"
+instantiation code_numeral :: "{minus, linordered_semidom, semiring_div, linorder}"
begin
definition [simp, code del]:
--- a/src/HOL/Decision_Procs/Approximation.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Approximation.thy Tue Feb 09 17:06:05 2010 +0100
@@ -1431,7 +1431,7 @@
moreover have "0 \<le> exp t / real (fact (get_even n)) * (real x) ^ (get_even n)"
by (auto intro!: mult_nonneg_nonneg divide_nonneg_pos simp add: get_even zero_le_even_power exp_gt_zero)
ultimately show ?thesis
- using get_odd exp_gt_zero by (auto intro!: pordered_cancel_semiring_class.mult_nonneg_nonneg)
+ using get_odd exp_gt_zero by (auto intro!: mult_nonneg_nonneg)
qed
finally have "real (lb_exp_horner prec (get_even n) 1 1 x) \<le> exp (real x)" .
} moreover
@@ -1451,7 +1451,7 @@
moreover have "exp t / real (fact (get_odd n)) * (real x) ^ (get_odd n) \<le> 0"
by (auto intro!: mult_nonneg_nonpos divide_nonpos_pos simp add: x_less_zero exp_gt_zero)
ultimately have "exp (real x) \<le> (\<Sum>j = 0..<get_odd n. 1 / real (fact j) * real x ^ j)"
- using get_odd exp_gt_zero by (auto intro!: pordered_cancel_semiring_class.mult_nonneg_nonneg)
+ using get_odd exp_gt_zero by (auto intro!: mult_nonneg_nonneg)
also have "\<dots> \<le> real (ub_exp_horner prec (get_odd n) 1 1 x)"
using bounds(2) by auto
finally have "exp (real x) \<le> real (ub_exp_horner prec (get_odd n) 1 1 x)" .
--- a/src/HOL/Decision_Procs/Decision_Procs.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Decision_Procs.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2,7 +2,8 @@
theory Decision_Procs
imports
- Commutative_Ring Cooper Ferrack MIR Approximation Dense_Linear_Order Parametric_Ferrante_Rackoff
+ Commutative_Ring Cooper Ferrack MIR Approximation Dense_Linear_Order
+ Parametric_Ferrante_Rackoff
Commutative_Ring_Complete
"ex/Commutative_Ring_Ex" "ex/Approximation_Ex" "ex/Dense_Linear_Order_Ex"
begin
--- a/src/HOL/Decision_Procs/Dense_Linear_Order.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Dense_Linear_Order.thy Tue Feb 09 17:06:05 2010 +0100
@@ -208,7 +208,7 @@
section {* The classical QE after Langford for dense linear orders *}
-context dense_linear_order
+context dense_linorder
begin
lemma interval_empty_iff:
@@ -265,7 +265,7 @@
lemmas dlo_simps[noatp] = order_refl less_irrefl not_less not_le exists_neq
le_less neq_iff linear less_not_permute
-lemma axiom[noatp]: "dense_linear_order (op \<le>) (op <)" by (rule dense_linear_order_axioms)
+lemma axiom[noatp]: "dense_linorder (op \<le>) (op <)" by (rule dense_linorder_axioms)
lemma atoms[noatp]:
shows "TERM (less :: 'a \<Rightarrow> _)"
and "TERM (less_eq :: 'a \<Rightarrow> _)"
@@ -409,17 +409,17 @@
end
-locale constr_dense_linear_order = linorder_no_lb + linorder_no_ub +
+locale constr_dense_linorder = linorder_no_lb + linorder_no_ub +
fixes between
assumes between_less: "less x y \<Longrightarrow> less x (between x y) \<and> less (between x y) y"
and between_same: "between x x = x"
-sublocale constr_dense_linear_order < dense_linear_order
+sublocale constr_dense_linorder < dense_linorder
apply unfold_locales
using gt_ex lt_ex between_less
by (auto, rule_tac x="between x y" in exI, simp)
-context constr_dense_linear_order
+context constr_dense_linorder
begin
lemma rinf_U[noatp]:
@@ -500,8 +500,8 @@
lemmas npi_thms[noatp] = npi_conj npi_disj npi_eq npi_neq npi_lt npi_le npi_gt npi_ge npi_P
lemmas lin_dense_thms[noatp] = lin_dense_conj lin_dense_disj lin_dense_eq lin_dense_neq lin_dense_lt lin_dense_le lin_dense_gt lin_dense_ge lin_dense_P
-lemma ferrack_axiom[noatp]: "constr_dense_linear_order less_eq less between"
- by (rule constr_dense_linear_order_axioms)
+lemma ferrack_axiom[noatp]: "constr_dense_linorder less_eq less between"
+ by (rule constr_dense_linorder_axioms)
lemma atoms[noatp]:
shows "TERM (less :: 'a \<Rightarrow> _)"
and "TERM (less_eq :: 'a \<Rightarrow> _)"
@@ -551,7 +551,7 @@
subsection {* Ferrante and Rackoff algorithm over ordered fields *}
-lemma neg_prod_lt:"(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x < 0) == (x > 0))"
+lemma neg_prod_lt:"(c\<Colon>'a\<Colon>linordered_field) < 0 \<Longrightarrow> ((c*x < 0) == (x > 0))"
proof-
assume H: "c < 0"
have "c*x < 0 = (0/c < x)" by (simp only: neg_divide_less_eq[OF H] algebra_simps)
@@ -559,7 +559,7 @@
finally show "(c*x < 0) == (x > 0)" by simp
qed
-lemma pos_prod_lt:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x < 0) == (x < 0))"
+lemma pos_prod_lt:"(c\<Colon>'a\<Colon>linordered_field) > 0 \<Longrightarrow> ((c*x < 0) == (x < 0))"
proof-
assume H: "c > 0"
hence "c*x < 0 = (0/c > x)" by (simp only: pos_less_divide_eq[OF H] algebra_simps)
@@ -567,7 +567,7 @@
finally show "(c*x < 0) == (x < 0)" by simp
qed
-lemma neg_prod_sum_lt: "(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x + t< 0) == (x > (- 1/c)*t))"
+lemma neg_prod_sum_lt: "(c\<Colon>'a\<Colon>linordered_field) < 0 \<Longrightarrow> ((c*x + t< 0) == (x > (- 1/c)*t))"
proof-
assume H: "c < 0"
have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp)
@@ -576,7 +576,7 @@
finally show "(c*x + t < 0) == (x > (- 1/c)*t)" by simp
qed
-lemma pos_prod_sum_lt:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x + t < 0) == (x < (- 1/c)*t))"
+lemma pos_prod_sum_lt:"(c\<Colon>'a\<Colon>linordered_field) > 0 \<Longrightarrow> ((c*x + t < 0) == (x < (- 1/c)*t))"
proof-
assume H: "c > 0"
have "c*x + t< 0 = (c*x < -t)" by (subst less_iff_diff_less_0 [of "c*x" "-t"], simp)
@@ -585,10 +585,10 @@
finally show "(c*x + t < 0) == (x < (- 1/c)*t)" by simp
qed
-lemma sum_lt:"((x::'a::pordered_ab_group_add) + t < 0) == (x < - t)"
+lemma sum_lt:"((x::'a::ordered_ab_group_add) + t < 0) == (x < - t)"
using less_diff_eq[where a= x and b=t and c=0] by simp
-lemma neg_prod_le:"(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x <= 0) == (x >= 0))"
+lemma neg_prod_le:"(c\<Colon>'a\<Colon>linordered_field) < 0 \<Longrightarrow> ((c*x <= 0) == (x >= 0))"
proof-
assume H: "c < 0"
have "c*x <= 0 = (0/c <= x)" by (simp only: neg_divide_le_eq[OF H] algebra_simps)
@@ -596,7 +596,7 @@
finally show "(c*x <= 0) == (x >= 0)" by simp
qed
-lemma pos_prod_le:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x <= 0) == (x <= 0))"
+lemma pos_prod_le:"(c\<Colon>'a\<Colon>linordered_field) > 0 \<Longrightarrow> ((c*x <= 0) == (x <= 0))"
proof-
assume H: "c > 0"
hence "c*x <= 0 = (0/c >= x)" by (simp only: pos_le_divide_eq[OF H] algebra_simps)
@@ -604,7 +604,7 @@
finally show "(c*x <= 0) == (x <= 0)" by simp
qed
-lemma neg_prod_sum_le: "(c\<Colon>'a\<Colon>ordered_field) < 0 \<Longrightarrow> ((c*x + t <= 0) == (x >= (- 1/c)*t))"
+lemma neg_prod_sum_le: "(c\<Colon>'a\<Colon>linordered_field) < 0 \<Longrightarrow> ((c*x + t <= 0) == (x >= (- 1/c)*t))"
proof-
assume H: "c < 0"
have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp)
@@ -613,7 +613,7 @@
finally show "(c*x + t <= 0) == (x >= (- 1/c)*t)" by simp
qed
-lemma pos_prod_sum_le:"(c\<Colon>'a\<Colon>ordered_field) > 0 \<Longrightarrow> ((c*x + t <= 0) == (x <= (- 1/c)*t))"
+lemma pos_prod_sum_le:"(c\<Colon>'a\<Colon>linordered_field) > 0 \<Longrightarrow> ((c*x + t <= 0) == (x <= (- 1/c)*t))"
proof-
assume H: "c > 0"
have "c*x + t <= 0 = (c*x <= -t)" by (subst le_iff_diff_le_0 [of "c*x" "-t"], simp)
@@ -622,24 +622,24 @@
finally show "(c*x + t <= 0) == (x <= (- 1/c)*t)" by simp
qed
-lemma sum_le:"((x::'a::pordered_ab_group_add) + t <= 0) == (x <= - t)"
+lemma sum_le:"((x::'a::ordered_ab_group_add) + t <= 0) == (x <= - t)"
using le_diff_eq[where a= x and b=t and c=0] by simp
-lemma nz_prod_eq:"(c\<Colon>'a\<Colon>ordered_field) \<noteq> 0 \<Longrightarrow> ((c*x = 0) == (x = 0))" by simp
-lemma nz_prod_sum_eq: "(c\<Colon>'a\<Colon>ordered_field) \<noteq> 0 \<Longrightarrow> ((c*x + t = 0) == (x = (- 1/c)*t))"
+lemma nz_prod_eq:"(c\<Colon>'a\<Colon>linordered_field) \<noteq> 0 \<Longrightarrow> ((c*x = 0) == (x = 0))" by simp
+lemma nz_prod_sum_eq: "(c\<Colon>'a\<Colon>linordered_field) \<noteq> 0 \<Longrightarrow> ((c*x + t = 0) == (x = (- 1/c)*t))"
proof-
assume H: "c \<noteq> 0"
have "c*x + t = 0 = (c*x = -t)" by (subst eq_iff_diff_eq_0 [of "c*x" "-t"], simp)
also have "\<dots> = (x = -t/c)" by (simp only: nonzero_eq_divide_eq[OF H] algebra_simps)
finally show "(c*x + t = 0) == (x = (- 1/c)*t)" by simp
qed
-lemma sum_eq:"((x::'a::pordered_ab_group_add) + t = 0) == (x = - t)"
+lemma sum_eq:"((x::'a::ordered_ab_group_add) + t = 0) == (x = - t)"
using eq_diff_eq[where a= x and b=t and c=0] by simp
-interpretation class_ordered_field_dense_linear_order: constr_dense_linear_order
+interpretation class_dense_linordered_field: constr_dense_linorder
"op <=" "op <"
- "\<lambda> x y. 1/2 * ((x::'a::{ordered_field,number_ring}) + y)"
+ "\<lambda> x y. 1/2 * ((x::'a::{linordered_field,number_ring}) + y)"
proof (unfold_locales, dlo, dlo, auto)
fix x y::'a assume lt: "x < y"
from less_half_sum[OF lt] show "x < (x + y) /2" by simp
@@ -871,7 +871,7 @@
addsplits [@{thm "abs_split"},@{thm "split_max"}, @{thm "split_min"}]
in
-Ferrante_Rackoff_Data.funs @{thm "class_ordered_field_dense_linear_order.ferrack_axiom"}
+Ferrante_Rackoff_Data.funs @{thm "class_dense_linordered_field.ferrack_axiom"}
{isolate_conv = field_isolate_conv, whatis = classfield_whatis, simpset = class_field_ss}
end
*}
--- a/src/HOL/Decision_Procs/MIR.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/MIR.thy Tue Feb 09 17:06:05 2010 +0100
@@ -54,7 +54,7 @@
by clarsimp
-lemma myl: "\<forall> (a::'a::{pordered_ab_group_add}) (b::'a). (a \<le> b) = (0 \<le> b - a)"
+lemma myl: "\<forall> (a::'a::{ordered_ab_group_add}) (b::'a). (a \<le> b) = (0 \<le> b - a)"
proof(clarify)
fix x y ::"'a"
have "(x \<le> y) = (x - y \<le> 0)" by (simp only: le_iff_diff_le_0[where a="x" and b="y"])
@@ -63,7 +63,7 @@
finally show "(x \<le> y) = (0 \<le> y - x)" .
qed
-lemma myless: "\<forall> (a::'a::{pordered_ab_group_add}) (b::'a). (a < b) = (0 < b - a)"
+lemma myless: "\<forall> (a::'a::{ordered_ab_group_add}) (b::'a). (a < b) = (0 < b - a)"
proof(clarify)
fix x y ::"'a"
have "(x < y) = (x - y < 0)" by (simp only: less_iff_diff_less_0[where a="x" and b="y"])
@@ -72,7 +72,7 @@
finally show "(x < y) = (0 < y - x)" .
qed
-lemma myeq: "\<forall> (a::'a::{pordered_ab_group_add}) (b::'a). (a = b) = (0 = b - a)"
+lemma myeq: "\<forall> (a::'a::{ordered_ab_group_add}) (b::'a). (a = b) = (0 = b - a)"
by auto
(* Maybe should be added to the library \<dots> *)
--- a/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Parametric_Ferrante_Rackoff.thy Tue Feb 09 17:06:05 2010 +0100
@@ -7,9 +7,9 @@
theory Parametric_Ferrante_Rackoff
imports Reflected_Multivariate_Polynomial
"~~/src/HOL/Decision_Procs/Dense_Linear_Order"
+ Efficient_Nat
begin
-
subsection {* Terms *}
datatype tm = CP poly | Bound nat | Add tm tm | Mul poly tm
@@ -447,7 +447,7 @@
by (induct p rule: fmsize.induct) simp_all
(* Semantics of formulae (fm) *)
-consts Ifm ::"'a::{division_by_zero,ordered_field} list \<Rightarrow> 'a list \<Rightarrow> fm \<Rightarrow> bool"
+consts Ifm ::"'a::{division_by_zero,linordered_field} list \<Rightarrow> 'a list \<Rightarrow> fm \<Rightarrow> bool"
primrec
"Ifm vs bs T = True"
"Ifm vs bs F = False"
@@ -1833,16 +1833,16 @@
ultimately show ?case by blast
qed (auto simp add: nth_pos2 tmbound0_I[where vs=vs and bs="bs" and b="y" and b'="x"] bound0_I[where vs=vs and bs="bs" and b="y" and b'="x"])
-lemma one_plus_one_pos[simp]: "(1::'a::{ordered_field}) + 1 > 0"
+lemma one_plus_one_pos[simp]: "(1::'a::{linordered_field}) + 1 > 0"
proof-
have op: "(1::'a) > 0" by simp
from add_pos_pos[OF op op] show ?thesis .
qed
-lemma one_plus_one_nonzero[simp]: "(1::'a::{ordered_field}) + 1 \<noteq> 0"
+lemma one_plus_one_nonzero[simp]: "(1::'a::{linordered_field}) + 1 \<noteq> 0"
using one_plus_one_pos[where ?'a = 'a] by (simp add: less_le)
-lemma half_sum_eq: "(u + u) / (1+1) = (u::'a::{ordered_field})"
+lemma half_sum_eq: "(u + u) / (1+1) = (u::'a::{linordered_field})"
proof-
have "(u + u) = (1 + 1) * u" by (simp add: ring_simps)
hence "(u + u) / (1+1) = (1 + 1)*u / (1 + 1)" by simp
@@ -2187,10 +2187,7 @@
{assume dc: "?c*?d < 0"
from dc one_plus_one_pos[where ?'a='a] have dc': "(1 + 1)*?c *?d < 0"
- apply (simp add: mult_less_0_iff field_simps)
- apply (rule add_neg_neg)
- apply (simp_all add: mult_less_0_iff)
- done
+ by (simp add: mult_less_0_iff field_simps)
hence c:"?c \<noteq> 0" and d: "?d\<noteq> 0" by auto
from add_frac_eq[OF c d, of "- ?t" "- ?s"]
have th: "(- ?t / ?c + - ?s / ?d)/(1 + 1) = - (?d * ?t + ?c* ?s )/ ((1 + 1)*?c*?d)"
@@ -2597,13 +2594,6 @@
by (simp add: Let_def stupid)
-
-(*
-lemmas [code func] = polysub_def
-lemmas [code func del] = Zero_nat_def
-code_gen "frpar" in SML to FRParTest
-*)
-
section{* Second implemenation: Case splits not local *}
lemma fr_eq2: assumes lp: "islin p"
@@ -2948,14 +2938,7 @@
show ?thesis unfolding frpar2_def by (auto simp add: prep)
qed
-code_module FRPar
- contains
- frpar = "frpar"
- frpar2 = "frpar2"
- test = "%x . frpar (E(Lt (Mul 1\<^sub>p (Bound 0))))"
-
-ML{*
-
+ML {*
structure ReflectedFRPar =
struct
@@ -2986,92 +2969,93 @@
fun num_of_term m t =
case t of
- Const(@{const_name Algebras.uminus},_)$t => FRPar.Neg (num_of_term m t)
- | Const(@{const_name Algebras.plus},_)$a$b => FRPar.Add (num_of_term m a, num_of_term m b)
- | Const(@{const_name Algebras.minus},_)$a$b => FRPar.Sub (num_of_term m a, num_of_term m b)
- | Const(@{const_name Algebras.times},_)$a$b => FRPar.Mul (num_of_term m a, num_of_term m b)
- | Const(@{const_name Power.power},_)$a$n => FRPar.Pw (num_of_term m a, dest_nat n)
- | Const(@{const_name Algebras.divide},_)$a$b => FRPar.C (HOLogic.dest_number a |> snd, HOLogic.dest_number b |> snd)
- | _ => (FRPar.C (HOLogic.dest_number t |> snd,1)
- handle TERM _ => FRPar.Bound (AList.lookup (op aconv) m t |> the));
+ Const(@{const_name Algebras.uminus},_)$t => @{code poly.Neg} (num_of_term m t)
+ | Const(@{const_name Algebras.plus},_)$a$b => @{code poly.Add} (num_of_term m a, num_of_term m b)
+ | Const(@{const_name Algebras.minus},_)$a$b => @{code poly.Sub} (num_of_term m a, num_of_term m b)
+ | Const(@{const_name Algebras.times},_)$a$b => @{code poly.Mul} (num_of_term m a, num_of_term m b)
+ | Const(@{const_name Power.power},_)$a$n => @{code poly.Pw} (num_of_term m a, dest_nat n)
+ | Const(@{const_name Algebras.divide},_)$a$b => @{code poly.C} (HOLogic.dest_number a |> snd, HOLogic.dest_number b |> snd)
+ | _ => (@{code poly.C} (HOLogic.dest_number t |> snd,1)
+ handle TERM _ => @{code poly.Bound} (AList.lookup (op aconv) m t |> the));
fun tm_of_term m m' t =
case t of
- Const(@{const_name Algebras.uminus},_)$t => FRPar.tm_Neg (tm_of_term m m' t)
- | Const(@{const_name Algebras.plus},_)$a$b => FRPar.tm_Add (tm_of_term m m' a, tm_of_term m m' b)
- | Const(@{const_name Algebras.minus},_)$a$b => FRPar.tm_Sub (tm_of_term m m' a, tm_of_term m m' b)
- | Const(@{const_name Algebras.times},_)$a$b => FRPar.tm_Mul (num_of_term m' a, tm_of_term m m' b)
- | _ => (FRPar.CP (num_of_term m' t)
- handle TERM _ => FRPar.tm_Bound (AList.lookup (op aconv) m t |> the)
- | Option => FRPar.tm_Bound (AList.lookup (op aconv) m t |> the));
+ Const(@{const_name Algebras.uminus},_)$t => @{code Neg} (tm_of_term m m' t)
+ | Const(@{const_name Algebras.plus},_)$a$b => @{code Add} (tm_of_term m m' a, tm_of_term m m' b)
+ | Const(@{const_name Algebras.minus},_)$a$b => @{code Sub} (tm_of_term m m' a, tm_of_term m m' b)
+ | Const(@{const_name Algebras.times},_)$a$b => @{code Mul} (num_of_term m' a, tm_of_term m m' b)
+ | _ => (@{code CP} (num_of_term m' t)
+ handle TERM _ => @{code Bound} (AList.lookup (op aconv) m t |> the)
+ | Option => @{code Bound} (AList.lookup (op aconv) m t |> the));
fun term_of_num T m t =
case t of
- FRPar.C (a,b) => (if b = 1 then num T a else if b=0 then (rz T)
+ @{code poly.C} (a,b) => (if b = 1 then num T a else if b=0 then (rz T)
else (divt T) $ num T a $ num T b)
-| FRPar.Bound i => AList.lookup (op = : int*int -> bool) m i |> the
-| FRPar.Add(a,b) => (plust T)$(term_of_num T m a)$(term_of_num T m b)
-| FRPar.Mul(a,b) => (timest T)$(term_of_num T m a)$(term_of_num T m b)
-| FRPar.Sub(a,b) => (minust T)$(term_of_num T m a)$(term_of_num T m b)
-| FRPar.Neg a => (uminust T)$(term_of_num T m a)
-| FRPar.Pw(a,n) => (powt T)$(term_of_num T m t)$(HOLogic.mk_number HOLogic.natT n)
-| FRPar.CN(c,n,p) => term_of_num T m (FRPar.Add(c,FRPar.Mul(FRPar.Bound n, p)))
+| @{code poly.Bound} i => AList.lookup (op = : int*int -> bool) m i |> the
+| @{code poly.Add} (a,b) => (plust T)$(term_of_num T m a)$(term_of_num T m b)
+| @{code poly.Mul} (a,b) => (timest T)$(term_of_num T m a)$(term_of_num T m b)
+| @{code poly.Sub} (a,b) => (minust T)$(term_of_num T m a)$(term_of_num T m b)
+| @{code poly.Neg} a => (uminust T)$(term_of_num T m a)
+| @{code poly.Pw} (a,n) => (powt T)$(term_of_num T m t)$(HOLogic.mk_number HOLogic.natT n)
+| @{code poly.CN} (c,n,p) => term_of_num T m (@{code poly.Add} (c, @{code poly.Mul} (@{code poly.Bound} n, p)))
| _ => error "term_of_num: Unknown term";
fun term_of_tm T m m' t =
case t of
- FRPar.CP p => term_of_num T m' p
-| FRPar.tm_Bound i => AList.lookup (op = : int*int -> bool) m i |> the
-| FRPar.tm_Add(a,b) => (plust T)$(term_of_tm T m m' a)$(term_of_tm T m m' b)
-| FRPar.tm_Mul(a,b) => (timest T)$(term_of_num T m' a)$(term_of_tm T m m' b)
-| FRPar.tm_Sub(a,b) => (minust T)$(term_of_tm T m m' a)$(term_of_tm T m m' b)
-| FRPar.tm_Neg a => (uminust T)$(term_of_tm T m m' a)
-| FRPar.CNP(n,c,p) => term_of_tm T m m' (FRPar.tm_Add(FRPar.tm_Mul(c, FRPar.tm_Bound n), p))
+ @{code CP} p => term_of_num T m' p
+| @{code Bound} i => AList.lookup (op = : int*int -> bool) m i |> the
+| @{code Add} (a,b) => (plust T)$(term_of_tm T m m' a)$(term_of_tm T m m' b)
+| @{code Mul} (a,b) => (timest T)$(term_of_num T m' a)$(term_of_tm T m m' b)
+| @{code Sub} (a,b) => (minust T)$(term_of_tm T m m' a)$(term_of_tm T m m' b)
+| @{code Neg} a => (uminust T)$(term_of_tm T m m' a)
+| @{code CNP} (n,c,p) => term_of_tm T m m' (@{code Add}
+ (@{code Mul} (c, @{code Bound} n), p))
| _ => error "term_of_tm: Unknown term";
fun fm_of_term m m' fm =
case fm of
- Const("True",_) => FRPar.T
- | Const("False",_) => FRPar.F
- | Const("Not",_)$p => FRPar.NOT (fm_of_term m m' p)
- | Const("op &",_)$p$q => FRPar.And(fm_of_term m m' p, fm_of_term m m' q)
- | Const("op |",_)$p$q => FRPar.Or(fm_of_term m m' p, fm_of_term m m' q)
- | Const("op -->",_)$p$q => FRPar.Imp(fm_of_term m m' p, fm_of_term m m' q)
+ Const("True",_) => @{code T}
+ | Const("False",_) => @{code F}
+ | Const("Not",_)$p => @{code NOT} (fm_of_term m m' p)
+ | Const("op &",_)$p$q => @{code And} (fm_of_term m m' p, fm_of_term m m' q)
+ | Const("op |",_)$p$q => @{code Or} (fm_of_term m m' p, fm_of_term m m' q)
+ | Const("op -->",_)$p$q => @{code Imp} (fm_of_term m m' p, fm_of_term m m' q)
| Const("op =",ty)$p$q =>
- if domain_type ty = bT then FRPar.Iff(fm_of_term m m' p, fm_of_term m m' q)
- else FRPar.Eq (FRPar.tm_Sub(tm_of_term m m' p, tm_of_term m m' q))
+ if domain_type ty = bT then @{code Iff} (fm_of_term m m' p, fm_of_term m m' q)
+ else @{code Eq} (@{code Sub} (tm_of_term m m' p, tm_of_term m m' q))
| Const(@{const_name Algebras.less},_)$p$q =>
- FRPar.Lt (FRPar.tm_Sub(tm_of_term m m' p, tm_of_term m m' q))
+ @{code Lt} (@{code Sub} (tm_of_term m m' p, tm_of_term m m' q))
| Const(@{const_name Algebras.less_eq},_)$p$q =>
- FRPar.Le (FRPar.tm_Sub(tm_of_term m m' p, tm_of_term m m' q))
+ @{code Le} (@{code Sub} (tm_of_term m m' p, tm_of_term m m' q))
| Const("Ex",_)$Abs(xn,xT,p) =>
let val (xn', p') = variant_abs (xn,xT,p)
val x = Free(xn',xT)
fun incr i = i + 1
val m0 = (x,0):: (map (apsnd incr) m)
- in FRPar.E (fm_of_term m0 m' p') end
+ in @{code E} (fm_of_term m0 m' p') end
| Const("All",_)$Abs(xn,xT,p) =>
let val (xn', p') = variant_abs (xn,xT,p)
val x = Free(xn',xT)
fun incr i = i + 1
val m0 = (x,0):: (map (apsnd incr) m)
- in FRPar.A (fm_of_term m0 m' p') end
+ in @{code A} (fm_of_term m0 m' p') end
| _ => error "fm_of_term";
fun term_of_fm T m m' t =
case t of
- FRPar.T => Const("True",bT)
- | FRPar.F => Const("False",bT)
- | FRPar.NOT p => nott $ (term_of_fm T m m' p)
- | FRPar.And (p,q) => conjt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
- | FRPar.Or (p,q) => disjt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
- | FRPar.Imp (p,q) => impt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
- | FRPar.Iff (p,q) => ifft $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
- | FRPar.Lt p => (llt T) $ (term_of_tm T m m' p) $ (rz T)
- | FRPar.Le p => (lle T) $ (term_of_tm T m m' p) $ (rz T)
- | FRPar.Eq p => (eqt T) $ (term_of_tm T m m' p) $ (rz T)
- | FRPar.NEq p => nott $ ((eqt T) $ (term_of_tm T m m' p) $ (rz T))
+ @{code T} => Const("True",bT)
+ | @{code F} => Const("False",bT)
+ | @{code NOT} p => nott $ (term_of_fm T m m' p)
+ | @{code And} (p,q) => conjt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
+ | @{code Or} (p,q) => disjt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
+ | @{code Imp} (p,q) => impt $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
+ | @{code Iff} (p,q) => ifft $ (term_of_fm T m m' p) $ (term_of_fm T m m' q)
+ | @{code Lt} p => (llt T) $ (term_of_tm T m m' p) $ (rz T)
+ | @{code Le} p => (lle T) $ (term_of_tm T m m' p) $ (rz T)
+ | @{code Eq} p => (eqt T) $ (term_of_tm T m m' p) $ (rz T)
+ | @{code NEq} p => nott $ ((eqt T) $ (term_of_tm T m m' p) $ (rz T))
| _ => error "term_of_fm: quantifiers!!!!???";
fun frpar_oracle (T,m, m', fm) =
@@ -3080,7 +3064,7 @@
val im = 0 upto (length m - 1)
val im' = 0 upto (length m' - 1)
in HOLogic.mk_Trueprop (HOLogic.mk_eq(t, term_of_fm T (im ~~ m) (im' ~~ m')
- (FRPar.frpar (fm_of_term (m ~~ im) (m' ~~ im') t))))
+ (@{code frpar} (fm_of_term (m ~~ im) (m' ~~ im') t))))
end;
fun frpar_oracle2 (T,m, m', fm) =
@@ -3089,7 +3073,7 @@
val im = 0 upto (length m - 1)
val im' = 0 upto (length m' - 1)
in HOLogic.mk_Trueprop (HOLogic.mk_eq(t, term_of_fm T (im ~~ m) (im' ~~ m')
- (FRPar.frpar2 (fm_of_term (m ~~ im) (m' ~~ im') t))))
+ (@{code frpar2} (fm_of_term (m ~~ im) (m' ~~ im') t))))
end;
end;
@@ -3172,54 +3156,54 @@
*} "Parametric QE for linear Arithmetic over fields, Version 2"
-lemma "\<exists>(x::'a::{division_by_zero,ordered_field,number_ring}). y \<noteq> -1 \<longrightarrow> (y + 1)*x < 0"
- apply (frpar type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "y::'a::{division_by_zero,ordered_field,number_ring}")
+lemma "\<exists>(x::'a::{division_by_zero,linordered_field,number_ring}). y \<noteq> -1 \<longrightarrow> (y + 1)*x < 0"
+ apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "y::'a::{division_by_zero,linordered_field,number_ring}")
apply (simp add: ring_simps)
apply (rule spec[where x=y])
- apply (frpar type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "z::'a::{division_by_zero,ordered_field,number_ring}")
+ apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "z::'a::{division_by_zero,linordered_field,number_ring}")
by simp
text{* Collins/Jones Problem *}
(*
-lemma "\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0"
+lemma "\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0"
proof-
- have "(\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \<longleftrightarrow> (\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \<and> 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \<longleftrightarrow> ?rhs")
+ have "(\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \<longleftrightarrow> (\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \<and> 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \<longleftrightarrow> ?rhs")
by (simp add: ring_simps)
have "?rhs"
- apply (frpar type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "a::'a::{division_by_zero,ordered_field,number_ring}" "b::'a::{division_by_zero,ordered_field,number_ring}")
+ apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "a::'a::{division_by_zero,linordered_field,number_ring}" "b::'a::{division_by_zero,linordered_field,number_ring}")
apply (simp add: ring_simps)
oops
*)
(*
-lemma "ALL (x::'a::{division_by_zero,ordered_field,number_ring}) y. (1 - t)*x \<le> (1+t)*y \<and> (1 - t)*y \<le> (1+t)*x --> 0 \<le> y"
-apply (frpar type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "t::'a::{division_by_zero,ordered_field,number_ring}")
+lemma "ALL (x::'a::{division_by_zero,linordered_field,number_ring}) y. (1 - t)*x \<le> (1+t)*y \<and> (1 - t)*y \<le> (1+t)*x --> 0 \<le> y"
+apply (frpar type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "t::'a::{division_by_zero,linordered_field,number_ring}")
oops
*)
-lemma "\<exists>(x::'a::{division_by_zero,ordered_field,number_ring}). y \<noteq> -1 \<longrightarrow> (y + 1)*x < 0"
- apply (frpar2 type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "y::'a::{division_by_zero,ordered_field,number_ring}")
+lemma "\<exists>(x::'a::{division_by_zero,linordered_field,number_ring}). y \<noteq> -1 \<longrightarrow> (y + 1)*x < 0"
+ apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "y::'a::{division_by_zero,linordered_field,number_ring}")
apply (simp add: ring_simps)
apply (rule spec[where x=y])
- apply (frpar2 type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "z::'a::{division_by_zero,ordered_field,number_ring}")
+ apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "z::'a::{division_by_zero,linordered_field,number_ring}")
by simp
text{* Collins/Jones Problem *}
(*
-lemma "\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0"
+lemma "\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0"
proof-
- have "(\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \<longleftrightarrow> (\<exists>(r::'a::{division_by_zero,ordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \<and> 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \<longleftrightarrow> ?rhs")
+ have "(\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < (2 - 3*r) *(a^2 + b^2) + (2*a)*r \<and> (2 - 3*r) *(a^2 + b^2) + 4*a*r - 2*a - r < 0) \<longleftrightarrow> (\<exists>(r::'a::{division_by_zero,linordered_field,number_ring}). 0 < r \<and> r < 1 \<and> 0 < 2 *(a^2 + b^2) - (3*(a^2 + b^2)) * r + (2*a)*r \<and> 2*(a^2 + b^2) - (3*(a^2 + b^2) - 4*a + 1)*r - 2*a < 0)" (is "?lhs \<longleftrightarrow> ?rhs")
by (simp add: ring_simps)
have "?rhs"
- apply (frpar2 type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "a::'a::{division_by_zero,ordered_field,number_ring}" "b::'a::{division_by_zero,ordered_field,number_ring}")
+ apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "a::'a::{division_by_zero,linordered_field,number_ring}" "b::'a::{division_by_zero,linordered_field,number_ring}")
apply simp
oops
*)
(*
-lemma "ALL (x::'a::{division_by_zero,ordered_field,number_ring}) y. (1 - t)*x \<le> (1+t)*y \<and> (1 - t)*y \<le> (1+t)*x --> 0 \<le> y"
-apply (frpar2 type: "'a::{division_by_zero,ordered_field,number_ring}" pars: "t::'a::{division_by_zero,ordered_field,number_ring}")
+lemma "ALL (x::'a::{division_by_zero,linordered_field,number_ring}) y. (1 - t)*x \<le> (1+t)*y \<and> (1 - t)*y \<le> (1+t)*x --> 0 \<le> y"
+apply (frpar2 type: "'a::{division_by_zero,linordered_field,number_ring}" pars: "t::'a::{division_by_zero,linordered_field,number_ring}")
apply (simp add: field_simps linorder_neq_iff[symmetric])
apply ferrack
oops
--- a/src/HOL/Decision_Procs/Polynomial_List.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Polynomial_List.thy Tue Feb 09 17:06:05 2010 +0100
@@ -772,7 +772,7 @@
text{*bound for polynomial.*}
-lemma poly_mono: "abs(x) \<le> k ==> abs(poly p (x::'a::{ordered_idom})) \<le> poly (map abs p) k"
+lemma poly_mono: "abs(x) \<le> k ==> abs(poly p (x::'a::{linordered_idom})) \<le> poly (map abs p) k"
apply (induct "p", auto)
apply (rule_tac y = "abs a + abs (x * poly p x)" in order_trans)
apply (rule abs_triangle_ineq)
--- a/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/Reflected_Multivariate_Polynomial.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2,25 +2,21 @@
Author: Amine Chaieb
*)
-header {* Implementation and verification of mutivariate polynomials Library *}
-
+header {* Implementation and verification of multivariate polynomials *}
theory Reflected_Multivariate_Polynomial
-imports Parity Abstract_Rat Efficient_Nat List Polynomial_List
+imports Complex_Main Abstract_Rat Polynomial_List
begin
- (* Impelementation *)
+ (* Implementation *)
subsection{* Datatype of polynomial expressions *}
datatype poly = C Num| Bound nat| Add poly poly|Sub poly poly
| Mul poly poly| Neg poly| Pw poly nat| CN poly nat poly
-ML{* @{term "Add"}*}
-syntax "_poly0" :: "poly" ("0\<^sub>p")
-translations "0\<^sub>p" \<rightleftharpoons> "C (0\<^sub>N)"
-syntax "_poly" :: "int \<Rightarrow> poly" ("_\<^sub>p")
-translations "i\<^sub>p" \<rightleftharpoons> "C (i\<^sub>N)"
+abbreviation poly_0 :: "poly" ("0\<^sub>p") where "0\<^sub>p \<equiv> C (0\<^sub>N)"
+abbreviation poly_p :: "int \<Rightarrow> poly" ("_\<^sub>p") where "i\<^sub>p \<equiv> C (i\<^sub>N)"
subsection{* Boundedness, substitution and all that *}
consts polysize:: "poly \<Rightarrow> nat"
@@ -118,14 +114,14 @@
polysub :: "poly\<times>poly \<Rightarrow> poly"
polymul :: "poly\<times>poly \<Rightarrow> poly"
polypow :: "nat \<Rightarrow> poly \<Rightarrow> poly"
-syntax "_polyadd" :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "+\<^sub>p" 60)
-translations "a +\<^sub>p b" \<rightleftharpoons> "polyadd (a,b)"
-syntax "_polymul" :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "*\<^sub>p" 60)
-translations "a *\<^sub>p b" \<rightleftharpoons> "polymul (a,b)"
-syntax "_polysub" :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "-\<^sub>p" 60)
-translations "a -\<^sub>p b" \<rightleftharpoons> "polysub (a,b)"
-syntax "_polypow" :: "nat \<Rightarrow> poly \<Rightarrow> poly" (infixl "^\<^sub>p" 60)
-translations "a ^\<^sub>p k" \<rightleftharpoons> "polypow k a"
+abbreviation poly_add :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "+\<^sub>p" 60)
+ where "a +\<^sub>p b \<equiv> polyadd (a,b)"
+abbreviation poly_mul :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "*\<^sub>p" 60)
+ where "a *\<^sub>p b \<equiv> polymul (a,b)"
+abbreviation poly_sub :: "poly \<Rightarrow> poly \<Rightarrow> poly" (infixl "-\<^sub>p" 60)
+ where "a -\<^sub>p b \<equiv> polysub (a,b)"
+abbreviation poly_pow :: "poly \<Rightarrow> nat \<Rightarrow> poly" (infixl "^\<^sub>p" 60)
+ where "a ^\<^sub>p k \<equiv> polypow k a"
recdef polyadd "measure (\<lambda> (a,b). polysize a + polysize b)"
"polyadd (C c, C c') = C (c+\<^sub>Nc')"
@@ -244,8 +240,9 @@
"Ipoly bs (Mul a b) = Ipoly bs a * Ipoly bs b"
"Ipoly bs (Pw t n) = (Ipoly bs t) ^ n"
"Ipoly bs (CN c n p) = (Ipoly bs c) + (bs!n)*(Ipoly bs p)"
-syntax "_Ipoly" :: "poly \<Rightarrow> 'a list \<Rightarrow>'a::{ring_char_0,power,division_by_zero,field}" ("\<lparr>_\<rparr>\<^sub>p\<^bsup>_\<^esup>")
-translations "\<lparr>p\<rparr>\<^sub>p\<^bsup>bs\<^esup>" \<rightleftharpoons> "Ipoly bs p"
+abbreviation
+ Ipoly_syntax :: "poly \<Rightarrow> 'a list \<Rightarrow>'a::{ring_char_0,power,division_by_zero,field}" ("\<lparr>_\<rparr>\<^sub>p\<^bsup>_\<^esup>")
+ where "\<lparr>p\<rparr>\<^sub>p\<^bsup>bs\<^esup> \<equiv> Ipoly bs p"
lemma Ipoly_CInt: "Ipoly bs (C (i,1)) = of_int i"
by (simp add: INum_def)
--- a/src/HOL/Decision_Procs/ex/Dense_Linear_Order_Ex.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/ex/Dense_Linear_Order_Ex.thy Tue Feb 09 17:06:05 2010 +0100
@@ -7,147 +7,147 @@
begin
lemma
- "\<exists>(y::'a::{ordered_field,number_ring, division_by_zero}) <2. x + 3* y < 0 \<and> x - y >0"
+ "\<exists>(y::'a::{linordered_field,number_ring, division_by_zero}) <2. x + 3* y < 0 \<and> x - y >0"
by ferrack
-lemma "~ (ALL x (y::'a::{ordered_field,number_ring, division_by_zero}). x < y --> 10*x < 11*y)"
+lemma "~ (ALL x (y::'a::{linordered_field,number_ring, division_by_zero}). x < y --> 10*x < 11*y)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. x < y --> (10*(x + 5*y + -1) < 60*y)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (10*(x + 5*y + -1) < 60*y)"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y. x ~= y --> x < y"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. x ~= y --> x < y"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y. (x ~= y & 10*x ~= 9*y & 10*x < y) --> x < y"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (x ~= y & 10*x ~= 9*y & 10*x < y) --> x < y"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. (x ~= y & 5*x <= y) --> 500*x <= 100*y"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (x ~= y & 5*x <= y) --> 500*x <= 100*y"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX (y::'a::{ordered_field,number_ring, division_by_zero}). 4*x + 3*y <= 0 & 4*x + 3*y >= -1)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX (y::'a::{linordered_field,number_ring, division_by_zero}). 4*x + 3*y <= 0 & 4*x + 3*y >= -1)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) < 0. (EX (y::'a::{ordered_field,number_ring, division_by_zero}) > 0. 7*x + y > 0 & x - y <= 9)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) < 0. (EX (y::'a::{linordered_field,number_ring, division_by_zero}) > 0. 7*x + y > 0 & x - y <= 9)"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}). (0 < x & x < 1) --> (ALL y > 1. x + y ~= 1)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (0 < x & x < 1) --> (ALL y > 1. x + y ~= 1)"
by ferrack
-lemma "EX x. (ALL (y::'a::{ordered_field,number_ring, division_by_zero}). y < 2 --> 2*(y - x) \<le> 0 )"
+lemma "EX x. (ALL (y::'a::{linordered_field,number_ring, division_by_zero}). y < 2 --> 2*(y - x) \<le> 0 )"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). x < 10 | x > 20 | (EX y. y>= 0 & y <= 10 & x+y = 20)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). x < 10 | x > 20 | (EX y. y>= 0 & y <= 10 & x+y = 20)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y z. x + y < z --> y >= z --> x < 0"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + y < z --> y >= z --> x < 0"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z. x + 7*y < 5* z & 5*y >= 7*z & x < 0"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + 7*y < 5* z & 5*y >= 7*z & x < 0"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y z. abs (x + y) <= z --> (abs z = z)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. abs (x + y) <= z --> (abs z = z)"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z. x + 7*y - 5* z < 0 & 5*y + 7*z + 3*x < 0"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. x + 7*y - 5* z < 0 & 5*y + 7*z + 3*x < 0"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y z. (abs (5*x+3*y+z) <= 5*x+3*y+z & abs (5*x+3*y+z) >= - (5*x+3*y+z)) | (abs (5*x+3*y+z) >= 5*x+3*y+z & abs (5*x+3*y+z) <= - (5*x+3*y+z))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (abs (5*x+3*y+z) <= 5*x+3*y+z & abs (5*x+3*y+z) >= - (5*x+3*y+z)) | (abs (5*x+3*y+z) >= 5*x+3*y+z & abs (5*x+3*y+z) <= - (5*x+3*y+z))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. x < y --> (EX z>0. x+z = y)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. (EX z>0. abs (x - y) <= z )"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z>0. abs (x - y) <= z )"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y. (ALL z>=0. abs (3*x+7*y) <= 2*z + 1)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z>=0. abs (3*x+7*y) <= 2*z + 1)"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y. (ALL z<0. (z < x --> z <= y) & (z > y --> z >= x))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero})>0. (ALL y. (EX z. 13* abs z \<noteq> abs (12*y - x) & 5*x - 3*(abs y) <= 7*z))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero})>0. (ALL y. (EX z. 13* abs z \<noteq> abs (12*y - x) & 5*x - 3*(abs y) <= 7*z))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}). abs (4*x + 17) < 4 & (ALL y . abs (x*34 - 34*y - 9) \<noteq> 0 \<longrightarrow> (EX z. 5*x - 3*abs y <= 7*z))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). abs (4*x + 17) < 4 & (ALL y . abs (x*34 - 34*y - 9) \<noteq> 0 \<longrightarrow> (EX z. 5*x - 3*abs y <= 7*z))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y > abs (23*x - 9). (ALL z > abs (3*y - 19* abs x). x+z > 2*y))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y > abs (23*x - 9). (ALL z > abs (3*y - 19* abs x). x+z > 2*y))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y< abs (3*x - 1). (ALL z >= (3*abs x - 1). abs (12*x - 13*y + 19*z) > abs (23*x) ))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y< abs (3*x - 1). (ALL z >= (3*abs x - 1). abs (12*x - 13*y + 19*z) > abs (23*x) ))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}). abs x < 100 & (ALL y > x. (EX z<2*y - x. 5*x - 3*y <= 7*z))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). abs x < 100 & (ALL y > x. (EX z<2*y - x. 5*x - 3*y <= 7*z))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y z w. 7*x<3*y --> 5*y < 7*z --> z < 2*w --> 7*(2*w-x) > 2*y"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 7*x<3*y --> 5*y < 7*z --> z < 2*w --> 7*(2*w-x) > 2*y"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + abs (y - 8*x + z) <= 89"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + abs (y - 8*x + z) <= 89"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + 7* (y - 8*x + z) <= max y (7*z - x + w)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. 5*x + 3*z - 17*w + 7* (y - 8*x + z) <= max y (7*z - x + w)"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y z. (EX w >= (x+y+z). w <= abs x + abs y + abs z)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (EX w >= (x+y+z). w <= abs x + abs y + abs z)"
by ferrack
-lemma "~(ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y z w. 3* x + z*4 = 3*y & x + y < z & x> w & 3*x < w + y))"
+lemma "~(ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y z w. 3* x + z*4 = 3*y & x + y < z & x> w & 3*x < w + y))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. (EX z w. abs (x-y) = (z-w) & z*1234 < 233*x & w ~= y)"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z w. abs (x-y) = (z-w) & z*1234 < 233*x & w ~= y)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z. (ALL w >= abs (x+y+z). w >= abs x + abs y + abs z)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z. (ALL w >= abs (x+y+z). w >= abs x + abs y + abs z)"
by ferrack
-lemma "EX z. (ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. (EX w >= (x+y+z). w <= abs x + abs y + abs z))"
+lemma "EX z. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX w >= (x+y+z). w <= abs x + abs y + abs z))"
by ferrack
-lemma "EX z. (ALL (x::'a::{ordered_field,number_ring, division_by_zero}) < abs z. (EX y w. x< y & x < z & x> w & 3*x < w + y))"
+lemma "EX z. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}) < abs z. (EX y w. x< y & x < z & x> w & 3*x < w + y))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}) y. (EX z. (ALL w. abs (x-y) = abs (z-w) --> z < x & w ~= y))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}) y. (EX z. (ALL w. abs (x-y) = abs (z-w) --> z < x & w ~= y))"
by ferrack
-lemma "EX y. (ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)))"
+lemma "EX y. (ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) z. (ALL w >= 13*x - 4*z. (EX y. w >= abs x + abs y + z))"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) z. (ALL w >= 13*x - 4*z. (EX y. w >= abs x + abs y + z))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}). (ALL y < x. (EX z > (x+y).
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (ALL y < x. (EX z > (x+y).
(ALL w. 5*w + 10*x - z >= y --> w + 7*x + 3*z >= 2*y)))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}). (ALL y. (EX z > y.
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}). (ALL y. (EX z > y.
(ALL w . w < 13 --> w + 10*x - z >= y --> 5*w + 7*x + 13*z >= 2*y)))"
by ferrack
-lemma "EX (x::'a::{ordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)"
+lemma "EX (x::'a::{linordered_field,number_ring, division_by_zero}) y z w. min (5*x + 3*z) (17*w) + 5* abs (y - 8*x + z) <= max y (7*z - x + w)"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (y - x) < w)))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (y - x) < w)))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (x + z) < w - y)))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. (ALL z>19. y <= x + z & (EX w. abs (x + z) < w - y)))"
by ferrack
-lemma "ALL (x::'a::{ordered_field,number_ring, division_by_zero}). (EX y. abs y ~= abs x & (ALL z> max x y. (EX w. w ~= y & w ~= z & 3*w - z >= x + y)))"
+lemma "ALL (x::'a::{linordered_field,number_ring, division_by_zero}). (EX y. abs y ~= abs x & (ALL z> max x y. (EX w. w ~= y & w ~= z & 3*w - z >= x + y)))"
by ferrack
end
--- a/src/HOL/Decision_Procs/mir_tac.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Decision_Procs/mir_tac.ML Tue Feb 09 17:06:05 2010 +0100
@@ -33,7 +33,7 @@
@{thm "real_of_nat_number_of"},
@{thm "real_of_nat_Suc"}, @{thm "real_of_nat_one"}, @{thm "real_of_one"},
@{thm "real_of_int_zero"}, @{thm "real_of_nat_zero"},
- @{thm "Ring_and_Field.divide_zero"},
+ @{thm "Fields.divide_zero"},
@{thm "divide_divide_eq_left"}, @{thm "times_divide_eq_right"},
@{thm "times_divide_eq_left"}, @{thm "divide_divide_eq_right"},
@{thm "diff_def"}, @{thm "minus_divide_left"}]
--- a/src/HOL/Divides.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Divides.thy Tue Feb 09 17:06:05 2010 +0100
@@ -657,7 +657,7 @@
val trans = trans;
val prove_eq_sums = Arith_Data.prove_conv2 all_tac (Arith_Data.simp_all_tac
- (@{thm monoid_add_class.add_0_left} :: @{thm monoid_add_class.add_0_right} :: @{thms add_ac}))
+ (@{thm add_0_left} :: @{thm add_0_right} :: @{thms add_ac}))
end)
@@ -1655,8 +1655,8 @@
lemmas arithmetic_simps =
arith_simps
add_special
- OrderedGroup.add_0_left
- OrderedGroup.add_0_right
+ add_0_left
+ add_0_right
mult_zero_left
mult_zero_right
mult_1_left
--- a/src/HOL/Fact.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Fact.thy Tue Feb 09 17:06:05 2010 +0100
@@ -266,15 +266,15 @@
lemma of_nat_fact_not_zero [simp]: "of_nat (fact n) \<noteq> (0::'a::semiring_char_0)"
by auto
-lemma of_nat_fact_gt_zero [simp]: "(0::'a::{ordered_semidom}) < of_nat(fact n)" by auto
+lemma of_nat_fact_gt_zero [simp]: "(0::'a::{linordered_semidom}) < of_nat(fact n)" by auto
-lemma of_nat_fact_ge_zero [simp]: "(0::'a::ordered_semidom) \<le> of_nat(fact n)"
+lemma of_nat_fact_ge_zero [simp]: "(0::'a::linordered_semidom) \<le> of_nat(fact n)"
by simp
-lemma inv_of_nat_fact_gt_zero [simp]: "(0::'a::ordered_field) < inverse (of_nat (fact n))"
+lemma inv_of_nat_fact_gt_zero [simp]: "(0::'a::linordered_field) < inverse (of_nat (fact n))"
by (auto simp add: positive_imp_inverse_positive)
-lemma inv_of_nat_fact_ge_zero [simp]: "(0::'a::ordered_field) \<le> inverse (of_nat (fact n))"
+lemma inv_of_nat_fact_ge_zero [simp]: "(0::'a::linordered_field) \<le> inverse (of_nat (fact n))"
by (auto intro: order_less_imp_le)
end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Fields.thy Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,1044 @@
+(* Title: HOL/Fields.thy
+ Author: Gertrud Bauer
+ Author: Steven Obua
+ Author: Tobias Nipkow
+ Author: Lawrence C Paulson
+ Author: Markus Wenzel
+ Author: Jeremy Avigad
+*)
+
+header {* Fields *}
+
+theory Fields
+imports Rings
+begin
+
+class field = comm_ring_1 + inverse +
+ assumes field_inverse: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1"
+ assumes divide_inverse: "a / b = a * inverse b"
+begin
+
+subclass division_ring
+proof
+ fix a :: 'a
+ assume "a \<noteq> 0"
+ thus "inverse a * a = 1" by (rule field_inverse)
+ thus "a * inverse a = 1" by (simp only: mult_commute)
+qed
+
+subclass idom ..
+
+lemma right_inverse_eq: "b \<noteq> 0 \<Longrightarrow> a / b = 1 \<longleftrightarrow> a = b"
+proof
+ assume neq: "b \<noteq> 0"
+ {
+ hence "a = (a / b) * b" by (simp add: divide_inverse mult_ac)
+ also assume "a / b = 1"
+ finally show "a = b" by simp
+ next
+ assume "a = b"
+ with neq show "a / b = 1" by (simp add: divide_inverse)
+ }
+qed
+
+lemma nonzero_inverse_eq_divide: "a \<noteq> 0 \<Longrightarrow> inverse a = 1 / a"
+by (simp add: divide_inverse)
+
+lemma divide_self [simp]: "a \<noteq> 0 \<Longrightarrow> a / a = 1"
+by (simp add: divide_inverse)
+
+lemma divide_zero_left [simp]: "0 / a = 0"
+by (simp add: divide_inverse)
+
+lemma inverse_eq_divide: "inverse a = 1 / a"
+by (simp add: divide_inverse)
+
+lemma add_divide_distrib: "(a+b) / c = a/c + b/c"
+by (simp add: divide_inverse algebra_simps)
+
+text{*There is no slick version using division by zero.*}
+lemma inverse_add:
+ "[| a \<noteq> 0; b \<noteq> 0 |]
+ ==> inverse a + inverse b = (a + b) * inverse a * inverse b"
+by (simp add: division_ring_inverse_add mult_ac)
+
+lemma nonzero_mult_divide_mult_cancel_left [simp, noatp]:
+assumes [simp]: "b\<noteq>0" and [simp]: "c\<noteq>0" shows "(c*a)/(c*b) = a/b"
+proof -
+ have "(c*a)/(c*b) = c * a * (inverse b * inverse c)"
+ by (simp add: divide_inverse nonzero_inverse_mult_distrib)
+ also have "... = a * inverse b * (inverse c * c)"
+ by (simp only: mult_ac)
+ also have "... = a * inverse b" by simp
+ finally show ?thesis by (simp add: divide_inverse)
+qed
+
+lemma nonzero_mult_divide_mult_cancel_right [simp, noatp]:
+ "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (a * c) / (b * c) = a / b"
+by (simp add: mult_commute [of _ c])
+
+lemma divide_1 [simp]: "a / 1 = a"
+by (simp add: divide_inverse)
+
+lemma times_divide_eq_right: "a * (b / c) = (a * b) / c"
+by (simp add: divide_inverse mult_assoc)
+
+lemma times_divide_eq_left: "(b / c) * a = (b * a) / c"
+by (simp add: divide_inverse mult_ac)
+
+text {* These are later declared as simp rules. *}
+lemmas times_divide_eq [noatp] = times_divide_eq_right times_divide_eq_left
+
+lemma add_frac_eq:
+ assumes "y \<noteq> 0" and "z \<noteq> 0"
+ shows "x / y + w / z = (x * z + w * y) / (y * z)"
+proof -
+ have "x / y + w / z = (x * z) / (y * z) + (y * w) / (y * z)"
+ using assms by simp
+ also have "\<dots> = (x * z + y * w) / (y * z)"
+ by (simp only: add_divide_distrib)
+ finally show ?thesis
+ by (simp only: mult_commute)
+qed
+
+text{*Special Cancellation Simprules for Division*}
+
+lemma nonzero_mult_divide_cancel_right [simp, noatp]:
+ "b \<noteq> 0 \<Longrightarrow> a * b / b = a"
+using nonzero_mult_divide_mult_cancel_right [of 1 b a] by simp
+
+lemma nonzero_mult_divide_cancel_left [simp, noatp]:
+ "a \<noteq> 0 \<Longrightarrow> a * b / a = b"
+using nonzero_mult_divide_mult_cancel_left [of 1 a b] by simp
+
+lemma nonzero_divide_mult_cancel_right [simp, noatp]:
+ "\<lbrakk>a \<noteq> 0; b \<noteq> 0\<rbrakk> \<Longrightarrow> b / (a * b) = 1 / a"
+using nonzero_mult_divide_mult_cancel_right [of a b 1] by simp
+
+lemma nonzero_divide_mult_cancel_left [simp, noatp]:
+ "\<lbrakk>a \<noteq> 0; b \<noteq> 0\<rbrakk> \<Longrightarrow> a / (a * b) = 1 / b"
+using nonzero_mult_divide_mult_cancel_left [of b a 1] by simp
+
+lemma nonzero_mult_divide_mult_cancel_left2 [simp, noatp]:
+ "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (c * a) / (b * c) = a / b"
+using nonzero_mult_divide_mult_cancel_left [of b c a] by (simp add: mult_ac)
+
+lemma nonzero_mult_divide_mult_cancel_right2 [simp, noatp]:
+ "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (a * c) / (c * b) = a / b"
+using nonzero_mult_divide_mult_cancel_right [of b c a] by (simp add: mult_ac)
+
+lemma minus_divide_left: "- (a / b) = (-a) / b"
+by (simp add: divide_inverse)
+
+lemma nonzero_minus_divide_right: "b \<noteq> 0 ==> - (a / b) = a / (- b)"
+by (simp add: divide_inverse nonzero_inverse_minus_eq)
+
+lemma nonzero_minus_divide_divide: "b \<noteq> 0 ==> (-a) / (-b) = a / b"
+by (simp add: divide_inverse nonzero_inverse_minus_eq)
+
+lemma divide_minus_left [simp, noatp]: "(-a) / b = - (a / b)"
+by (simp add: divide_inverse)
+
+lemma diff_divide_distrib: "(a - b) / c = a / c - b / c"
+by (simp add: diff_minus add_divide_distrib)
+
+lemma add_divide_eq_iff:
+ "z \<noteq> 0 \<Longrightarrow> x + y / z = (z * x + y) / z"
+by (simp add: add_divide_distrib)
+
+lemma divide_add_eq_iff:
+ "z \<noteq> 0 \<Longrightarrow> x / z + y = (x + z * y) / z"
+by (simp add: add_divide_distrib)
+
+lemma diff_divide_eq_iff:
+ "z \<noteq> 0 \<Longrightarrow> x - y / z = (z * x - y) / z"
+by (simp add: diff_divide_distrib)
+
+lemma divide_diff_eq_iff:
+ "z \<noteq> 0 \<Longrightarrow> x / z - y = (x - z * y) / z"
+by (simp add: diff_divide_distrib)
+
+lemma nonzero_eq_divide_eq: "c \<noteq> 0 \<Longrightarrow> a = b / c \<longleftrightarrow> a * c = b"
+proof -
+ assume [simp]: "c \<noteq> 0"
+ have "a = b / c \<longleftrightarrow> a * c = (b / c) * c" by simp
+ also have "... \<longleftrightarrow> a * c = b" by (simp add: divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma nonzero_divide_eq_eq: "c \<noteq> 0 \<Longrightarrow> b / c = a \<longleftrightarrow> b = a * c"
+proof -
+ assume [simp]: "c \<noteq> 0"
+ have "b / c = a \<longleftrightarrow> (b / c) * c = a * c" by simp
+ also have "... \<longleftrightarrow> b = a * c" by (simp add: divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma divide_eq_imp: "c \<noteq> 0 \<Longrightarrow> b = a * c \<Longrightarrow> b / c = a"
+by simp
+
+lemma eq_divide_imp: "c \<noteq> 0 \<Longrightarrow> a * c = b \<Longrightarrow> a = b / c"
+by (erule subst, simp)
+
+lemmas field_eq_simps[noatp] = algebra_simps
+ (* pull / out*)
+ add_divide_eq_iff divide_add_eq_iff
+ diff_divide_eq_iff divide_diff_eq_iff
+ (* multiply eqn *)
+ nonzero_eq_divide_eq nonzero_divide_eq_eq
+(* is added later:
+ times_divide_eq_left times_divide_eq_right
+*)
+
+text{*An example:*}
+lemma "\<lbrakk>a\<noteq>b; c\<noteq>d; e\<noteq>f\<rbrakk> \<Longrightarrow> ((a-b)*(c-d)*(e-f))/((c-d)*(e-f)*(a-b)) = 1"
+apply(subgoal_tac "(c-d)*(e-f)*(a-b) \<noteq> 0")
+ apply(simp add:field_eq_simps)
+apply(simp)
+done
+
+lemma diff_frac_eq:
+ "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> x / y - w / z = (x * z - w * y) / (y * z)"
+by (simp add: field_eq_simps times_divide_eq)
+
+lemma frac_eq_eq:
+ "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> (x / y = w / z) = (x * z = w * y)"
+by (simp add: field_eq_simps times_divide_eq)
+
+end
+
+class division_by_zero = zero + inverse +
+ assumes inverse_zero [simp]: "inverse 0 = 0"
+
+lemma divide_zero [simp]:
+ "a / 0 = (0::'a::{field,division_by_zero})"
+by (simp add: divide_inverse)
+
+lemma divide_self_if [simp]:
+ "a / (a::'a::{field,division_by_zero}) = (if a=0 then 0 else 1)"
+by simp
+
+class linordered_field = field + linordered_idom
+
+lemma inverse_nonzero_iff_nonzero [simp]:
+ "(inverse a = 0) = (a = (0::'a::{division_ring,division_by_zero}))"
+by (force dest: inverse_zero_imp_zero)
+
+lemma inverse_minus_eq [simp]:
+ "inverse(-a) = -inverse(a::'a::{division_ring,division_by_zero})"
+proof cases
+ assume "a=0" thus ?thesis by (simp add: inverse_zero)
+next
+ assume "a\<noteq>0"
+ thus ?thesis by (simp add: nonzero_inverse_minus_eq)
+qed
+
+lemma inverse_eq_imp_eq:
+ "inverse a = inverse b ==> a = (b::'a::{division_ring,division_by_zero})"
+apply (cases "a=0 | b=0")
+ apply (force dest!: inverse_zero_imp_zero
+ simp add: eq_commute [of "0::'a"])
+apply (force dest!: nonzero_inverse_eq_imp_eq)
+done
+
+lemma inverse_eq_iff_eq [simp]:
+ "(inverse a = inverse b) = (a = (b::'a::{division_ring,division_by_zero}))"
+by (force dest!: inverse_eq_imp_eq)
+
+lemma inverse_inverse_eq [simp]:
+ "inverse(inverse (a::'a::{division_ring,division_by_zero})) = a"
+ proof cases
+ assume "a=0" thus ?thesis by simp
+ next
+ assume "a\<noteq>0"
+ thus ?thesis by (simp add: nonzero_inverse_inverse_eq)
+ qed
+
+text{*This version builds in division by zero while also re-orienting
+ the right-hand side.*}
+lemma inverse_mult_distrib [simp]:
+ "inverse(a*b) = inverse(a) * inverse(b::'a::{field,division_by_zero})"
+ proof cases
+ assume "a \<noteq> 0 & b \<noteq> 0"
+ thus ?thesis by (simp add: nonzero_inverse_mult_distrib mult_commute)
+ next
+ assume "~ (a \<noteq> 0 & b \<noteq> 0)"
+ thus ?thesis by force
+ qed
+
+lemma inverse_divide [simp]:
+ "inverse (a/b) = b / (a::'a::{field,division_by_zero})"
+by (simp add: divide_inverse mult_commute)
+
+
+subsection {* Calculations with fractions *}
+
+text{* There is a whole bunch of simp-rules just for class @{text
+field} but none for class @{text field} and @{text nonzero_divides}
+because the latter are covered by a simproc. *}
+
+lemma mult_divide_mult_cancel_left:
+ "c\<noteq>0 ==> (c*a) / (c*b) = a / (b::'a::{field,division_by_zero})"
+apply (cases "b = 0")
+apply (simp_all add: nonzero_mult_divide_mult_cancel_left)
+done
+
+lemma mult_divide_mult_cancel_right:
+ "c\<noteq>0 ==> (a*c) / (b*c) = a / (b::'a::{field,division_by_zero})"
+apply (cases "b = 0")
+apply (simp_all add: nonzero_mult_divide_mult_cancel_right)
+done
+
+lemma divide_divide_eq_right [simp,noatp]:
+ "a / (b/c) = (a*c) / (b::'a::{field,division_by_zero})"
+by (simp add: divide_inverse mult_ac)
+
+lemma divide_divide_eq_left [simp,noatp]:
+ "(a / b) / (c::'a::{field,division_by_zero}) = a / (b*c)"
+by (simp add: divide_inverse mult_assoc)
+
+
+subsubsection{*Special Cancellation Simprules for Division*}
+
+lemma mult_divide_mult_cancel_left_if[simp,noatp]:
+fixes c :: "'a :: {field,division_by_zero}"
+shows "(c*a) / (c*b) = (if c=0 then 0 else a/b)"
+by (simp add: mult_divide_mult_cancel_left)
+
+
+subsection {* Division and Unary Minus *}
+
+lemma minus_divide_right: "- (a/b) = a / -(b::'a::{field,division_by_zero})"
+by (simp add: divide_inverse)
+
+lemma divide_minus_right [simp, noatp]:
+ "a / -(b::'a::{field,division_by_zero}) = -(a / b)"
+by (simp add: divide_inverse)
+
+lemma minus_divide_divide:
+ "(-a)/(-b) = a / (b::'a::{field,division_by_zero})"
+apply (cases "b=0", simp)
+apply (simp add: nonzero_minus_divide_divide)
+done
+
+lemma eq_divide_eq:
+ "((a::'a::{field,division_by_zero}) = b/c) = (if c\<noteq>0 then a*c = b else a=0)"
+by (simp add: nonzero_eq_divide_eq)
+
+lemma divide_eq_eq:
+ "(b/c = (a::'a::{field,division_by_zero})) = (if c\<noteq>0 then b = a*c else a=0)"
+by (force simp add: nonzero_divide_eq_eq)
+
+
+subsection {* Ordered Fields *}
+
+lemma positive_imp_inverse_positive:
+assumes a_gt_0: "0 < a" shows "0 < inverse (a::'a::linordered_field)"
+proof -
+ have "0 < a * inverse a"
+ by (simp add: a_gt_0 [THEN order_less_imp_not_eq2] zero_less_one)
+ thus "0 < inverse a"
+ by (simp add: a_gt_0 [THEN order_less_not_sym] zero_less_mult_iff)
+qed
+
+lemma negative_imp_inverse_negative:
+ "a < 0 ==> inverse a < (0::'a::linordered_field)"
+by (insert positive_imp_inverse_positive [of "-a"],
+ simp add: nonzero_inverse_minus_eq order_less_imp_not_eq)
+
+lemma inverse_le_imp_le:
+assumes invle: "inverse a \<le> inverse b" and apos: "0 < a"
+shows "b \<le> (a::'a::linordered_field)"
+proof (rule classical)
+ assume "~ b \<le> a"
+ hence "a < b" by (simp add: linorder_not_le)
+ hence bpos: "0 < b" by (blast intro: apos order_less_trans)
+ hence "a * inverse a \<le> a * inverse b"
+ by (simp add: apos invle order_less_imp_le mult_left_mono)
+ hence "(a * inverse a) * b \<le> (a * inverse b) * b"
+ by (simp add: bpos order_less_imp_le mult_right_mono)
+ thus "b \<le> a" by (simp add: mult_assoc apos bpos order_less_imp_not_eq2)
+qed
+
+lemma inverse_positive_imp_positive:
+assumes inv_gt_0: "0 < inverse a" and nz: "a \<noteq> 0"
+shows "0 < (a::'a::linordered_field)"
+proof -
+ have "0 < inverse (inverse a)"
+ using inv_gt_0 by (rule positive_imp_inverse_positive)
+ thus "0 < a"
+ using nz by (simp add: nonzero_inverse_inverse_eq)
+qed
+
+lemma inverse_positive_iff_positive [simp]:
+ "(0 < inverse a) = (0 < (a::'a::{linordered_field,division_by_zero}))"
+apply (cases "a = 0", simp)
+apply (blast intro: inverse_positive_imp_positive positive_imp_inverse_positive)
+done
+
+lemma inverse_negative_imp_negative:
+assumes inv_less_0: "inverse a < 0" and nz: "a \<noteq> 0"
+shows "a < (0::'a::linordered_field)"
+proof -
+ have "inverse (inverse a) < 0"
+ using inv_less_0 by (rule negative_imp_inverse_negative)
+ thus "a < 0" using nz by (simp add: nonzero_inverse_inverse_eq)
+qed
+
+lemma inverse_negative_iff_negative [simp]:
+ "(inverse a < 0) = (a < (0::'a::{linordered_field,division_by_zero}))"
+apply (cases "a = 0", simp)
+apply (blast intro: inverse_negative_imp_negative negative_imp_inverse_negative)
+done
+
+lemma inverse_nonnegative_iff_nonnegative [simp]:
+ "(0 \<le> inverse a) = (0 \<le> (a::'a::{linordered_field,division_by_zero}))"
+by (simp add: linorder_not_less [symmetric])
+
+lemma inverse_nonpositive_iff_nonpositive [simp]:
+ "(inverse a \<le> 0) = (a \<le> (0::'a::{linordered_field,division_by_zero}))"
+by (simp add: linorder_not_less [symmetric])
+
+lemma linordered_field_no_lb: "\<forall> x. \<exists>y. y < (x::'a::linordered_field)"
+proof
+ fix x::'a
+ have m1: "- (1::'a) < 0" by simp
+ from add_strict_right_mono[OF m1, where c=x]
+ have "(- 1) + x < x" by simp
+ thus "\<exists>y. y < x" by blast
+qed
+
+lemma linordered_field_no_ub: "\<forall> x. \<exists>y. y > (x::'a::linordered_field)"
+proof
+ fix x::'a
+ have m1: " (1::'a) > 0" by simp
+ from add_strict_right_mono[OF m1, where c=x]
+ have "1 + x > x" by simp
+ thus "\<exists>y. y > x" by blast
+qed
+
+subsection{*Anti-Monotonicity of @{term inverse}*}
+
+lemma less_imp_inverse_less:
+assumes less: "a < b" and apos: "0 < a"
+shows "inverse b < inverse (a::'a::linordered_field)"
+proof (rule ccontr)
+ assume "~ inverse b < inverse a"
+ hence "inverse a \<le> inverse b" by (simp add: linorder_not_less)
+ hence "~ (a < b)"
+ by (simp add: linorder_not_less inverse_le_imp_le [OF _ apos])
+ thus False by (rule notE [OF _ less])
+qed
+
+lemma inverse_less_imp_less:
+ "[|inverse a < inverse b; 0 < a|] ==> b < (a::'a::linordered_field)"
+apply (simp add: order_less_le [of "inverse a"] order_less_le [of "b"])
+apply (force dest!: inverse_le_imp_le nonzero_inverse_eq_imp_eq)
+done
+
+text{*Both premises are essential: with a = -1 and b = 1 we have
+  inverse a = -1 < 1 = inverse b, yet b < a fails.*}
+lemma inverse_less_iff_less [simp,noatp]:
+ "[|0 < a; 0 < b|] ==> (inverse a < inverse b) = (b < (a::'a::linordered_field))"
+by (blast intro: less_imp_inverse_less dest: inverse_less_imp_less)
+
+lemma le_imp_inverse_le:
+ "[|a \<le> b; 0 < a|] ==> inverse b \<le> inverse (a::'a::linordered_field)"
+by (force simp add: order_le_less less_imp_inverse_less)
+
+lemma inverse_le_iff_le [simp,noatp]:
+ "[|0 < a; 0 < b|] ==> (inverse a \<le> inverse b) = (b \<le> (a::'a::linordered_field))"
+by (blast intro: le_imp_inverse_le dest: inverse_le_imp_le)
+
+
+text{*These results refer to both operands being negative. The opposite-sign
+case is trivial, since inverse preserves signs.*}
+lemma inverse_le_imp_le_neg:
+ "[|inverse a \<le> inverse b; b < 0|] ==> b \<le> (a::'a::linordered_field)"
+apply (rule classical)
+apply (subgoal_tac "a < 0")
+ prefer 2 apply (force simp add: linorder_not_le intro: order_less_trans)
+apply (insert inverse_le_imp_le [of "-b" "-a"])
+apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
+done
+
+lemma less_imp_inverse_less_neg:
+ "[|a < b; b < 0|] ==> inverse b < inverse (a::'a::linordered_field)"
+apply (subgoal_tac "a < 0")
+ prefer 2 apply (blast intro: order_less_trans)
+apply (insert less_imp_inverse_less [of "-b" "-a"])
+apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
+done
+
+lemma inverse_less_imp_less_neg:
+ "[|inverse a < inverse b; b < 0|] ==> b < (a::'a::linordered_field)"
+apply (rule classical)
+apply (subgoal_tac "a < 0")
+ prefer 2
+ apply (force simp add: linorder_not_less intro: order_le_less_trans)
+apply (insert inverse_less_imp_less [of "-b" "-a"])
+apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
+done
+
+lemma inverse_less_iff_less_neg [simp,noatp]:
+ "[|a < 0; b < 0|] ==> (inverse a < inverse b) = (b < (a::'a::linordered_field))"
+apply (insert inverse_less_iff_less [of "-b" "-a"])
+apply (simp del: inverse_less_iff_less
+ add: order_less_imp_not_eq nonzero_inverse_minus_eq)
+done
+
+lemma le_imp_inverse_le_neg:
+ "[|a \<le> b; b < 0|] ==> inverse b \<le> inverse (a::'a::linordered_field)"
+by (force simp add: order_le_less less_imp_inverse_less_neg)
+
+lemma inverse_le_iff_le_neg [simp,noatp]:
+ "[|a < 0; b < 0|] ==> (inverse a \<le> inverse b) = (b \<le> (a::'a::linordered_field))"
+by (blast intro: le_imp_inverse_le_neg dest: inverse_le_imp_le_neg)
+
+
+subsection{*Inverses and the Number One*}
+
+lemma one_less_inverse_iff:
+ "(1 < inverse x) = (0 < x & x < (1::'a::{linordered_field,division_by_zero}))"
+proof cases
+ assume "0 < x"
+ with inverse_less_iff_less [OF zero_less_one, of x]
+ show ?thesis by simp
+next
+ assume notless: "~ (0 < x)"
+ have "~ (1 < inverse x)"
+ proof
+ assume "1 < inverse x"
+ also with notless have "... \<le> 0" by (simp add: linorder_not_less)
+ also have "... < 1" by (rule zero_less_one)
+ finally show False by auto
+ qed
+ with notless show ?thesis by simp
+qed
+
+lemma inverse_eq_1_iff [simp]:
+ "(inverse x = 1) = (x = (1::'a::{field,division_by_zero}))"
+by (insert inverse_eq_iff_eq [of x 1], simp)
+
+lemma one_le_inverse_iff:
+ "(1 \<le> inverse x) = (0 < x & x \<le> (1::'a::{linordered_field,division_by_zero}))"
+by (force simp add: order_le_less one_less_inverse_iff zero_less_one
+ eq_commute [of 1])
+
+lemma inverse_less_1_iff:
+ "(inverse x < 1) = (x \<le> 0 | 1 < (x::'a::{linordered_field,division_by_zero}))"
+by (simp add: linorder_not_le [symmetric] one_le_inverse_iff)
+
+lemma inverse_le_1_iff:
+ "(inverse x \<le> 1) = (x \<le> 0 | 1 \<le> (x::'a::{linordered_field,division_by_zero}))"
+by (simp add: linorder_not_less [symmetric] one_less_inverse_iff)
+
+
+subsection{*Simplification of Inequalities Involving Literal Divisors*}
+
+lemma pos_le_divide_eq: "0 < (c::'a::linordered_field) ==> (a \<le> b/c) = (a*c \<le> b)"
+proof -
+ assume less: "0<c"
+ hence "(a \<le> b/c) = (a*c \<le> (b/c)*c)"
+ by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
+ also have "... = (a*c \<le> b)"
+ by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma neg_le_divide_eq: "c < (0::'a::linordered_field) ==> (a \<le> b/c) = (b \<le> a*c)"
+proof -
+ assume less: "c<0"
+ hence "(a \<le> b/c) = ((b/c)*c \<le> a*c)"
+ by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
+ also have "... = (b \<le> a*c)"
+ by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma le_divide_eq:
+ "(a \<le> b/c) =
+ (if 0 < c then a*c \<le> b
+ else if c < 0 then b \<le> a*c
+ else a \<le> (0::'a::{linordered_field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (force simp add: pos_le_divide_eq neg_le_divide_eq linorder_neq_iff)
+done
+
+lemma pos_divide_le_eq: "0 < (c::'a::linordered_field) ==> (b/c \<le> a) = (b \<le> a*c)"
+proof -
+ assume less: "0<c"
+ hence "(b/c \<le> a) = ((b/c)*c \<le> a*c)"
+ by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
+ also have "... = (b \<le> a*c)"
+ by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma neg_divide_le_eq: "c < (0::'a::linordered_field) ==> (b/c \<le> a) = (a*c \<le> b)"
+proof -
+ assume less: "c<0"
+ hence "(b/c \<le> a) = (a*c \<le> (b/c)*c)"
+ by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
+ also have "... = (a*c \<le> b)"
+ by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma divide_le_eq:
+ "(b/c \<le> a) =
+ (if 0 < c then b \<le> a*c
+ else if c < 0 then a*c \<le> b
+ else 0 \<le> (a::'a::{linordered_field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (force simp add: pos_divide_le_eq neg_divide_le_eq linorder_neq_iff)
+done
+
+lemma pos_less_divide_eq:
+ "0 < (c::'a::linordered_field) ==> (a < b/c) = (a*c < b)"
+proof -
+ assume less: "0<c"
+ hence "(a < b/c) = (a*c < (b/c)*c)"
+ by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
+ also have "... = (a*c < b)"
+ by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma neg_less_divide_eq:
+ "c < (0::'a::linordered_field) ==> (a < b/c) = (b < a*c)"
+proof -
+ assume less: "c<0"
+ hence "(a < b/c) = ((b/c)*c < a*c)"
+ by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
+ also have "... = (b < a*c)"
+ by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma less_divide_eq:
+ "(a < b/c) =
+ (if 0 < c then a*c < b
+ else if c < 0 then b < a*c
+ else a < (0::'a::{linordered_field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (force simp add: pos_less_divide_eq neg_less_divide_eq linorder_neq_iff)
+done
+
+lemma pos_divide_less_eq:
+ "0 < (c::'a::linordered_field) ==> (b/c < a) = (b < a*c)"
+proof -
+ assume less: "0<c"
+ hence "(b/c < a) = ((b/c)*c < a*c)"
+ by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
+ also have "... = (b < a*c)"
+ by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma neg_divide_less_eq:
+ "c < (0::'a::linordered_field) ==> (b/c < a) = (a*c < b)"
+proof -
+ assume less: "c<0"
+ hence "(b/c < a) = (a*c < (b/c)*c)"
+ by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
+ also have "... = (a*c < b)"
+ by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
+ finally show ?thesis .
+qed
+
+lemma divide_less_eq:
+ "(b/c < a) =
+ (if 0 < c then b < a*c
+ else if c < 0 then a*c < b
+ else 0 < (a::'a::{linordered_field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (force simp add: pos_divide_less_eq neg_divide_less_eq linorder_neq_iff)
+done
+
+
+subsection{*Field simplification*}
+
+text{* Lemmas @{text field_simps} multiply with denominators in (in)equations
+if they can be proved to be non-zero (for equations) or positive/negative
+(for inequations). They can be too aggressive and are therefore kept separate
+from the more benign @{text algebra_simps}. *}
+
+lemmas field_simps[noatp] = field_eq_simps
+ (* multiply ineqn *)
+ pos_divide_less_eq neg_divide_less_eq
+ pos_less_divide_eq neg_less_divide_eq
+ pos_divide_le_eq neg_divide_le_eq
+ pos_le_divide_eq neg_le_divide_eq
+
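+(* A minimal usage sketch (an illustrative lemma, not one of the rules above):
+   once the sign of the divisor is known, field_simps multiplies it out of an
+   inequality. *)
+lemma "0 < (c::'a::linordered_field) \<Longrightarrow> (a / c < b) = (a < b * c)"
+by (simp add: field_simps)
+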
+text{* Lemmas @{text sign_simps} are a first attempt to automate proofs
+of positivity/negativity needed for @{text field_simps}. They have not been
+added to @{text field_simps} because they can lead to case explosions. *}
+
+lemmas sign_simps[noatp] = group_simps
+ zero_less_mult_iff mult_less_0_iff
+
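+(* A minimal sketch of the intended use (an illustrative lemma only):
+   sign_simps reduces the sign of a product to sign conditions on its
+   factors, which here follow from the assumptions. *)
+lemma "(0::'a::linordered_field) < a \<Longrightarrow> b < 0 \<Longrightarrow> a * b < 0"
+by (simp add: sign_simps)
+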
+(* Only works once linear arithmetic is installed:
+text{*An example:*}
+lemma fixes a b c d e f :: "'a::linordered_field"
+shows "\<lbrakk>a>b; c<d; e<f; 0 < u \<rbrakk> \<Longrightarrow>
+ ((a-b)*(c-d)*(e-f))/((c-d)*(e-f)*(a-b)) <
+ ((e-f)*(a-b)*(c-d))/((e-f)*(a-b)*(c-d)) + u"
+apply(subgoal_tac "(c-d)*(e-f)*(a-b) > 0")
+ prefer 2 apply(simp add:sign_simps)
+apply(subgoal_tac "(c-d)*(e-f)*(a-b)*u > 0")
+ prefer 2 apply(simp add:sign_simps)
+apply(simp add:field_simps)
+done
+*)
+
+
+subsection{*Division and Signs*}
+
+lemma zero_less_divide_iff:
+ "((0::'a::{linordered_field,division_by_zero}) < a/b) = (0 < a & 0 < b | a < 0 & b < 0)"
+by (simp add: divide_inverse zero_less_mult_iff)
+
+lemma divide_less_0_iff:
+ "(a/b < (0::'a::{linordered_field,division_by_zero})) =
+ (0 < a & b < 0 | a < 0 & 0 < b)"
+by (simp add: divide_inverse mult_less_0_iff)
+
+lemma zero_le_divide_iff:
+ "((0::'a::{linordered_field,division_by_zero}) \<le> a/b) =
+ (0 \<le> a & 0 \<le> b | a \<le> 0 & b \<le> 0)"
+by (simp add: divide_inverse zero_le_mult_iff)
+
+lemma divide_le_0_iff:
+ "(a/b \<le> (0::'a::{linordered_field,division_by_zero})) =
+ (0 \<le> a & b \<le> 0 | a \<le> 0 & 0 \<le> b)"
+by (simp add: divide_inverse mult_le_0_iff)
+
+lemma divide_eq_0_iff [simp,noatp]:
+ "(a/b = 0) = (a=0 | b=(0::'a::{field,division_by_zero}))"
+by (simp add: divide_inverse)
+
+lemma divide_pos_pos:
+ "0 < (x::'a::linordered_field) ==> 0 < y ==> 0 < x / y"
+by(simp add:field_simps)
+
+
+lemma divide_nonneg_pos:
+ "0 <= (x::'a::linordered_field) ==> 0 < y ==> 0 <= x / y"
+by(simp add:field_simps)
+
+lemma divide_neg_pos:
+ "(x::'a::linordered_field) < 0 ==> 0 < y ==> x / y < 0"
+by(simp add:field_simps)
+
+lemma divide_nonpos_pos:
+ "(x::'a::linordered_field) <= 0 ==> 0 < y ==> x / y <= 0"
+by(simp add:field_simps)
+
+lemma divide_pos_neg:
+ "0 < (x::'a::linordered_field) ==> y < 0 ==> x / y < 0"
+by(simp add:field_simps)
+
+lemma divide_nonneg_neg:
+ "0 <= (x::'a::linordered_field) ==> y < 0 ==> x / y <= 0"
+by(simp add:field_simps)
+
+lemma divide_neg_neg:
+ "(x::'a::linordered_field) < 0 ==> y < 0 ==> 0 < x / y"
+by(simp add:field_simps)
+
+lemma divide_nonpos_neg:
+ "(x::'a::linordered_field) <= 0 ==> y < 0 ==> 0 <= x / y"
+by(simp add:field_simps)
+
+
+subsection{*Cancellation Laws for Division*}
+
+lemma divide_cancel_right [simp,noatp]:
+ "(a/c = b/c) = (c = 0 | a = (b::'a::{field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (simp add: divide_inverse)
+done
+
+lemma divide_cancel_left [simp,noatp]:
+ "(c/a = c/b) = (c = 0 | a = (b::'a::{field,division_by_zero}))"
+apply (cases "c=0", simp)
+apply (simp add: divide_inverse)
+done
+
+
+subsection {* Division and the Number One *}
+
+text{*Simplify expressions equated with 1*}
+lemma divide_eq_1_iff [simp,noatp]:
+ "(a/b = 1) = (b \<noteq> 0 & a = (b::'a::{field,division_by_zero}))"
+apply (cases "b=0", simp)
+apply (simp add: right_inverse_eq)
+done
+
+lemma one_eq_divide_iff [simp,noatp]:
+ "(1 = a/b) = (b \<noteq> 0 & a = (b::'a::{field,division_by_zero}))"
+by (simp add: eq_commute [of 1])
+
+lemma zero_eq_1_divide_iff [simp,noatp]:
+ "((0::'a::{linordered_field,division_by_zero}) = 1/a) = (a = 0)"
+apply (cases "a=0", simp)
+apply (auto simp add: nonzero_eq_divide_eq)
+done
+
+lemma one_divide_eq_0_iff [simp,noatp]:
+ "(1/a = (0::'a::{linordered_field,division_by_zero})) = (a = 0)"
+apply (cases "a=0", simp)
+apply (insert zero_neq_one [THEN not_sym])
+apply (auto simp add: nonzero_divide_eq_eq)
+done
+
+text{*Simplify expressions such as @{text "0 < 1/x"} to @{text "0 < x"}*}
+lemmas zero_less_divide_1_iff = zero_less_divide_iff [of 1, simplified]
+lemmas divide_less_0_1_iff = divide_less_0_iff [of 1, simplified]
+lemmas zero_le_divide_1_iff = zero_le_divide_iff [of 1, simplified]
+lemmas divide_le_0_1_iff = divide_le_0_iff [of 1, simplified]
+
+declare zero_less_divide_1_iff [simp,noatp]
+declare divide_less_0_1_iff [simp,noatp]
+declare zero_le_divide_1_iff [simp,noatp]
+declare divide_le_0_1_iff [simp,noatp]
+
+
+subsection {* Ordering Rules for Division *}
+
+lemma divide_strict_right_mono:
+ "[|a < b; 0 < c|] ==> a / c < b / (c::'a::linordered_field)"
+by (simp add: order_less_imp_not_eq2 divide_inverse mult_strict_right_mono
+ positive_imp_inverse_positive)
+
+lemma divide_right_mono:
+ "[|a \<le> b; 0 \<le> c|] ==> a/c \<le> b/(c::'a::{linordered_field,division_by_zero})"
+by (force simp add: divide_strict_right_mono order_le_less)
+
+lemma divide_right_mono_neg: "(a::'a::{division_by_zero,linordered_field}) <= b
+ ==> c <= 0 ==> b / c <= a / c"
+apply (drule divide_right_mono [of _ _ "- c"])
+apply auto
+done
+
+lemma divide_strict_right_mono_neg:
+ "[|b < a; c < 0|] ==> a / c < b / (c::'a::linordered_field)"
+apply (drule divide_strict_right_mono [of _ _ "-c"], simp)
+apply (simp add: order_less_imp_not_eq nonzero_minus_divide_right [symmetric])
+done
+
+text{*The last premise ensures that @{term a} and @{term b}
+ have the same sign*}
+lemma divide_strict_left_mono:
+ "[|b < a; 0 < c; 0 < a*b|] ==> c / a < c / (b::'a::linordered_field)"
+by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_strict_right_mono)
+
+lemma divide_left_mono:
+ "[|b \<le> a; 0 \<le> c; 0 < a*b|] ==> c / a \<le> c / (b::'a::linordered_field)"
+by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_right_mono)
+
+lemma divide_left_mono_neg: "(a::'a::{division_by_zero,linordered_field}) <= b
+ ==> c <= 0 ==> 0 < a * b ==> c / a <= c / b"
+ apply (drule divide_left_mono [of _ _ "- c"])
+ apply (auto simp add: mult_commute)
+done
+
+lemma divide_strict_left_mono_neg:
+ "[|a < b; c < 0; 0 < a*b|] ==> c / a < c / (b::'a::linordered_field)"
+by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_strict_right_mono_neg)
+
+
+text{*Simplify quotients that are compared with the value 1.*}
+
+lemma le_divide_eq_1 [noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(1 \<le> b / a) = ((0 < a & a \<le> b) | (a < 0 & b \<le> a))"
+by (auto simp add: le_divide_eq)
+
+lemma divide_le_eq_1 [noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(b / a \<le> 1) = ((0 < a & b \<le> a) | (a < 0 & a \<le> b) | a=0)"
+by (auto simp add: divide_le_eq)
+
+lemma less_divide_eq_1 [noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(1 < b / a) = ((0 < a & a < b) | (a < 0 & b < a))"
+by (auto simp add: less_divide_eq)
+
+lemma divide_less_eq_1 [noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(b / a < 1) = ((0 < a & b < a) | (a < 0 & a < b) | a=0)"
+by (auto simp add: divide_less_eq)
+
+
+subsection{*Conditional Simplification Rules: No Case Splits*}
+
+lemma le_divide_eq_1_pos [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "0 < a \<Longrightarrow> (1 \<le> b/a) = (a \<le> b)"
+by (auto simp add: le_divide_eq)
+
+lemma le_divide_eq_1_neg [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "a < 0 \<Longrightarrow> (1 \<le> b/a) = (b \<le> a)"
+by (auto simp add: le_divide_eq)
+
+lemma divide_le_eq_1_pos [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "0 < a \<Longrightarrow> (b/a \<le> 1) = (b \<le> a)"
+by (auto simp add: divide_le_eq)
+
+lemma divide_le_eq_1_neg [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "a < 0 \<Longrightarrow> (b/a \<le> 1) = (a \<le> b)"
+by (auto simp add: divide_le_eq)
+
+lemma less_divide_eq_1_pos [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "0 < a \<Longrightarrow> (1 < b/a) = (a < b)"
+by (auto simp add: less_divide_eq)
+
+lemma less_divide_eq_1_neg [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "a < 0 \<Longrightarrow> (1 < b/a) = (b < a)"
+by (auto simp add: less_divide_eq)
+
+lemma divide_less_eq_1_pos [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "0 < a \<Longrightarrow> (b/a < 1) = (b < a)"
+by (auto simp add: divide_less_eq)
+
+lemma divide_less_eq_1_neg [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+  shows "a < 0 \<Longrightarrow> (b/a < 1) = (a < b)"
+by (auto simp add: divide_less_eq)
+
+lemma eq_divide_eq_1 [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(1 = b/a) = ((a \<noteq> 0 & a = b))"
+by (auto simp add: eq_divide_eq)
+
+lemma divide_eq_eq_1 [simp,noatp]:
+ fixes a :: "'a :: {linordered_field,division_by_zero}"
+ shows "(b/a = 1) = ((a \<noteq> 0 & a = b))"
+by (auto simp add: divide_eq_eq)
+
+
+subsection {* Reasoning about inequalities with division *}
+
+lemma mult_imp_div_pos_le: "0 < (y::'a::linordered_field) ==> x <= z * y ==>
+ x / y <= z"
+by (subst pos_divide_le_eq, assumption+)
+
+lemma mult_imp_le_div_pos: "0 < (y::'a::linordered_field) ==> z * y <= x ==>
+ z <= x / y"
+by(simp add:field_simps)
+
+lemma mult_imp_div_pos_less: "0 < (y::'a::linordered_field) ==> x < z * y ==>
+ x / y < z"
+by(simp add:field_simps)
+
+lemma mult_imp_less_div_pos: "0 < (y::'a::linordered_field) ==> z * y < x ==>
+ z < x / y"
+by(simp add:field_simps)
+
+lemma frac_le: "(0::'a::linordered_field) <= x ==>
+ x <= y ==> 0 < w ==> w <= z ==> x / z <= y / w"
+ apply (rule mult_imp_div_pos_le)
+ apply simp
+ apply (subst times_divide_eq_left)
+ apply (rule mult_imp_le_div_pos, assumption)
+ apply (rule mult_mono)
+ apply simp_all
+done
+
+lemma frac_less: "(0::'a::linordered_field) <= x ==>
+ x < y ==> 0 < w ==> w <= z ==> x / z < y / w"
+ apply (rule mult_imp_div_pos_less)
+ apply simp
+ apply (subst times_divide_eq_left)
+ apply (rule mult_imp_less_div_pos, assumption)
+ apply (erule mult_less_le_imp_less)
+ apply simp_all
+done
+
+lemma frac_less2: "(0::'a::linordered_field) < x ==>
+ x <= y ==> 0 < w ==> w < z ==> x / z < y / w"
+ apply (rule mult_imp_div_pos_less)
+ apply simp_all
+ apply (subst times_divide_eq_left)
+ apply (rule mult_imp_less_div_pos, assumption)
+ apply (erule mult_le_less_imp_less)
+ apply simp_all
+done
+
+text{*It's not obvious whether these should be simprules or not.
+ Their effect is to gather terms into one big fraction, like
+ a*b*c / x*y*z. The rationale for that is unclear, but many proofs
+ seem to need them.*}
+
+declare times_divide_eq [simp]
+
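+(* A small sketch of the gathering effect just described (an illustrative
+   lemma only): with times_divide_eq as simp rules, scattered divisions
+   collect into a single fraction. *)
+lemma "a * (b / c) * d = a * b * d / (c::'a::field)"
+by simp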
+
+subsection {* Ordered Fields are Dense *}
+
+lemma less_half_sum: "a < b ==> a < (a+b) / (1+1::'a::linordered_field)"
+by (simp add: field_simps zero_less_two)
+
+lemma gt_half_sum: "a < b ==> (a+b)/(1+1::'a::linordered_field) < b"
+by (simp add: field_simps zero_less_two)
+
+instance linordered_field < dense_linorder
+proof
+ fix x y :: 'a
+ have "x < x + 1" by simp
+ then show "\<exists>y. x < y" ..
+ have "x - 1 < x" by simp
+ then show "\<exists>y. y < x" ..
+ show "x < y \<Longrightarrow> \<exists>z>x. z < y" by (blast intro!: less_half_sum gt_half_sum)
+qed
+
+
+subsection {* Absolute Value *}
+
+lemma nonzero_abs_inverse:
+ "a \<noteq> 0 ==> abs (inverse (a::'a::linordered_field)) = inverse (abs a)"
+apply (auto simp add: linorder_neq_iff abs_if nonzero_inverse_minus_eq
+ negative_imp_inverse_negative)
+apply (blast intro: positive_imp_inverse_positive elim: order_less_asym)
+done
+
+lemma abs_inverse [simp]:
+ "abs (inverse (a::'a::{linordered_field,division_by_zero})) =
+ inverse (abs a)"
+apply (cases "a=0", simp)
+apply (simp add: nonzero_abs_inverse)
+done
+
+lemma nonzero_abs_divide:
+ "b \<noteq> 0 ==> abs (a / (b::'a::linordered_field)) = abs a / abs b"
+by (simp add: divide_inverse abs_mult nonzero_abs_inverse)
+
+lemma abs_divide [simp]:
+ "abs (a / (b::'a::{linordered_field,division_by_zero})) = abs a / abs b"
+apply (cases "b=0", simp)
+apply (simp add: nonzero_abs_divide)
+done
+
+lemma abs_div_pos: "(0::'a::{division_by_zero,linordered_field}) < y ==>
+ abs x / y = abs (x / y)"
+ apply (subst abs_divide)
+ apply (simp add: order_less_imp_le)
+done
+
+code_modulename SML
+ Fields Arith
+
+code_modulename OCaml
+ Fields Arith
+
+code_modulename Haskell
+ Fields Arith
+
+end
--- a/src/HOL/Finite_Set.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Finite_Set.thy Tue Feb 09 17:06:05 2010 +0100
@@ -830,7 +830,7 @@
end
-context lower_semilattice
+context semilattice_inf
begin
lemma ab_semigroup_idem_mult_inf: "ab_semigroup_idem_mult inf"
@@ -857,20 +857,20 @@
end
-context upper_semilattice
+context semilattice_sup
begin
lemma ab_semigroup_idem_mult_sup: "ab_semigroup_idem_mult sup"
-by (rule lower_semilattice.ab_semigroup_idem_mult_inf)(rule dual_semilattice)
+by (rule semilattice_inf.ab_semigroup_idem_mult_inf)(rule dual_semilattice)
lemma fold_sup_insert[simp]: "finite A \<Longrightarrow> fold sup b (insert a A) = sup a (fold sup b A)"
-by(rule lower_semilattice.fold_inf_insert)(rule dual_semilattice)
+by(rule semilattice_inf.fold_inf_insert)(rule dual_semilattice)
lemma fold_sup_le_sup: "finite A \<Longrightarrow> ALL a:A. a \<le> b \<Longrightarrow> fold sup c A \<le> sup b c"
-by(rule lower_semilattice.inf_le_fold_inf)(rule dual_semilattice)
+by(rule semilattice_inf.inf_le_fold_inf)(rule dual_semilattice)
lemma sup_le_fold_sup: "finite A \<Longrightarrow> a \<in> A \<Longrightarrow> sup a b \<le> fold sup b A"
-by(rule lower_semilattice.fold_inf_le_inf)(rule dual_semilattice)
+by(rule semilattice_inf.fold_inf_le_inf)(rule dual_semilattice)
end
@@ -1486,7 +1486,7 @@
qed
lemma setsum_mono:
- assumes le: "\<And>i. i\<in>K \<Longrightarrow> f (i::'a) \<le> ((g i)::('b::{comm_monoid_add, pordered_ab_semigroup_add}))"
+ assumes le: "\<And>i. i\<in>K \<Longrightarrow> f (i::'a) \<le> ((g i)::('b::{comm_monoid_add, ordered_ab_semigroup_add}))"
shows "(\<Sum>i\<in>K. f i) \<le> (\<Sum>i\<in>K. g i)"
proof (cases "finite K")
case True
@@ -1505,7 +1505,7 @@
qed
lemma setsum_strict_mono:
- fixes f :: "'a \<Rightarrow> 'b::{pordered_cancel_ab_semigroup_add,comm_monoid_add}"
+ fixes f :: "'a \<Rightarrow> 'b::{ordered_cancel_ab_semigroup_add,comm_monoid_add}"
assumes "finite A" "A \<noteq> {}"
and "!!x. x:A \<Longrightarrow> f x < g x"
shows "setsum f A < setsum g A"
@@ -1534,7 +1534,7 @@
qed
lemma setsum_nonneg:
- assumes nn: "\<forall>x\<in>A. (0::'a::{pordered_ab_semigroup_add,comm_monoid_add}) \<le> f x"
+ assumes nn: "\<forall>x\<in>A. (0::'a::{ordered_ab_semigroup_add,comm_monoid_add}) \<le> f x"
shows "0 \<le> setsum f A"
proof (cases "finite A")
case True thus ?thesis using nn
@@ -1550,7 +1550,7 @@
qed
lemma setsum_nonpos:
- assumes np: "\<forall>x\<in>A. f x \<le> (0::'a::{pordered_ab_semigroup_add,comm_monoid_add})"
+ assumes np: "\<forall>x\<in>A. f x \<le> (0::'a::{ordered_ab_semigroup_add,comm_monoid_add})"
shows "setsum f A \<le> 0"
proof (cases "finite A")
case True thus ?thesis using np
@@ -1566,7 +1566,7 @@
qed
lemma setsum_mono2:
-fixes f :: "'a \<Rightarrow> 'b :: {pordered_ab_semigroup_add_imp_le,comm_monoid_add}"
+fixes f :: "'a \<Rightarrow> 'b :: {ordered_ab_semigroup_add_imp_le,comm_monoid_add}"
assumes fin: "finite B" and sub: "A \<subseteq> B" and nn: "\<And>b. b \<in> B-A \<Longrightarrow> 0 \<le> f b"
shows "setsum f A \<le> setsum f B"
proof -
@@ -1580,7 +1580,7 @@
lemma setsum_mono3: "finite B ==> A <= B ==>
ALL x: B - A.
- 0 <= ((f x)::'a::{comm_monoid_add,pordered_ab_semigroup_add}) ==>
+ 0 <= ((f x)::'a::{comm_monoid_add,ordered_ab_semigroup_add}) ==>
setsum f A <= setsum f B"
apply (subgoal_tac "setsum f B = setsum f A + setsum f (B - A)")
apply (erule ssubst)
@@ -1640,7 +1640,7 @@
qed
lemma setsum_abs[iff]:
- fixes f :: "'a => ('b::pordered_ab_group_add_abs)"
+ fixes f :: "'a => ('b::ordered_ab_group_add_abs)"
shows "abs (setsum f A) \<le> setsum (%i. abs(f i)) A"
proof (cases "finite A")
case True
@@ -1656,7 +1656,7 @@
qed
lemma setsum_abs_ge_zero[iff]:
- fixes f :: "'a => ('b::pordered_ab_group_add_abs)"
+ fixes f :: "'a => ('b::ordered_ab_group_add_abs)"
shows "0 \<le> setsum (%i. abs(f i)) A"
proof (cases "finite A")
case True
@@ -1671,7 +1671,7 @@
qed
lemma abs_setsum_abs[simp]:
- fixes f :: "'a => ('b::pordered_ab_group_add_abs)"
+ fixes f :: "'a => ('b::ordered_ab_group_add_abs)"
shows "abs (\<Sum>a\<in>A. abs(f a)) = (\<Sum>a\<in>A. abs(f a))"
proof (cases "finite A")
case True
@@ -1946,10 +1946,10 @@
done
lemma setprod_nonneg [rule_format]:
- "(ALL x: A. (0::'a::ordered_semidom) \<le> f x) --> 0 \<le> setprod f A"
+ "(ALL x: A. (0::'a::linordered_semidom) \<le> f x) --> 0 \<le> setprod f A"
by (cases "finite A", induct set: finite, simp_all add: mult_nonneg_nonneg)
-lemma setprod_pos [rule_format]: "(ALL x: A. (0::'a::ordered_semidom) < f x)
+lemma setprod_pos [rule_format]: "(ALL x: A. (0::'a::linordered_semidom) < f x)
--> 0 < setprod f A"
by (cases "finite A", induct set: finite, simp_all add: mult_pos_pos)
@@ -2289,7 +2289,7 @@
lemma setsum_bounded:
- assumes le: "\<And>i. i\<in>A \<Longrightarrow> f i \<le> (K::'a::{semiring_1, pordered_ab_semigroup_add})"
+ assumes le: "\<And>i. i\<in>A \<Longrightarrow> f i \<le> (K::'a::{semiring_1, ordered_ab_semigroup_add})"
shows "setsum f A \<le> of_nat(card A) * K"
proof (cases "finite A")
case True
@@ -2791,7 +2791,7 @@
over (non-empty) sets by means of @{text fold1}.
*}
-context lower_semilattice
+context semilattice_inf
begin
lemma below_fold1_iff:
@@ -2859,7 +2859,7 @@
apply(erule exE)
apply(rule order_trans)
apply(erule (1) fold1_belowI)
-apply(erule (1) lower_semilattice.fold1_belowI [OF dual_semilattice])
+apply(erule (1) semilattice_inf.fold1_belowI [OF dual_semilattice])
done
lemma sup_Inf_absorb [simp]:
@@ -2871,7 +2871,7 @@
lemma inf_Sup_absorb [simp]:
"finite A \<Longrightarrow> a \<in> A \<Longrightarrow> inf a (\<Squnion>\<^bsub>fin\<^esub>A) = a"
by (simp add: Sup_fin_def inf_absorb1
- lower_semilattice.fold1_belowI [OF dual_semilattice])
+ semilattice_inf.fold1_belowI [OF dual_semilattice])
end
@@ -2991,7 +2991,7 @@
proof qed (auto simp add: max_def)
lemma max_lattice:
- "lower_semilattice (op \<ge>) (op >) max"
+ "semilattice_inf (op \<ge>) (op >) max"
by (fact min_max.dual_semilattice)
lemma dual_max:
@@ -3158,7 +3158,7 @@
assumes "finite A" and "x \<in> A"
shows "x \<le> Max A"
proof -
- interpret lower_semilattice "op \<ge>" "op >" max
+ interpret semilattice_inf "op \<ge>" "op >" max
by (rule max_lattice)
from assms show ?thesis by (simp add: Max_def fold1_belowI)
qed
@@ -3172,7 +3172,7 @@
assumes "finite A" and "A \<noteq> {}"
shows "Max A \<le> x \<longleftrightarrow> (\<forall>a\<in>A. a \<le> x)"
proof -
- interpret lower_semilattice "op \<ge>" "op >" max
+ interpret semilattice_inf "op \<ge>" "op >" max
by (rule max_lattice)
from assms show ?thesis by (simp add: Max_def below_fold1_iff)
qed
@@ -3293,7 +3293,7 @@
end
-context ordered_ab_semigroup_add
+context linordered_ab_semigroup_add
begin
lemma add_Min_commute:
@@ -3324,6 +3324,19 @@
end
+context linordered_ab_group_add
+begin
+
+lemma minus_Max_eq_Min [simp]:
+ "finite S \<Longrightarrow> S \<noteq> {} \<Longrightarrow> - (Max S) = Min (uminus ` S)"
+ by (induct S rule: finite_ne_induct) (simp_all add: minus_max_eq_min)
+
+lemma minus_Min_eq_Max [simp]:
+ "finite S \<Longrightarrow> S \<noteq> {} \<Longrightarrow> - (Min S) = Max (uminus ` S)"
+ by (induct S rule: finite_ne_induct) (simp_all add: minus_min_eq_max)
+
+end
+
subsection {* Expressing set operations via @{const fold} *}
@@ -3347,12 +3360,12 @@
proof
qed auto
-lemma (in lower_semilattice) fun_left_comm_idem_inf:
+lemma (in semilattice_inf) fun_left_comm_idem_inf:
"fun_left_comm_idem inf"
proof
qed (auto simp add: inf_left_commute)
-lemma (in upper_semilattice) fun_left_comm_idem_sup:
+lemma (in semilattice_sup) fun_left_comm_idem_sup:
"fun_left_comm_idem sup"
proof
qed (auto simp add: sup_left_commute)
--- a/src/HOL/GCD.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/GCD.thy Tue Feb 09 17:06:05 2010 +0100
@@ -1445,12 +1445,12 @@
subsubsection {* The complete divisibility lattice *}
-interpretation gcd_semilattice_nat: lower_semilattice "op dvd" "(%m n::nat. m dvd n & ~ n dvd m)" gcd
+interpretation gcd_semilattice_nat: semilattice_inf "op dvd" "(%m n::nat. m dvd n & ~ n dvd m)" gcd
proof
case goal3 thus ?case by(metis gcd_unique_nat)
qed auto
-interpretation lcm_semilattice_nat: upper_semilattice "op dvd" "(%m n::nat. m dvd n & ~ n dvd m)" lcm
+interpretation lcm_semilattice_nat: semilattice_sup "op dvd" "(%m n::nat. m dvd n & ~ n dvd m)" lcm
proof
case goal3 thus ?case by(metis lcm_unique_nat)
qed auto
--- a/src/HOL/Groebner_Basis.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Groebner_Basis.thy Tue Feb 09 17:06:05 2010 +0100
@@ -621,7 +621,7 @@
val ths = [@{thm "mult_numeral_1"}, @{thm "mult_numeral_1_right"},
@{thm "divide_Numeral1"},
- @{thm "Ring_and_Field.divide_zero"}, @{thm "divide_Numeral0"},
+ @{thm "Fields.divide_zero"}, @{thm "divide_Numeral0"},
@{thm "divide_divide_eq_left"}, @{thm "mult_frac_frac"},
@{thm "mult_num_frac"}, @{thm "mult_frac_num"},
@{thm "mult_frac_frac"}, @{thm "times_divide_eq_right"},
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Groups.thy Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,1159 @@
+(* Title: HOL/Groups.thy
+ Author: Gertrud Bauer, Steven Obua, Lawrence C Paulson, Markus Wenzel, Jeremy Avigad
+*)
+
+header {* Groups, also combined with orderings *}
+
+theory Groups
+imports Lattices
+uses "~~/src/Provers/Arith/abel_cancel.ML"
+begin
+
+text {*
+ The theory of partially ordered groups is taken from the books:
+ \begin{itemize}
+  \item \emph{Lattice Theory} by Garrett Birkhoff, American Mathematical Society 1979
+  \item \emph{Partially Ordered Algebraic Systems} by L. Fuchs, Pergamon Press 1963
+ \end{itemize}
+ Most of the used notions can also be looked up in
+ \begin{itemize}
+  \item \url{http://www.mathworld.com} by Eric Weisstein et al.
+ \item \emph{Algebra I} by van der Waerden, Springer.
+ \end{itemize}
+*}
+
+ML {*
+structure Algebra_Simps = Named_Thms(
+ val name = "algebra_simps"
+ val description = "algebra simplification rules"
+)
+*}
+
+setup Algebra_Simps.setup
+
+text{* The rewrites accumulated in @{text algebra_simps} deal with the
+classical algebraic structures of groups, rings and the like. They simplify
+terms by multiplying everything out (in case of a ring) and bringing sums and
+products into a canonical form (by ordered rewriting). As a result they decide
+group and ring equalities and also help with inequalities; a small worked
+example follows class @{text ab_group_add} below.
+
+Of course the rules also work for fields, but they know nothing about
+multiplicative inverses or division. This is catered for by @{text field_simps}. *}
+
+subsection {* Semigroups and Monoids *}
+
+class semigroup_add = plus +
+ assumes add_assoc [algebra_simps]: "(a + b) + c = a + (b + c)"
+
+sublocale semigroup_add < plus!: semigroup plus proof
+qed (fact add_assoc)
+
+class ab_semigroup_add = semigroup_add +
+ assumes add_commute [algebra_simps]: "a + b = b + a"
+
+sublocale ab_semigroup_add < plus!: abel_semigroup plus proof
+qed (fact add_commute)
+
+context ab_semigroup_add
+begin
+
+lemmas add_left_commute [algebra_simps] = plus.left_commute
+
+theorems add_ac = add_assoc add_commute add_left_commute
+
+end
+
+theorems add_ac = add_assoc add_commute add_left_commute
+
+class semigroup_mult = times +
+ assumes mult_assoc [algebra_simps]: "(a * b) * c = a * (b * c)"
+
+sublocale semigroup_mult < times!: semigroup times proof
+qed (fact mult_assoc)
+
+class ab_semigroup_mult = semigroup_mult +
+ assumes mult_commute [algebra_simps]: "a * b = b * a"
+
+sublocale ab_semigroup_mult < times!: abel_semigroup times proof
+qed (fact mult_commute)
+
+context ab_semigroup_mult
+begin
+
+lemmas mult_left_commute [algebra_simps] = times.left_commute
+
+theorems mult_ac = mult_assoc mult_commute mult_left_commute
+
+end
+
+theorems mult_ac = mult_assoc mult_commute mult_left_commute
+
+class ab_semigroup_idem_mult = ab_semigroup_mult +
+ assumes mult_idem: "x * x = x"
+
+sublocale ab_semigroup_idem_mult < times!: semilattice times proof
+qed (fact mult_idem)
+
+context ab_semigroup_idem_mult
+begin
+
+lemmas mult_left_idem = times.left_idem
+
+end
+
+class monoid_add = zero + semigroup_add +
+ assumes add_0_left [simp]: "0 + a = a"
+ and add_0_right [simp]: "a + 0 = a"
+
+lemma zero_reorient: "0 = x \<longleftrightarrow> x = 0"
+by (rule eq_commute)
+
+class comm_monoid_add = zero + ab_semigroup_add +
+ assumes add_0: "0 + a = a"
+begin
+
+subclass monoid_add
+ proof qed (insert add_0, simp_all add: add_commute)
+
+end
+
+class monoid_mult = one + semigroup_mult +
+ assumes mult_1_left [simp]: "1 * a = a"
+ assumes mult_1_right [simp]: "a * 1 = a"
+
+lemma one_reorient: "1 = x \<longleftrightarrow> x = 1"
+by (rule eq_commute)
+
+class comm_monoid_mult = one + ab_semigroup_mult +
+ assumes mult_1: "1 * a = a"
+begin
+
+subclass monoid_mult
+ proof qed (insert mult_1, simp_all add: mult_commute)
+
+end
+
+class cancel_semigroup_add = semigroup_add +
+ assumes add_left_imp_eq: "a + b = a + c \<Longrightarrow> b = c"
+ assumes add_right_imp_eq: "b + a = c + a \<Longrightarrow> b = c"
+begin
+
+lemma add_left_cancel [simp]:
+ "a + b = a + c \<longleftrightarrow> b = c"
+by (blast dest: add_left_imp_eq)
+
+lemma add_right_cancel [simp]:
+ "b + a = c + a \<longleftrightarrow> b = c"
+by (blast dest: add_right_imp_eq)
+
+end
+
+class cancel_ab_semigroup_add = ab_semigroup_add +
+ assumes add_imp_eq: "a + b = a + c \<Longrightarrow> b = c"
+begin
+
+subclass cancel_semigroup_add
+proof
+ fix a b c :: 'a
+ assume "a + b = a + c"
+ then show "b = c" by (rule add_imp_eq)
+next
+ fix a b c :: 'a
+ assume "b + a = c + a"
+ then have "a + b = a + c" by (simp only: add_commute)
+ then show "b = c" by (rule add_imp_eq)
+qed
+
+end
+
+class cancel_comm_monoid_add = cancel_ab_semigroup_add + comm_monoid_add
+
+
+subsection {* Groups *}
+
+class group_add = minus + uminus + monoid_add +
+ assumes left_minus [simp]: "- a + a = 0"
+ assumes diff_minus: "a - b = a + (- b)"
+begin
+
+lemma minus_unique:
+ assumes "a + b = 0" shows "- a = b"
+proof -
+ have "- a = - a + (a + b)" using assms by simp
+ also have "\<dots> = b" by (simp add: add_assoc [symmetric])
+ finally show ?thesis .
+qed
+
+lemmas equals_zero_I = minus_unique (* legacy name *)
+
+lemma minus_zero [simp]: "- 0 = 0"
+proof -
+ have "0 + 0 = 0" by (rule add_0_right)
+ thus "- 0 = 0" by (rule minus_unique)
+qed
+
+lemma minus_minus [simp]: "- (- a) = a"
+proof -
+ have "- a + a = 0" by (rule left_minus)
+ thus "- (- a) = a" by (rule minus_unique)
+qed
+
+lemma right_minus [simp]: "a + - a = 0"
+proof -
+ have "a + - a = - (- a) + - a" by simp
+ also have "\<dots> = 0" by (rule left_minus)
+ finally show ?thesis .
+qed
+
+lemma minus_add_cancel: "- a + (a + b) = b"
+by (simp add: add_assoc [symmetric])
+
+lemma add_minus_cancel: "a + (- a + b) = b"
+by (simp add: add_assoc [symmetric])
+
+lemma minus_add: "- (a + b) = - b + - a"
+proof -
+ have "(a + b) + (- b + - a) = 0"
+ by (simp add: add_assoc add_minus_cancel)
+ thus "- (a + b) = - b + - a"
+ by (rule minus_unique)
+qed
+
+lemma right_minus_eq: "a - b = 0 \<longleftrightarrow> a = b"
+proof
+ assume "a - b = 0"
+ have "a = (a - b) + b" by (simp add:diff_minus add_assoc)
+ also have "\<dots> = b" using `a - b = 0` by simp
+ finally show "a = b" .
+next
+ assume "a = b" thus "a - b = 0" by (simp add: diff_minus)
+qed
+
+lemma diff_self [simp]: "a - a = 0"
+by (simp add: diff_minus)
+
+lemma diff_0 [simp]: "0 - a = - a"
+by (simp add: diff_minus)
+
+lemma diff_0_right [simp]: "a - 0 = a"
+by (simp add: diff_minus)
+
+lemma diff_minus_eq_add [simp]: "a - - b = a + b"
+by (simp add: diff_minus)
+
+lemma neg_equal_iff_equal [simp]:
+ "- a = - b \<longleftrightarrow> a = b"
+proof
+ assume "- a = - b"
+ hence "- (- a) = - (- b)" by simp
+ thus "a = b" by simp
+next
+ assume "a = b"
+ thus "- a = - b" by simp
+qed
+
+lemma neg_equal_0_iff_equal [simp]:
+ "- a = 0 \<longleftrightarrow> a = 0"
+by (subst neg_equal_iff_equal [symmetric], simp)
+
+lemma neg_0_equal_iff_equal [simp]:
+ "0 = - a \<longleftrightarrow> 0 = a"
+by (subst neg_equal_iff_equal [symmetric], simp)
+
+text{*The next two equations can make the simplifier loop!*}
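+(* Illustration (informal): if equation_minus_iff were declared [simp], the
+   simplifier could rewrite "a = - b" to "b = - a" and then back to
+   "a = - b", so neither of the two lemmas below is a default simp rule. *)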
+
+lemma equation_minus_iff:
+ "a = - b \<longleftrightarrow> b = - a"
+proof -
+ have "- (- a) = - b \<longleftrightarrow> - a = b" by (rule neg_equal_iff_equal)
+ thus ?thesis by (simp add: eq_commute)
+qed
+
+lemma minus_equation_iff:
+ "- a = b \<longleftrightarrow> - b = a"
+proof -
+ have "- a = - (- b) \<longleftrightarrow> a = -b" by (rule neg_equal_iff_equal)
+ thus ?thesis by (simp add: eq_commute)
+qed
+
+lemma diff_add_cancel: "a - b + b = a"
+by (simp add: diff_minus add_assoc)
+
+lemma add_diff_cancel: "a + b - b = a"
+by (simp add: diff_minus add_assoc)
+
+declare diff_minus[symmetric, algebra_simps]
+
+lemma eq_neg_iff_add_eq_0: "a = - b \<longleftrightarrow> a + b = 0"
+proof
+ assume "a = - b" then show "a + b = 0" by simp
+next
+ assume "a + b = 0"
+ moreover have "a + (b + - b) = (a + b) + - b"
+ by (simp only: add_assoc)
+ ultimately show "a = - b" by simp
+qed
+
+end
+
+class ab_group_add = minus + uminus + comm_monoid_add +
+ assumes ab_left_minus: "- a + a = 0"
+ assumes ab_diff_minus: "a - b = a + (- b)"
+begin
+
+subclass group_add
+ proof qed (simp_all add: ab_left_minus ab_diff_minus)
+
+subclass cancel_comm_monoid_add
+proof
+ fix a b c :: 'a
+ assume "a + b = a + c"
+ then have "- a + a + b = - a + a + c"
+ unfolding add_assoc by simp
+ then show "b = c" by simp
+qed
+
+lemma uminus_add_conv_diff[algebra_simps]:
+ "- a + b = b - a"
+by (simp add:diff_minus add_commute)
+
+lemma minus_add_distrib [simp]:
+ "- (a + b) = - a + - b"
+by (rule minus_unique) (simp add: add_ac)
+
+lemma minus_diff_eq [simp]:
+ "- (a - b) = b - a"
+by (simp add: diff_minus add_commute)
+
+lemma add_diff_eq[algebra_simps]: "a + (b - c) = (a + b) - c"
+by (simp add: diff_minus add_ac)
+
+lemma diff_add_eq[algebra_simps]: "(a - b) + c = (a + c) - b"
+by (simp add: diff_minus add_ac)
+
+lemma diff_eq_eq[algebra_simps]: "a - b = c \<longleftrightarrow> a = c + b"
+by (auto simp add: diff_minus add_assoc)
+
+lemma eq_diff_eq[algebra_simps]: "a = c - b \<longleftrightarrow> a + b = c"
+by (auto simp add: diff_minus add_assoc)
+
+lemma diff_diff_eq[algebra_simps]: "(a - b) - c = a - (b + c)"
+by (simp add: diff_minus add_ac)
+
+lemma diff_diff_eq2[algebra_simps]: "a - (b - c) = (a + c) - b"
+by (simp add: diff_minus add_ac)
+
+lemma eq_iff_diff_eq_0: "a = b \<longleftrightarrow> a - b = 0"
+by (simp add: algebra_simps)
+
+lemma diff_eq_0_iff_eq [simp, noatp]: "a - b = 0 \<longleftrightarrow> a = b"
+by (simp add: algebra_simps)
+
+end
+
+subsection {* (Partially) Ordered Groups *}
+
+class ordered_ab_semigroup_add = order + ab_semigroup_add +
+ assumes add_left_mono: "a \<le> b \<Longrightarrow> c + a \<le> c + b"
+begin
+
+lemma add_right_mono:
+ "a \<le> b \<Longrightarrow> a + c \<le> b + c"
+by (simp add: add_commute [of _ c] add_left_mono)
+
+text {* non-strict, in both arguments *}
+lemma add_mono:
+ "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> a + c \<le> b + d"
+ apply (erule add_right_mono [THEN order_trans])
+ apply (simp add: add_commute add_left_mono)
+ done
+
+end
+
+class ordered_cancel_ab_semigroup_add =
+ ordered_ab_semigroup_add + cancel_ab_semigroup_add
+begin
+
+lemma add_strict_left_mono:
+ "a < b \<Longrightarrow> c + a < c + b"
+by (auto simp add: less_le add_left_mono)
+
+lemma add_strict_right_mono:
+ "a < b \<Longrightarrow> a + c < b + c"
+by (simp add: add_commute [of _ c] add_strict_left_mono)
+
+text{*Strict monotonicity in both arguments*}
+lemma add_strict_mono:
+ "a < b \<Longrightarrow> c < d \<Longrightarrow> a + c < b + d"
+apply (erule add_strict_right_mono [THEN less_trans])
+apply (erule add_strict_left_mono)
+done
+
+lemma add_less_le_mono:
+ "a < b \<Longrightarrow> c \<le> d \<Longrightarrow> a + c < b + d"
+apply (erule add_strict_right_mono [THEN less_le_trans])
+apply (erule add_left_mono)
+done
+
+lemma add_le_less_mono:
+ "a \<le> b \<Longrightarrow> c < d \<Longrightarrow> a + c < b + d"
+apply (erule add_right_mono [THEN le_less_trans])
+apply (erule add_strict_left_mono)
+done
+
+end
+
+class ordered_ab_semigroup_add_imp_le =
+ ordered_cancel_ab_semigroup_add +
+ assumes add_le_imp_le_left: "c + a \<le> c + b \<Longrightarrow> a \<le> b"
+begin
+
+lemma add_less_imp_less_left:
+ assumes less: "c + a < c + b" shows "a < b"
+proof -
+ from less have le: "c + a <= c + b" by (simp add: order_le_less)
+ have "a <= b"
+ apply (insert le)
+ apply (drule add_le_imp_le_left)
+ by (insert le, drule add_le_imp_le_left, assumption)
+ moreover have "a \<noteq> b"
+ proof (rule ccontr)
+ assume "~(a \<noteq> b)"
+ then have "a = b" by simp
+ then have "c + a = c + b" by simp
+ with less show "False" by simp
+ qed
+ ultimately show "a < b" by (simp add: order_le_less)
+qed
+
+lemma add_less_imp_less_right:
+ "a + c < b + c \<Longrightarrow> a < b"
+apply (rule add_less_imp_less_left [of c])
+apply (simp add: add_commute)
+done
+
+lemma add_less_cancel_left [simp]:
+ "c + a < c + b \<longleftrightarrow> a < b"
+by (blast intro: add_less_imp_less_left add_strict_left_mono)
+
+lemma add_less_cancel_right [simp]:
+ "a + c < b + c \<longleftrightarrow> a < b"
+by (blast intro: add_less_imp_less_right add_strict_right_mono)
+
+lemma add_le_cancel_left [simp]:
+ "c + a \<le> c + b \<longleftrightarrow> a \<le> b"
+by (auto, drule add_le_imp_le_left, simp_all add: add_left_mono)
+
+lemma add_le_cancel_right [simp]:
+ "a + c \<le> b + c \<longleftrightarrow> a \<le> b"
+by (simp add: add_commute [of a c] add_commute [of b c])
+
+lemma add_le_imp_le_right:
+ "a + c \<le> b + c \<Longrightarrow> a \<le> b"
+by simp
+
+lemma max_add_distrib_left:
+ "max x y + z = max (x + z) (y + z)"
+ unfolding max_def by auto
+
+lemma min_add_distrib_left:
+ "min x y + z = min (x + z) (y + z)"
+ unfolding min_def by auto
+
+end
+
+subsection {* Support for reasoning about signs *}
+
+class ordered_comm_monoid_add =
+ ordered_cancel_ab_semigroup_add + comm_monoid_add
+begin
+
+lemma add_pos_nonneg:
+ assumes "0 < a" and "0 \<le> b" shows "0 < a + b"
+proof -
+ have "0 + 0 < a + b"
+ using assms by (rule add_less_le_mono)
+ then show ?thesis by simp
+qed
+
+lemma add_pos_pos:
+ assumes "0 < a" and "0 < b" shows "0 < a + b"
+by (rule add_pos_nonneg) (insert assms, auto)
+
+lemma add_nonneg_pos:
+ assumes "0 \<le> a" and "0 < b" shows "0 < a + b"
+proof -
+ have "0 + 0 < a + b"
+ using assms by (rule add_le_less_mono)
+ then show ?thesis by simp
+qed
+
+lemma add_nonneg_nonneg:
+ assumes "0 \<le> a" and "0 \<le> b" shows "0 \<le> a + b"
+proof -
+ have "0 + 0 \<le> a + b"
+ using assms by (rule add_mono)
+ then show ?thesis by simp
+qed
+
+lemma add_neg_nonpos:
+ assumes "a < 0" and "b \<le> 0" shows "a + b < 0"
+proof -
+ have "a + b < 0 + 0"
+ using assms by (rule add_less_le_mono)
+ then show ?thesis by simp
+qed
+
+lemma add_neg_neg:
+ assumes "a < 0" and "b < 0" shows "a + b < 0"
+by (rule add_neg_nonpos) (insert assms, auto)
+
+lemma add_nonpos_neg:
+ assumes "a \<le> 0" and "b < 0" shows "a + b < 0"
+proof -
+ have "a + b < 0 + 0"
+ using assms by (rule add_le_less_mono)
+ then show ?thesis by simp
+qed
+
+lemma add_nonpos_nonpos:
+ assumes "a \<le> 0" and "b \<le> 0" shows "a + b \<le> 0"
+proof -
+ have "a + b \<le> 0 + 0"
+ using assms by (rule add_mono)
+ then show ?thesis by simp
+qed
+
+lemmas add_sign_intros =
+ add_pos_nonneg add_pos_pos add_nonneg_pos add_nonneg_nonneg
+ add_neg_nonpos add_neg_neg add_nonpos_neg add_nonpos_nonpos
+
+lemma add_nonneg_eq_0_iff:
+ assumes x: "0 \<le> x" and y: "0 \<le> y"
+ shows "x + y = 0 \<longleftrightarrow> x = 0 \<and> y = 0"
+proof (intro iffI conjI)
+ have "x = x + 0" by simp
+ also have "x + 0 \<le> x + y" using y by (rule add_left_mono)
+ also assume "x + y = 0"
+ also have "0 \<le> x" using x .
+ finally show "x = 0" .
+next
+ have "y = 0 + y" by simp
+ also have "0 + y \<le> x + y" using x by (rule add_right_mono)
+ also assume "x + y = 0"
+ also have "0 \<le> y" using y .
+ finally show "y = 0" .
+next
+ assume "x = 0 \<and> y = 0"
+ then show "x + y = 0" by simp
+qed
+
+end
+
+class ordered_ab_group_add =
+ ab_group_add + ordered_ab_semigroup_add
+begin
+
+subclass ordered_cancel_ab_semigroup_add ..
+
+subclass ordered_ab_semigroup_add_imp_le
+proof
+ fix a b c :: 'a
+ assume "c + a \<le> c + b"
+ hence "(-c) + (c + a) \<le> (-c) + (c + b)" by (rule add_left_mono)
+ hence "((-c) + c) + a \<le> ((-c) + c) + b" by (simp only: add_assoc)
+ thus "a \<le> b" by simp
+qed
+
+subclass ordered_comm_monoid_add ..
+
+lemma max_diff_distrib_left:
+ shows "max x y - z = max (x - z) (y - z)"
+by (simp add: diff_minus, rule max_add_distrib_left)
+
+lemma min_diff_distrib_left:
+ shows "min x y - z = min (x - z) (y - z)"
+by (simp add: diff_minus, rule min_add_distrib_left)
+
+lemma le_imp_neg_le:
+ assumes "a \<le> b" shows "-b \<le> -a"
+proof -
+ have "-a+a \<le> -a+b" using `a \<le> b` by (rule add_left_mono)
+ hence "0 \<le> -a+b" by simp
+ hence "0 + (-b) \<le> (-a + b) + (-b)" by (rule add_right_mono)
+ thus ?thesis by (simp add: add_assoc)
+qed
+
+lemma neg_le_iff_le [simp]: "- b \<le> - a \<longleftrightarrow> a \<le> b"
+proof
+ assume "- b \<le> - a"
+ hence "- (- a) \<le> - (- b)" by (rule le_imp_neg_le)
+ thus "a\<le>b" by simp
+next
+ assume "a\<le>b"
+ thus "-b \<le> -a" by (rule le_imp_neg_le)
+qed
+
+lemma neg_le_0_iff_le [simp]: "- a \<le> 0 \<longleftrightarrow> 0 \<le> a"
+by (subst neg_le_iff_le [symmetric], simp)
+
+lemma neg_0_le_iff_le [simp]: "0 \<le> - a \<longleftrightarrow> a \<le> 0"
+by (subst neg_le_iff_le [symmetric], simp)
+
+lemma neg_less_iff_less [simp]: "- b < - a \<longleftrightarrow> a < b"
+by (force simp add: less_le)
+
+lemma neg_less_0_iff_less [simp]: "- a < 0 \<longleftrightarrow> 0 < a"
+by (subst neg_less_iff_less [symmetric], simp)
+
+lemma neg_0_less_iff_less [simp]: "0 < - a \<longleftrightarrow> a < 0"
+by (subst neg_less_iff_less [symmetric], simp)
+
+text{*The next several equations can make the simplifier loop!*}
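+(* Illustration (informal): e.g. less_minus_iff turns "a < - b" into
+   "b < - a", which again matches the same left-hand side pattern, so
+   repeated application would not terminate; hence no [simp] declarations. *)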
+
+lemma less_minus_iff: "a < - b \<longleftrightarrow> b < - a"
+proof -
+ have "(- (-a) < - b) = (b < - a)" by (rule neg_less_iff_less)
+ thus ?thesis by simp
+qed
+
+lemma minus_less_iff: "- a < b \<longleftrightarrow> - b < a"
+proof -
+ have "(- a < - (-b)) = (- b < a)" by (rule neg_less_iff_less)
+ thus ?thesis by simp
+qed
+
+lemma le_minus_iff: "a \<le> - b \<longleftrightarrow> b \<le> - a"
+proof -
+ have mm: "!! a (b::'a). (-(-a)) < -b \<Longrightarrow> -(-b) < -a" by (simp only: minus_less_iff)
+ have "(- (- a) <= -b) = (b <= - a)"
+ apply (auto simp only: le_less)
+ apply (drule mm)
+ apply (simp_all)
+ apply (drule mm[simplified], assumption)
+ done
+ then show ?thesis by simp
+qed
+
+lemma minus_le_iff: "- a \<le> b \<longleftrightarrow> - b \<le> a"
+by (auto simp add: le_less minus_less_iff)
+
+lemma less_iff_diff_less_0: "a < b \<longleftrightarrow> a - b < 0"
+proof -
+ have "(a < b) = (a + (- b) < b + (-b))"
+ by (simp only: add_less_cancel_right)
+ also have "... = (a - b < 0)" by (simp add: diff_minus)
+ finally show ?thesis .
+qed
+
+lemma diff_less_eq[algebra_simps]: "a - b < c \<longleftrightarrow> a < c + b"
+apply (subst less_iff_diff_less_0 [of a])
+apply (rule less_iff_diff_less_0 [of _ c, THEN ssubst])
+apply (simp add: diff_minus add_ac)
+done
+
+lemma less_diff_eq[algebra_simps]: "a < c - b \<longleftrightarrow> a + b < c"
+apply (subst less_iff_diff_less_0 [of "plus a b"])
+apply (subst less_iff_diff_less_0 [of a])
+apply (simp add: diff_minus add_ac)
+done
+
+lemma diff_le_eq[algebra_simps]: "a - b \<le> c \<longleftrightarrow> a \<le> c + b"
+by (auto simp add: le_less diff_less_eq diff_add_cancel add_diff_cancel)
+
+lemma le_diff_eq[algebra_simps]: "a \<le> c - b \<longleftrightarrow> a + b \<le> c"
+by (auto simp add: le_less less_diff_eq diff_add_cancel add_diff_cancel)
+
+lemma le_iff_diff_le_0: "a \<le> b \<longleftrightarrow> a - b \<le> 0"
+by (simp add: algebra_simps)
+
+text{*Legacy - use @{text algebra_simps} *}
+lemmas group_simps[noatp] = algebra_simps
+
+end
+
+text{*Legacy - use @{text algebra_simps} *}
+lemmas group_simps[noatp] = algebra_simps
+
+class linordered_ab_semigroup_add =
+ linorder + ordered_ab_semigroup_add
+
+class linordered_cancel_ab_semigroup_add =
+ linorder + ordered_cancel_ab_semigroup_add
+begin
+
+subclass linordered_ab_semigroup_add ..
+
+subclass ordered_ab_semigroup_add_imp_le
+proof
+ fix a b c :: 'a
+ assume le: "c + a <= c + b"
+ show "a <= b"
+ proof (rule ccontr)
+ assume w: "~ a \<le> b"
+ hence "b <= a" by (simp add: linorder_not_le)
+ hence le2: "c + b <= c + a" by (rule add_left_mono)
+ have "a = b"
+ apply (insert le)
+ apply (insert le2)
+ apply (drule antisym, simp_all)
+ done
+ with w show False
+ by (simp add: linorder_not_le [symmetric])
+ qed
+qed
+
+end
+
+class linordered_ab_group_add = linorder + ordered_ab_group_add
+begin
+
+subclass linordered_cancel_ab_semigroup_add ..
+
+lemma neg_less_eq_nonneg [simp]:
+ "- a \<le> a \<longleftrightarrow> 0 \<le> a"
+proof
+ assume A: "- a \<le> a" show "0 \<le> a"
+ proof (rule classical)
+ assume "\<not> 0 \<le> a"
+ then have "a < 0" by auto
+ with A have "- a < 0" by (rule le_less_trans)
+ then show ?thesis by auto
+ qed
+next
+ assume A: "0 \<le> a" show "- a \<le> a"
+ proof (rule order_trans)
+ show "- a \<le> 0" using A by (simp add: minus_le_iff)
+ next
+ show "0 \<le> a" using A .
+ qed
+qed
+
+lemma neg_less_nonneg [simp]:
+ "- a < a \<longleftrightarrow> 0 < a"
+proof
+ assume A: "- a < a" show "0 < a"
+ proof (rule classical)
+ assume "\<not> 0 < a"
+ then have "a \<le> 0" by auto
+ with A have "- a < 0" by (rule less_le_trans)
+ then show ?thesis by auto
+ qed
+next
+ assume A: "0 < a" show "- a < a"
+ proof (rule less_trans)
+ show "- a < 0" using A by (simp add: minus_le_iff)
+ next
+ show "0 < a" using A .
+ qed
+qed
+
+lemma less_eq_neg_nonpos [simp]:
+ "a \<le> - a \<longleftrightarrow> a \<le> 0"
+proof
+ assume A: "a \<le> - a" show "a \<le> 0"
+ proof (rule classical)
+ assume "\<not> a \<le> 0"
+ then have "0 < a" by auto
+ then have "0 < - a" using A by (rule less_le_trans)
+ then show ?thesis by auto
+ qed
+next
+ assume A: "a \<le> 0" show "a \<le> - a"
+ proof (rule order_trans)
+ show "0 \<le> - a" using A by (simp add: minus_le_iff)
+ next
+ show "a \<le> 0" using A .
+ qed
+qed
+
+lemma equal_neg_zero [simp]:
+ "a = - a \<longleftrightarrow> a = 0"
+proof
+ assume "a = 0" then show "a = - a" by simp
+next
+ assume A: "a = - a" show "a = 0"
+ proof (cases "0 \<le> a")
+ case True with A have "0 \<le> - a" by auto
+ with le_minus_iff have "a \<le> 0" by simp
+ with True show ?thesis by (auto intro: order_trans)
+ next
+ case False then have B: "a \<le> 0" by auto
+ with A have "- a \<le> 0" by auto
+ with B show ?thesis by (auto intro: order_trans)
+ qed
+qed
+
+lemma neg_equal_zero [simp]:
+ "- a = a \<longleftrightarrow> a = 0"
+ by (auto dest: sym)
+
+lemma double_zero [simp]:
+ "a + a = 0 \<longleftrightarrow> a = 0"
+proof
+ assume assm: "a + a = 0"
+ then have a: "- a = a" by (rule minus_unique)
+ then show "a = 0" by (simp add: neg_equal_zero)
+qed simp
+
+lemma double_zero_sym [simp]:
+ "0 = a + a \<longleftrightarrow> a = 0"
+ by (rule, drule sym) simp_all
+
+lemma zero_less_double_add_iff_zero_less_single_add [simp]:
+ "0 < a + a \<longleftrightarrow> 0 < a"
+proof
+ assume "0 < a + a"
+ then have "0 - a < a" by (simp only: diff_less_eq)
+ then have "- a < a" by simp
+ then show "0 < a" by (simp add: neg_less_nonneg)
+next
+ assume "0 < a"
+ with this have "0 + 0 < a + a"
+ by (rule add_strict_mono)
+ then show "0 < a + a" by simp
+qed
+
+lemma zero_le_double_add_iff_zero_le_single_add [simp]:
+ "0 \<le> a + a \<longleftrightarrow> 0 \<le> a"
+ by (auto simp add: le_less)
+
+lemma double_add_less_zero_iff_single_add_less_zero [simp]:
+ "a + a < 0 \<longleftrightarrow> a < 0"
+proof -
+ have "\<not> a + a < 0 \<longleftrightarrow> \<not> a < 0"
+ by (simp add: not_less)
+ then show ?thesis by simp
+qed
+
+lemma double_add_le_zero_iff_single_add_le_zero [simp]:
+ "a + a \<le> 0 \<longleftrightarrow> a \<le> 0"
+proof -
+ have "\<not> a + a \<le> 0 \<longleftrightarrow> \<not> a \<le> 0"
+ by (simp add: not_le)
+ then show ?thesis by simp
+qed
+
+lemma le_minus_self_iff:
+ "a \<le> - a \<longleftrightarrow> a \<le> 0"
+proof -
+ from add_le_cancel_left [of "- a" "a + a" 0]
+ have "a \<le> - a \<longleftrightarrow> a + a \<le> 0"
+ by (simp add: add_assoc [symmetric])
+ thus ?thesis by simp
+qed
+
+lemma minus_le_self_iff:
+ "- a \<le> a \<longleftrightarrow> 0 \<le> a"
+proof -
+ from add_le_cancel_left [of "- a" 0 "a + a"]
+ have "- a \<le> a \<longleftrightarrow> 0 \<le> a + a"
+ by (simp add: add_assoc [symmetric])
+ thus ?thesis by simp
+qed
+
+lemma minus_max_eq_min:
+ "- max x y = min (-x) (-y)"
+ by (auto simp add: max_def min_def)
+
+lemma minus_min_eq_max:
+ "- min x y = max (-x) (-y)"
+ by (auto simp add: max_def min_def)
+
+end
+
+-- {* FIXME localize the following *}
+
+lemma add_increasing:
+ fixes c :: "'a::{ordered_ab_semigroup_add_imp_le, comm_monoid_add}"
+ shows "[|0\<le>a; b\<le>c|] ==> b \<le> a + c"
+by (insert add_mono [of 0 a b c], simp)
+
+lemma add_increasing2:
+ fixes c :: "'a::{ordered_ab_semigroup_add_imp_le, comm_monoid_add}"
+ shows "[|0\<le>c; b\<le>a|] ==> b \<le> a + c"
+by (simp add:add_increasing add_commute[of a])
+
+lemma add_strict_increasing:
+ fixes c :: "'a::{ordered_ab_semigroup_add_imp_le, comm_monoid_add}"
+ shows "[|0<a; b\<le>c|] ==> b < a + c"
+by (insert add_less_le_mono [of 0 a b c], simp)
+
+lemma add_strict_increasing2:
+ fixes c :: "'a::{ordered_ab_semigroup_add_imp_le, comm_monoid_add}"
+ shows "[|0\<le>a; b<c|] ==> b < a + c"
+by (insert add_le_less_mono [of 0 a b c], simp)
+
+
+class ordered_ab_group_add_abs = ordered_ab_group_add + abs +
+ assumes abs_ge_zero [simp]: "\<bar>a\<bar> \<ge> 0"
+ and abs_ge_self: "a \<le> \<bar>a\<bar>"
+ and abs_leI: "a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b"
+ and abs_minus_cancel [simp]: "\<bar>-a\<bar> = \<bar>a\<bar>"
+ and abs_triangle_ineq: "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
+begin
+
+lemma abs_minus_le_zero: "- \<bar>a\<bar> \<le> 0"
+ unfolding neg_le_0_iff_le by simp
+
+lemma abs_of_nonneg [simp]:
+ assumes nonneg: "0 \<le> a" shows "\<bar>a\<bar> = a"
+proof (rule antisym)
+ from nonneg le_imp_neg_le have "- a \<le> 0" by simp
+ from this nonneg have "- a \<le> a" by (rule order_trans)
+ then show "\<bar>a\<bar> \<le> a" by (auto intro: abs_leI)
+qed (rule abs_ge_self)
+
+lemma abs_idempotent [simp]: "\<bar>\<bar>a\<bar>\<bar> = \<bar>a\<bar>"
+by (rule antisym)
+ (auto intro!: abs_ge_self abs_leI order_trans [of "uminus (abs a)" zero "abs a"])
+
+lemma abs_eq_0 [simp]: "\<bar>a\<bar> = 0 \<longleftrightarrow> a = 0"
+proof -
+ have "\<bar>a\<bar> = 0 \<Longrightarrow> a = 0"
+ proof (rule antisym)
+ assume zero: "\<bar>a\<bar> = 0"
+ with abs_ge_self show "a \<le> 0" by auto
+ from zero have "\<bar>-a\<bar> = 0" by simp
+ with abs_ge_self [of "uminus a"] have "- a \<le> 0" by auto
+ with neg_le_0_iff_le show "0 \<le> a" by auto
+ qed
+ then show ?thesis by auto
+qed
+
+lemma abs_zero [simp]: "\<bar>0\<bar> = 0"
+by simp
+
+lemma abs_0_eq [simp, noatp]: "0 = \<bar>a\<bar> \<longleftrightarrow> a = 0"
+proof -
+ have "0 = \<bar>a\<bar> \<longleftrightarrow> \<bar>a\<bar> = 0" by (simp only: eq_ac)
+ thus ?thesis by simp
+qed
+
+lemma abs_le_zero_iff [simp]: "\<bar>a\<bar> \<le> 0 \<longleftrightarrow> a = 0"
+proof
+ assume "\<bar>a\<bar> \<le> 0"
+ then have "\<bar>a\<bar> = 0" by (rule antisym) simp
+ thus "a = 0" by simp
+next
+ assume "a = 0"
+ thus "\<bar>a\<bar> \<le> 0" by simp
+qed
+
+lemma zero_less_abs_iff [simp]: "0 < \<bar>a\<bar> \<longleftrightarrow> a \<noteq> 0"
+by (simp add: less_le)
+
+lemma abs_not_less_zero [simp]: "\<not> \<bar>a\<bar> < 0"
+proof -
+ have a: "\<And>x y. x \<le> y \<Longrightarrow> \<not> y < x" by auto
+ show ?thesis by (simp add: a)
+qed
+
+lemma abs_ge_minus_self: "- a \<le> \<bar>a\<bar>"
+proof -
+ have "- a \<le> \<bar>-a\<bar>" by (rule abs_ge_self)
+ then show ?thesis by simp
+qed
+
+lemma abs_minus_commute:
+ "\<bar>a - b\<bar> = \<bar>b - a\<bar>"
+proof -
+ have "\<bar>a - b\<bar> = \<bar>- (a - b)\<bar>" by (simp only: abs_minus_cancel)
+ also have "... = \<bar>b - a\<bar>" by simp
+ finally show ?thesis .
+qed
+
+lemma abs_of_pos: "0 < a \<Longrightarrow> \<bar>a\<bar> = a"
+by (rule abs_of_nonneg, rule less_imp_le)
+
+lemma abs_of_nonpos [simp]:
+ assumes "a \<le> 0" shows "\<bar>a\<bar> = - a"
+proof -
+ let ?b = "- a"
+ have "- ?b \<le> 0 \<Longrightarrow> \<bar>- ?b\<bar> = - (- ?b)"
+ unfolding abs_minus_cancel [of "?b"]
+ unfolding neg_le_0_iff_le [of "?b"]
+ unfolding minus_minus by (erule abs_of_nonneg)
+ then show ?thesis using assms by auto
+qed
+
+lemma abs_of_neg: "a < 0 \<Longrightarrow> \<bar>a\<bar> = - a"
+by (rule abs_of_nonpos, rule less_imp_le)
+
+lemma abs_le_D1: "\<bar>a\<bar> \<le> b \<Longrightarrow> a \<le> b"
+by (insert abs_ge_self, blast intro: order_trans)
+
+lemma abs_le_D2: "\<bar>a\<bar> \<le> b \<Longrightarrow> - a \<le> b"
+by (insert abs_le_D1 [of "uminus a"], simp)
+
+lemma abs_le_iff: "\<bar>a\<bar> \<le> b \<longleftrightarrow> a \<le> b \<and> - a \<le> b"
+by (blast intro: abs_leI dest: abs_le_D1 abs_le_D2)
+
+lemma abs_triangle_ineq2: "\<bar>a\<bar> - \<bar>b\<bar> \<le> \<bar>a - b\<bar>"
+ apply (simp add: algebra_simps)
+ apply (subgoal_tac "abs a = abs (plus b (minus a b))")
+ apply (erule ssubst)
+ apply (rule abs_triangle_ineq)
+ apply (rule arg_cong[of _ _ abs])
+ apply (simp add: algebra_simps)
+done
+
+lemma abs_triangle_ineq3: "\<bar>\<bar>a\<bar> - \<bar>b\<bar>\<bar> \<le> \<bar>a - b\<bar>"
+ apply (subst abs_le_iff)
+ apply auto
+ apply (rule abs_triangle_ineq2)
+ apply (subst abs_minus_commute)
+ apply (rule abs_triangle_ineq2)
+done
+
+lemma abs_triangle_ineq4: "\<bar>a - b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
+proof -
+ have "abs(a - b) = abs(a + - b)" by (subst diff_minus, rule refl)
+ also have "... <= abs a + abs (- b)" by (rule abs_triangle_ineq)
+ finally show ?thesis by simp
+qed
+
+lemma abs_diff_triangle_ineq: "\<bar>a + b - (c + d)\<bar> \<le> \<bar>a - c\<bar> + \<bar>b - d\<bar>"
+proof -
+ have "\<bar>a + b - (c+d)\<bar> = \<bar>(a-c) + (b-d)\<bar>" by (simp add: diff_minus add_ac)
+ also have "... \<le> \<bar>a-c\<bar> + \<bar>b-d\<bar>" by (rule abs_triangle_ineq)
+ finally show ?thesis .
+qed
+
+lemma abs_add_abs [simp]:
+ "\<bar>\<bar>a\<bar> + \<bar>b\<bar>\<bar> = \<bar>a\<bar> + \<bar>b\<bar>" (is "?L = ?R")
+proof (rule antisym)
+ show "?L \<ge> ?R" by(rule abs_ge_self)
+next
+ have "?L \<le> \<bar>\<bar>a\<bar>\<bar> + \<bar>\<bar>b\<bar>\<bar>" by(rule abs_triangle_ineq)
+ also have "\<dots> = ?R" by simp
+ finally show "?L \<le> ?R" .
+qed
+
+end
+
+text {* Needed for abelian cancellation simprocs: *}
+
+lemma add_cancel_21: "((x::'a::ab_group_add) + (y + z) = y + u) = (x + z = u)"
+apply (subst add_left_commute)
+apply (subst add_left_cancel)
+apply simp
+done
+
+lemma add_cancel_end: "(x + (y + z) = y) = (x = - (z::'a::ab_group_add))"
+apply (subst add_cancel_21[of _ _ _ 0, simplified])
+apply (simp add: add_right_cancel[symmetric, of "x" "-z" "z", simplified])
+done
+
+lemma less_eqI: "(x::'a::ordered_ab_group_add) - y = x' - y' \<Longrightarrow> (x < y) = (x' < y')"
+by (simp add: less_iff_diff_less_0[of x y] less_iff_diff_less_0[of x' y'])
+
+lemma le_eqI: "(x::'a::ordered_ab_group_add) - y = x' - y' \<Longrightarrow> (y <= x) = (y' <= x')"
+apply (simp add: le_iff_diff_le_0[of y x] le_iff_diff_le_0[of y' x'])
+apply (simp add: neg_le_iff_le[symmetric, of "y-x" 0] neg_le_iff_le[symmetric, of "y'-x'" 0])
+done
+
+lemma eq_eqI: "(x::'a::ab_group_add) - y = x' - y' \<Longrightarrow> (x = y) = (x' = y')"
+by (simp only: eq_iff_diff_eq_0[of x y] eq_iff_diff_eq_0[of x' y'])
+
+lemma diff_def: "(x::'a::ab_group_add) - y == x + (-y)"
+by (simp add: diff_minus)
+
+lemma le_add_right_mono:
+ assumes
+ "a <= b + (c::'a::ordered_ab_group_add)"
+ "c <= d"
+ shows "a <= b + d"
+ apply (rule_tac order_trans[where y = "b+c"])
+ apply (simp_all add: prems)
+ done
+
+
+subsection {* Tools setup *}
+
+lemma add_mono_thms_linordered_semiring [noatp]:
+ fixes i j k :: "'a\<Colon>ordered_ab_semigroup_add"
+ shows "i \<le> j \<and> k \<le> l \<Longrightarrow> i + k \<le> j + l"
+ and "i = j \<and> k \<le> l \<Longrightarrow> i + k \<le> j + l"
+ and "i \<le> j \<and> k = l \<Longrightarrow> i + k \<le> j + l"
+ and "i = j \<and> k = l \<Longrightarrow> i + k = j + l"
+by (rule add_mono, clarify+)+
+
+lemma add_mono_thms_linordered_field [noatp]:
+ fixes i j k :: "'a\<Colon>ordered_cancel_ab_semigroup_add"
+ shows "i < j \<and> k = l \<Longrightarrow> i + k < j + l"
+ and "i = j \<and> k < l \<Longrightarrow> i + k < j + l"
+ and "i < j \<and> k \<le> l \<Longrightarrow> i + k < j + l"
+ and "i \<le> j \<and> k < l \<Longrightarrow> i + k < j + l"
+ and "i < j \<and> k < l \<Longrightarrow> i + k < j + l"
+by (auto intro: add_strict_right_mono add_strict_left_mono
+ add_less_le_mono add_le_less_mono add_strict_mono)
+
+text{*Simplification of @{term "x-y < 0"}, etc.*}
+lemmas diff_less_0_iff_less [simp, noatp] = less_iff_diff_less_0 [symmetric]
+lemmas diff_le_0_iff_le [simp, noatp] = le_iff_diff_le_0 [symmetric]
+
+ML {*
+structure ab_group_add_cancel = Abel_Cancel
+(
+
+(* term order for abelian groups *)
+
+fun agrp_ord (Const (a, _)) = find_index (fn a' => a = a')
+ [@{const_name Algebras.zero}, @{const_name Algebras.plus},
+ @{const_name Algebras.uminus}, @{const_name Algebras.minus}]
+ | agrp_ord _ = ~1;
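+(* Descriptive note: agrp_ord assigns fixed ranks to the group operations
+   (zero ~> 0, plus ~> 1, uminus ~> 2, minus ~> 3) and ~1 to anything else;
+   TermOrd.term_lpo lifts this precedence to the term order used for the
+   ordered rewriting below. *)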
+
+fun termless_agrp (a, b) = (TermOrd.term_lpo agrp_ord (a, b) = LESS);
+
+local
+ val ac1 = mk_meta_eq @{thm add_assoc};
+ val ac2 = mk_meta_eq @{thm add_commute};
+ val ac3 = mk_meta_eq @{thm add_left_commute};
+ fun solve_add_ac thy _ (_ $ (Const (@{const_name Algebras.plus},_) $ _ $ _) $ _) =
+ SOME ac1
+ | solve_add_ac thy _ (_ $ x $ (Const (@{const_name Algebras.plus},_) $ y $ z)) =
+ if termless_agrp (y, x) then SOME ac3 else NONE
+ | solve_add_ac thy _ (_ $ x $ y) =
+ if termless_agrp (y, x) then SOME ac2 else NONE
+ | solve_add_ac thy _ _ = NONE
+in
+ val add_ac_proc = Simplifier.simproc @{theory}
+ "add_ac_proc" ["x + y::'a::ab_semigroup_add"] solve_add_ac;
+end;
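+(* Descriptive note: add_ac_proc normalises sums by ordered rewriting;
+   "(x + y) + z" is reassociated via add_assoc, and summands are swapped via
+   add_commute / add_left_commute only when termless_agrp judges the swap
+   decreasing, which keeps the rewriting terminating. *)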
+
+val eq_reflection = @{thm eq_reflection};
+
+val T = @{typ "'a::ab_group_add"};
+
+val cancel_ss = HOL_basic_ss settermless termless_agrp
+ addsimprocs [add_ac_proc] addsimps
+ [@{thm add_0_left}, @{thm add_0_right}, @{thm diff_def},
+ @{thm minus_add_distrib}, @{thm minus_minus}, @{thm minus_zero},
+ @{thm right_minus}, @{thm left_minus}, @{thm add_minus_cancel},
+ @{thm minus_add_cancel}];
+
+val sum_pats = [@{cterm "x + y::'a::ab_group_add"}, @{cterm "x - y::'a::ab_group_add"}];
+
+val eqI_rules = [@{thm less_eqI}, @{thm le_eqI}, @{thm eq_eqI}];
+
+val dest_eqI =
+ fst o HOLogic.dest_bin "op =" HOLogic.boolT o HOLogic.dest_Trueprop o concl_of;
+
+);
+*}
+
+ML {*
+ Addsimprocs [ab_group_add_cancel.sum_conv, ab_group_add_cancel.rel_conv];
+*}
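+(* Hedged illustration: once installed, these cancellation simprocs would
+   reduce, e.g., "a + b - b = c" to "a = c" and "x + z \<le> y + z" to
+   "x \<le> y" on ab_group_add resp. ordered_ab_group_add types. *)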
+
+code_modulename SML
+ Groups Arith
+
+code_modulename OCaml
+ Groups Arith
+
+code_modulename Haskell
+ Groups Arith
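+(* Note: the code_modulename declarations above direct code generated from
+   theory Groups into module "Arith" for each of the SML, OCaml and Haskell
+   serialisers. *)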
+
+end
--- a/src/HOL/Hoare/Hoare.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Hoare/Hoare.thy Tue Feb 09 17:06:05 2010 +0100
@@ -24,12 +24,7 @@
| Cond "'a bexp" "'a com" "'a com" ("(1IF _/ THEN _ / ELSE _/ FI)" [0,0,0] 61)
| While "'a bexp" "'a assn" "'a com" ("(1WHILE _/ INV {_} //DO _ /OD)" [0,0,0] 61)
-syntax
- "@assign" :: "id => 'b => 'a com" ("(2_ :=/ _)" [70,65] 61)
- "@annskip" :: "'a com" ("SKIP")
-
-translations
- "SKIP" == "Basic id"
+abbreviation annskip ("SKIP") where "SKIP == Basic id"
types 'a sem = "'a => 'a => bool"
@@ -50,16 +45,19 @@
"Valid p c q == !s s'. Sem c s s' --> s : p --> s' : q"
-syntax
- "@hoare_vars" :: "[idts, 'a assn,'a com,'a assn] => bool"
- ("VARS _// {_} // _ // {_}" [0,0,55,0] 50)
-syntax ("" output)
- "@hoare" :: "['a assn,'a com,'a assn] => bool"
- ("{_} // _ // {_}" [0,55,0] 50)
(** parse translations **)
-ML{*
+syntax
+ "_assign" :: "id => 'b => 'a com" ("(2_ :=/ _)" [70,65] 61)
+
+syntax
+ "_hoare_vars" :: "[idts, 'a assn,'a com,'a assn] => bool"
+ ("VARS _// {_} // _ // {_}" [0,0,55,0] 50)
+syntax ("" output)
+ "_hoare" :: "['a assn,'a com,'a assn] => bool"
+ ("{_} // _ // {_}" [0,55,0] 50)
+ML {*
local
@@ -91,7 +89,7 @@
*}
(* com_tr *)
ML{*
-fun com_tr (Const("@assign",_) $ Free (a,_) $ e) xs =
+fun com_tr (Const("_assign",_) $ Free (a,_) $ e) xs =
Syntax.const "Basic" $ mk_fexp a e xs
| com_tr (Const ("Basic",_) $ f) xs = Syntax.const "Basic" $ f
| com_tr (Const ("Seq",_) $ c1 $ c2) xs =
@@ -123,7 +121,7 @@
end
*}
-parse_translation {* [("@hoare_vars", hoare_vars_tr)] *}
+parse_translation {* [("_hoare_vars", hoare_vars_tr)] *}
(*****************************************************************************)
@@ -175,8 +173,8 @@
fun mk_assign f =
let val (vs, ts) = mk_vts f;
val (ch, which) = find_ch (vs~~ts) ((length vs)-1) (rev vs)
- in if ch then Syntax.const "@assign" $ fst(which) $ snd(which)
- else Syntax.const "@skip" end;
+ in if ch then Syntax.const "_assign" $ fst(which) $ snd(which)
+ else Syntax.const @{const_syntax annskip} end;
fun com_tr' (Const ("Basic",_) $ f) = if is_f f then mk_assign f
else Syntax.const "Basic" $ f
@@ -190,10 +188,10 @@
fun spec_tr' [p, c, q] =
- Syntax.const "@hoare" $ assn_tr' p $ com_tr' c $ assn_tr' q
+ Syntax.const "_hoare" $ assn_tr' p $ com_tr' c $ assn_tr' q
*}
-print_translation {* [("Valid", spec_tr')] *}
+print_translation {* [(@{const_syntax Valid}, spec_tr')] *}
lemma SkipRule: "p \<subseteq> q \<Longrightarrow> Valid p (Basic id) q"
by (auto simp:Valid_def)
--- a/src/HOL/Hoare/HoareAbort.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Hoare/HoareAbort.thy Tue Feb 09 17:06:05 2010 +0100
@@ -21,12 +21,7 @@
| Cond "'a bexp" "'a com" "'a com" ("(1IF _/ THEN _ / ELSE _/ FI)" [0,0,0] 61)
| While "'a bexp" "'a assn" "'a com" ("(1WHILE _/ INV {_} //DO _ /OD)" [0,0,0] 61)
-syntax
- "@assign" :: "id => 'b => 'a com" ("(2_ :=/ _)" [70,65] 61)
- "@annskip" :: "'a com" ("SKIP")
-
-translations
- "SKIP" == "Basic id"
+abbreviation annskip ("SKIP") where "SKIP == Basic id"
types 'a sem = "'a option => 'a option => bool"
@@ -51,16 +46,19 @@
"Valid p c q == \<forall>s s'. Sem c s s' \<longrightarrow> s : Some ` p \<longrightarrow> s' : Some ` q"
-syntax
- "@hoare_vars" :: "[idts, 'a assn,'a com,'a assn] => bool"
- ("VARS _// {_} // _ // {_}" [0,0,55,0] 50)
-syntax ("" output)
- "@hoare" :: "['a assn,'a com,'a assn] => bool"
- ("{_} // _ // {_}" [0,55,0] 50)
(** parse translations **)
-ML{*
+syntax
+ "_assign" :: "id => 'b => 'a com" ("(2_ :=/ _)" [70,65] 61)
+
+syntax
+ "_hoare_vars" :: "[idts, 'a assn,'a com,'a assn] => bool"
+ ("VARS _// {_} // _ // {_}" [0,0,55,0] 50)
+syntax ("" output)
+ "_hoare" :: "['a assn,'a com,'a assn] => bool"
+ ("{_} // _ // {_}" [0,55,0] 50)
+ML {*
local
fun free a = Free(a,dummyT)
@@ -92,7 +90,7 @@
*}
(* com_tr *)
ML{*
-fun com_tr (Const("@assign",_) $ Free (a,_) $ e) xs =
+fun com_tr (Const("_assign",_) $ Free (a,_) $ e) xs =
Syntax.const "Basic" $ mk_fexp a e xs
| com_tr (Const ("Basic",_) $ f) xs = Syntax.const "Basic" $ f
| com_tr (Const ("Seq",_) $ c1 $ c2) xs =
@@ -124,7 +122,7 @@
end
*}
-parse_translation {* [("@hoare_vars", hoare_vars_tr)] *}
+parse_translation {* [("_hoare_vars", hoare_vars_tr)] *}
(*****************************************************************************)
@@ -176,8 +174,8 @@
fun mk_assign f =
let val (vs, ts) = mk_vts f;
val (ch, which) = find_ch (vs~~ts) ((length vs)-1) (rev vs)
- in if ch then Syntax.const "@assign" $ fst(which) $ snd(which)
- else Syntax.const "@skip" end;
+ in if ch then Syntax.const "_assign" $ fst(which) $ snd(which)
+ else Syntax.const @{const_syntax annskip} end;
fun com_tr' (Const ("Basic",_) $ f) = if is_f f then mk_assign f
else Syntax.const "Basic" $ f
@@ -191,10 +189,10 @@
fun spec_tr' [p, c, q] =
- Syntax.const "@hoare" $ assn_tr' p $ com_tr' c $ assn_tr' q
+ Syntax.const "_hoare" $ assn_tr' p $ com_tr' c $ assn_tr' q
*}
-print_translation {* [("Valid", spec_tr')] *}
+print_translation {* [(@{const_syntax Valid}, spec_tr')] *}
(*** The proof rules ***)
@@ -257,7 +255,7 @@
syntax
guarded_com :: "bool \<Rightarrow> 'a com \<Rightarrow> 'a com" ("(2_ \<rightarrow>/ _)" 71)
- array_update :: "'a list \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a com" ("(2_[_] :=/ _)" [70,65] 61)
+ array_update :: "'a list \<Rightarrow> nat \<Rightarrow> 'a \<Rightarrow> 'a com" ("(2_[_] :=/ _)" [70, 65] 61)
translations
"P \<rightarrow> c" == "IF P THEN c ELSE Abort FI"
"a[i] := v" => "(i < CONST length a) \<rightarrow> (a := CONST list_update a i v)"
--- a/src/HOL/Imperative_HOL/ex/Imperative_Quicksort.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Imperative_HOL/ex/Imperative_Quicksort.thy Tue Feb 09 17:06:05 2010 +0100
@@ -629,6 +629,8 @@
return a
done"
+code_reserved SML upto
+
ML {* @{code qsort} (Array.fromList [42, 2, 3, 5, 0, 1705, 8, 3, 15]) () *}
export_code qsort in SML_imp module_name QSort
--- a/src/HOL/Imperative_HOL/ex/Linked_Lists.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Imperative_HOL/ex/Linked_Lists.thy Tue Feb 09 17:06:05 2010 +0100
@@ -986,6 +986,8 @@
return zs
done)"
+code_reserved SML upto
+
ML {* @{code test_1} () *}
ML {* @{code test_2} () *}
ML {* @{code test_3} () *}
--- a/src/HOL/Import/HOL/arithmetic.imp Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/HOL/arithmetic.imp Tue Feb 09 17:06:05 2010 +0100
@@ -162,12 +162,12 @@
"LESS_OR" > "Nat.Suc_leI"
"LESS_NOT_SUC" > "HOL4Base.arithmetic.LESS_NOT_SUC"
"LESS_MULT_MONO" > "Nat.Suc_mult_less_cancel1"
- "LESS_MULT2" > "Ring_and_Field.mult_pos_pos"
+ "LESS_MULT2" > "Rings.mult_pos_pos"
"LESS_MONO_REV" > "Nat.Suc_less_SucD"
"LESS_MONO_MULT" > "Nat.mult_le_mono1"
"LESS_MONO_EQ" > "Nat.Suc_less_eq"
- "LESS_MONO_ADD_INV" > "OrderedGroup.add_less_imp_less_right"
- "LESS_MONO_ADD_EQ" > "OrderedGroup.add_less_cancel_right"
+ "LESS_MONO_ADD_INV" > "Groups.add_less_imp_less_right"
+ "LESS_MONO_ADD_EQ" > "Groups.add_less_cancel_right"
"LESS_MONO_ADD" > "Nat.add_less_mono1"
"LESS_MOD" > "Divides.mod_less"
"LESS_LESS_SUC" > "HOL4Base.arithmetic.LESS_LESS_SUC"
@@ -180,7 +180,7 @@
"LESS_EQ_SUC_REFL" > "HOL4Base.arithmetic.LESS_EQ_SUC_REFL"
"LESS_EQ_SUB_LESS" > "HOL4Base.arithmetic.LESS_EQ_SUB_LESS"
"LESS_EQ_REFL" > "Finite_Set.max.f_below.below_refl"
- "LESS_EQ_MONO_ADD_EQ" > "OrderedGroup.add_le_cancel_right"
+ "LESS_EQ_MONO_ADD_EQ" > "Groups.add_le_cancel_right"
"LESS_EQ_MONO" > "Nat.Suc_le_mono"
"LESS_EQ_LESS_TRANS" > "Nat.le_less_trans"
"LESS_EQ_LESS_EQ_MONO" > "Nat.add_le_mono"
--- a/src/HOL/Import/HOL/divides.imp Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/HOL/divides.imp Tue Feb 09 17:06:05 2010 +0100
@@ -9,16 +9,16 @@
"divides_def" > "HOL4Compat.divides_def"
"ONE_DIVIDES_ALL" > "HOL4Base.divides.ONE_DIVIDES_ALL"
"NOT_LT_DIV" > "NatSimprocs.nat_dvd_not_less"
- "DIVIDES_TRANS" > "Ring_and_Field.dvd_trans"
- "DIVIDES_SUB" > "Ring_and_Field.dvd_diff"
- "DIVIDES_REFL" > "Ring_and_Field.dvd_refl"
+ "DIVIDES_TRANS" > "Rings.dvd_trans"
+ "DIVIDES_SUB" > "Rings.dvd_diff"
+ "DIVIDES_REFL" > "Rings.dvd_refl"
"DIVIDES_MULT_LEFT" > "HOL4Base.divides.DIVIDES_MULT_LEFT"
"DIVIDES_MULT" > "Divides.dvd_mult2"
"DIVIDES_LE" > "Divides.dvd_imp_le"
"DIVIDES_FACT" > "HOL4Base.divides.DIVIDES_FACT"
"DIVIDES_ANTISYM" > "Divides.dvd_antisym"
"DIVIDES_ADD_2" > "HOL4Base.divides.DIVIDES_ADD_2"
- "DIVIDES_ADD_1" > "Ring_and_Field.dvd_add"
+ "DIVIDES_ADD_1" > "Rings.dvd_add"
"ALL_DIVIDES_0" > "Divides.dvd_0_right"
end
--- a/src/HOL/Import/HOL/prob_extra.imp Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/HOL/prob_extra.imp Tue Feb 09 17:06:05 2010 +0100
@@ -23,9 +23,9 @@
"REAL_SUP_LE_X" > "HOL4Prob.prob_extra.REAL_SUP_LE_X"
"REAL_SUP_EXISTS_UNIQUE" > "HOL4Prob.prob_extra.REAL_SUP_EXISTS_UNIQUE"
"REAL_POW" > "RealPow.realpow_real_of_nat"
- "REAL_LE_INV_LE" > "Ring_and_Field.le_imp_inverse_le"
+ "REAL_LE_INV_LE" > "Rings.le_imp_inverse_le"
"REAL_LE_EQ" > "Set.basic_trans_rules_26"
- "REAL_INVINV_ALL" > "Ring_and_Field.inverse_inverse_eq"
+ "REAL_INVINV_ALL" > "Rings.inverse_inverse_eq"
"REAL_INF_MIN" > "HOL4Prob.prob_extra.REAL_INF_MIN"
"RAND_THM" > "HOL.arg_cong"
"POW_HALF_TWICE" > "HOL4Prob.prob_extra.POW_HALF_TWICE"
--- a/src/HOL/Import/HOL/real.imp Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/HOL/real.imp Tue Feb 09 17:06:05 2010 +0100
@@ -25,13 +25,13 @@
"sumc" > "HOL4Real.real.sumc"
"sum_def" > "HOL4Real.real.sum_def"
"sum" > "HOL4Real.real.sum"
- "real_sub" > "OrderedGroup.diff_def"
+ "real_sub" > "Groups.diff_def"
"real_of_num" > "HOL4Compat.real_of_num"
"real_lte" > "HOL4Compat.real_lte"
"real_lt" > "Orderings.linorder_not_le"
"real_gt" > "HOL4Compat.real_gt"
"real_ge" > "HOL4Compat.real_ge"
- "real_div" > "Ring_and_Field.field_class.divide_inverse"
+ "real_div" > "Rings.field_class.divide_inverse"
"pow" > "HOL4Compat.pow"
"abs" > "HOL4Compat.abs"
"SUP_LEMMA3" > "HOL4Real.real.SUP_LEMMA3"
@@ -74,24 +74,24 @@
"REAL_SUB_TRIANGLE" > "HOL4Real.real.REAL_SUB_TRIANGLE"
"REAL_SUB_SUB2" > "HOL4Real.real.REAL_SUB_SUB2"
"REAL_SUB_SUB" > "HOL4Real.real.REAL_SUB_SUB"
- "REAL_SUB_RZERO" > "OrderedGroup.diff_0_right"
- "REAL_SUB_RNEG" > "OrderedGroup.diff_minus_eq_add"
- "REAL_SUB_REFL" > "OrderedGroup.diff_self"
- "REAL_SUB_RDISTRIB" > "Ring_and_Field.ring_eq_simps_3"
+ "REAL_SUB_RZERO" > "Groups.diff_0_right"
+ "REAL_SUB_RNEG" > "Groups.diff_minus_eq_add"
+ "REAL_SUB_REFL" > "Groups.diff_self"
+ "REAL_SUB_RDISTRIB" > "Rings.ring_eq_simps_3"
"REAL_SUB_NEG2" > "HOL4Real.real.REAL_SUB_NEG2"
- "REAL_SUB_LZERO" > "OrderedGroup.diff_0"
+ "REAL_SUB_LZERO" > "Groups.diff_0"
"REAL_SUB_LT" > "HOL4Real.real.REAL_SUB_LT"
"REAL_SUB_LNEG" > "HOL4Real.real.REAL_SUB_LNEG"
"REAL_SUB_LE" > "HOL4Real.real.REAL_SUB_LE"
- "REAL_SUB_LDISTRIB" > "Ring_and_Field.ring_eq_simps_4"
+ "REAL_SUB_LDISTRIB" > "Rings.ring_eq_simps_4"
"REAL_SUB_INV2" > "HOL4Real.real.REAL_SUB_INV2"
"REAL_SUB_ADD2" > "HOL4Real.real.REAL_SUB_ADD2"
- "REAL_SUB_ADD" > "OrderedGroup.diff_add_cancel"
- "REAL_SUB_ABS" > "OrderedGroup.abs_triangle_ineq2"
- "REAL_SUB_0" > "OrderedGroup.diff_eq_0_iff_eq"
+ "REAL_SUB_ADD" > "Groups.diff_add_cancel"
+ "REAL_SUB_ABS" > "Groups.abs_triangle_ineq2"
+ "REAL_SUB_0" > "Groups.diff_eq_0_iff_eq"
"REAL_RNEG_UNIQ" > "RealDef.real_add_eq_0_iff"
- "REAL_RINV_UNIQ" > "Ring_and_Field.inverse_unique"
- "REAL_RDISTRIB" > "Ring_and_Field.ring_eq_simps_1"
+ "REAL_RINV_UNIQ" > "Rings.inverse_unique"
+ "REAL_RDISTRIB" > "Rings.ring_eq_simps_1"
"REAL_POW_POW" > "Power.power_mult"
"REAL_POW_MONO_LT" > "HOL4Real.real.REAL_POW_MONO_LT"
"REAL_POW_LT2" > "HOL4Real.real.REAL_POW_LT2"
@@ -103,7 +103,7 @@
"REAL_POS_NZ" > "HOL4Real.real.REAL_POS_NZ"
"REAL_POS" > "RealDef.real_of_nat_ge_zero"
"REAL_POASQ" > "HOL4Real.real.REAL_POASQ"
- "REAL_OVER1" > "Ring_and_Field.divide_1"
+ "REAL_OVER1" > "Rings.divide_1"
"REAL_OF_NUM_SUC" > "RealDef.real_of_nat_Suc"
"REAL_OF_NUM_POW" > "RealPow.realpow_real_of_nat"
"REAL_OF_NUM_MUL" > "RealDef.real_of_nat_mult"
@@ -113,172 +113,172 @@
"REAL_NZ_IMP_LT" > "HOL4Real.real.REAL_NZ_IMP_LT"
"REAL_NOT_LT" > "HOL4Compat.real_lte"
"REAL_NOT_LE" > "Orderings.linorder_not_le"
- "REAL_NEG_SUB" > "OrderedGroup.minus_diff_eq"
- "REAL_NEG_RMUL" > "Ring_and_Field.mult_minus_right"
- "REAL_NEG_NEG" > "OrderedGroup.minus_minus"
- "REAL_NEG_MUL2" > "Ring_and_Field.minus_mult_minus"
+ "REAL_NEG_SUB" > "Groups.minus_diff_eq"
+ "REAL_NEG_RMUL" > "Rings.mult_minus_right"
+ "REAL_NEG_NEG" > "Groups.minus_minus"
+ "REAL_NEG_MUL2" > "Rings.minus_mult_minus"
"REAL_NEG_MINUS1" > "HOL4Real.real.REAL_NEG_MINUS1"
- "REAL_NEG_LT0" > "OrderedGroup.neg_less_0_iff_less"
- "REAL_NEG_LMUL" > "Ring_and_Field.mult_minus_left"
- "REAL_NEG_LE0" > "OrderedGroup.neg_le_0_iff_le"
- "REAL_NEG_INV" > "Ring_and_Field.nonzero_inverse_minus_eq"
- "REAL_NEG_GT0" > "OrderedGroup.neg_0_less_iff_less"
- "REAL_NEG_GE0" > "OrderedGroup.neg_0_le_iff_le"
- "REAL_NEG_EQ0" > "OrderedGroup.neg_equal_0_iff_equal"
+ "REAL_NEG_LT0" > "Groups.neg_less_0_iff_less"
+ "REAL_NEG_LMUL" > "Rings.mult_minus_left"
+ "REAL_NEG_LE0" > "Groups.neg_le_0_iff_le"
+ "REAL_NEG_INV" > "Rings.nonzero_inverse_minus_eq"
+ "REAL_NEG_GT0" > "Groups.neg_0_less_iff_less"
+ "REAL_NEG_GE0" > "Groups.neg_0_le_iff_le"
+ "REAL_NEG_EQ0" > "Groups.neg_equal_0_iff_equal"
"REAL_NEG_EQ" > "HOL4Real.real.REAL_NEG_EQ"
- "REAL_NEG_ADD" > "OrderedGroup.minus_add_distrib"
- "REAL_NEG_0" > "OrderedGroup.minus_zero"
- "REAL_NEGNEG" > "OrderedGroup.minus_minus"
+ "REAL_NEG_ADD" > "Groups.minus_add_distrib"
+ "REAL_NEG_0" > "Groups.minus_zero"
+ "REAL_NEGNEG" > "Groups.minus_minus"
"REAL_MUL_SYM" > "Int.zmult_ac_2"
- "REAL_MUL_RZERO" > "Ring_and_Field.mult_zero_right"
- "REAL_MUL_RNEG" > "Ring_and_Field.mult_minus_right"
- "REAL_MUL_RINV" > "Ring_and_Field.right_inverse"
+ "REAL_MUL_RZERO" > "Rings.mult_zero_right"
+ "REAL_MUL_RNEG" > "Rings.mult_minus_right"
+ "REAL_MUL_RINV" > "Rings.right_inverse"
"REAL_MUL_RID" > "Finite_Set.AC_mult.f_e.ident"
- "REAL_MUL_LZERO" > "Ring_and_Field.mult_zero_left"
- "REAL_MUL_LNEG" > "Ring_and_Field.mult_minus_left"
+ "REAL_MUL_LZERO" > "Rings.mult_zero_left"
+ "REAL_MUL_LNEG" > "Rings.mult_minus_left"
"REAL_MUL_LINV" > "HOL4Compat.REAL_MUL_LINV"
"REAL_MUL_LID" > "Finite_Set.AC_mult.f_e.left_ident"
"REAL_MUL_ASSOC" > "HOL4Compat.REAL_MUL_ASSOC"
"REAL_MUL" > "RealDef.real_of_nat_mult"
"REAL_MIDDLE2" > "HOL4Real.real.REAL_MIDDLE2"
"REAL_MIDDLE1" > "HOL4Real.real.REAL_MIDDLE1"
- "REAL_MEAN" > "Ring_and_Field.dense"
+ "REAL_MEAN" > "Rings.dense"
"REAL_LT_TRANS" > "Set.basic_trans_rules_21"
"REAL_LT_TOTAL" > "HOL4Compat.REAL_LT_TOTAL"
- "REAL_LT_SUB_RADD" > "OrderedGroup.compare_rls_6"
- "REAL_LT_SUB_LADD" > "OrderedGroup.compare_rls_7"
- "REAL_LT_RMUL_IMP" > "Ring_and_Field.ordered_semiring_strict_class.mult_strict_right_mono"
+ "REAL_LT_SUB_RADD" > "Groups.compare_rls_6"
+ "REAL_LT_SUB_LADD" > "Groups.compare_rls_7"
+ "REAL_LT_RMUL_IMP" > "Rings.mult_strict_right_mono"
"REAL_LT_RMUL_0" > "HOL4Real.real.REAL_LT_RMUL_0"
"REAL_LT_RMUL" > "RealDef.real_mult_less_iff1"
"REAL_LT_REFL" > "Orderings.order_less_irrefl"
- "REAL_LT_RDIV_EQ" > "Ring_and_Field.pos_less_divide_eq"
+ "REAL_LT_RDIV_EQ" > "Rings.pos_less_divide_eq"
"REAL_LT_RDIV_0" > "HOL4Real.real.REAL_LT_RDIV_0"
"REAL_LT_RDIV" > "HOL4Real.real.REAL_LT_RDIV"
- "REAL_LT_RADD" > "OrderedGroup.add_less_cancel_right"
+ "REAL_LT_RADD" > "Groups.add_less_cancel_right"
"REAL_LT_NZ" > "HOL4Real.real.REAL_LT_NZ"
"REAL_LT_NEGTOTAL" > "HOL4Real.real.REAL_LT_NEGTOTAL"
- "REAL_LT_NEG" > "OrderedGroup.neg_less_iff_less"
+ "REAL_LT_NEG" > "Groups.neg_less_iff_less"
"REAL_LT_MULTIPLE" > "HOL4Real.real.REAL_LT_MULTIPLE"
- "REAL_LT_MUL2" > "Ring_and_Field.mult_strict_mono'"
- "REAL_LT_MUL" > "Ring_and_Field.mult_pos_pos"
- "REAL_LT_LMUL_IMP" > "Ring_and_Field.ordered_comm_semiring_strict_class.mult_strict_mono"
+ "REAL_LT_MUL2" > "Rings.mult_strict_mono'"
+ "REAL_LT_MUL" > "Rings.mult_pos_pos"
+ "REAL_LT_LMUL_IMP" > "Rings.linordered_comm_semiring_strict_class.mult_strict_mono"
"REAL_LT_LMUL_0" > "HOL4Real.real.REAL_LT_LMUL_0"
"REAL_LT_LMUL" > "HOL4Real.real.REAL_LT_LMUL"
"REAL_LT_LE" > "Orderings.order_class.order_less_le"
- "REAL_LT_LDIV_EQ" > "Ring_and_Field.pos_divide_less_eq"
- "REAL_LT_LADD" > "OrderedGroup.add_less_cancel_left"
- "REAL_LT_INV_EQ" > "Ring_and_Field.inverse_positive_iff_positive"
- "REAL_LT_INV" > "Ring_and_Field.less_imp_inverse_less"
+ "REAL_LT_LDIV_EQ" > "Rings.pos_divide_less_eq"
+ "REAL_LT_LADD" > "Groups.add_less_cancel_left"
+ "REAL_LT_INV_EQ" > "Rings.inverse_positive_iff_positive"
+ "REAL_LT_INV" > "Rings.less_imp_inverse_less"
"REAL_LT_IMP_NE" > "Orderings.less_imp_neq"
"REAL_LT_IMP_LE" > "Orderings.order_less_imp_le"
- "REAL_LT_IADD" > "OrderedGroup.add_strict_left_mono"
+ "REAL_LT_IADD" > "Groups.add_strict_left_mono"
"REAL_LT_HALF2" > "HOL4Real.real.REAL_LT_HALF2"
"REAL_LT_HALF1" > "NatSimprocs.half_gt_zero_iff"
"REAL_LT_GT" > "Orderings.order_less_not_sym"
"REAL_LT_FRACTION_0" > "HOL4Real.real.REAL_LT_FRACTION_0"
"REAL_LT_FRACTION" > "HOL4Real.real.REAL_LT_FRACTION"
- "REAL_LT_DIV" > "Ring_and_Field.divide_pos_pos"
+ "REAL_LT_DIV" > "Rings.divide_pos_pos"
"REAL_LT_ANTISYM" > "HOL4Real.real.REAL_LT_ANTISYM"
- "REAL_LT_ADD_SUB" > "OrderedGroup.compare_rls_7"
+ "REAL_LT_ADD_SUB" > "Groups.compare_rls_7"
"REAL_LT_ADDR" > "HOL4Real.real.REAL_LT_ADDR"
"REAL_LT_ADDNEG2" > "HOL4Real.real.REAL_LT_ADDNEG2"
"REAL_LT_ADDNEG" > "HOL4Real.real.REAL_LT_ADDNEG"
"REAL_LT_ADDL" > "HOL4Real.real.REAL_LT_ADDL"
- "REAL_LT_ADD2" > "OrderedGroup.add_strict_mono"
+ "REAL_LT_ADD2" > "Groups.add_strict_mono"
"REAL_LT_ADD1" > "HOL4Real.real.REAL_LT_ADD1"
- "REAL_LT_ADD" > "OrderedGroup.add_pos_pos"
+ "REAL_LT_ADD" > "Groups.add_pos_pos"
"REAL_LT_1" > "HOL4Real.real.REAL_LT_1"
- "REAL_LT_01" > "Ring_and_Field.ordered_semidom_class.zero_less_one"
+ "REAL_LT_01" > "Rings.zero_less_one"
"REAL_LTE_TRANS" > "Set.basic_trans_rules_24"
"REAL_LTE_TOTAL" > "HOL4Real.real.REAL_LTE_TOTAL"
"REAL_LTE_ANTSYM" > "HOL4Real.real.REAL_LTE_ANTSYM"
- "REAL_LTE_ADD2" > "OrderedGroup.add_less_le_mono"
- "REAL_LTE_ADD" > "OrderedGroup.add_pos_nonneg"
+ "REAL_LTE_ADD2" > "Groups.add_less_le_mono"
+ "REAL_LTE_ADD" > "Groups.add_pos_nonneg"
"REAL_LT1_POW2" > "HOL4Real.real.REAL_LT1_POW2"
"REAL_LT" > "RealDef.real_of_nat_less_iff"
"REAL_LNEG_UNIQ" > "HOL4Real.real.REAL_LNEG_UNIQ"
"REAL_LINV_UNIQ" > "HOL4Real.real.REAL_LINV_UNIQ"
"REAL_LE_TRANS" > "Set.basic_trans_rules_25"
"REAL_LE_TOTAL" > "Orderings.linorder_class.linorder_linear"
- "REAL_LE_SUB_RADD" > "OrderedGroup.compare_rls_8"
- "REAL_LE_SUB_LADD" > "OrderedGroup.compare_rls_9"
- "REAL_LE_SQUARE" > "Ring_and_Field.zero_le_square"
- "REAL_LE_RNEG" > "OrderedGroup.le_eq_neg"
- "REAL_LE_RMUL_IMP" > "Ring_and_Field.pordered_semiring_class.mult_right_mono"
+ "REAL_LE_SUB_RADD" > "Groups.compare_rls_8"
+ "REAL_LE_SUB_LADD" > "Groups.compare_rls_9"
+ "REAL_LE_SQUARE" > "Rings.zero_le_square"
+ "REAL_LE_RNEG" > "Groups.le_eq_neg"
+ "REAL_LE_RMUL_IMP" > "Rings.mult_right_mono"
"REAL_LE_RMUL" > "RealDef.real_mult_le_cancel_iff1"
"REAL_LE_REFL" > "Finite_Set.max.f_below.below_refl"
- "REAL_LE_RDIV_EQ" > "Ring_and_Field.pos_le_divide_eq"
- "REAL_LE_RDIV" > "Ring_and_Field.mult_imp_le_div_pos"
- "REAL_LE_RADD" > "OrderedGroup.add_le_cancel_right"
+ "REAL_LE_RDIV_EQ" > "Rings.pos_le_divide_eq"
+ "REAL_LE_RDIV" > "Rings.mult_imp_le_div_pos"
+ "REAL_LE_RADD" > "Groups.add_le_cancel_right"
"REAL_LE_POW2" > "Nat_Numeral.zero_compare_simps_12"
"REAL_LE_NEGTOTAL" > "HOL4Real.real.REAL_LE_NEGTOTAL"
- "REAL_LE_NEGR" > "OrderedGroup.le_minus_self_iff"
- "REAL_LE_NEGL" > "OrderedGroup.minus_le_self_iff"
- "REAL_LE_NEG2" > "OrderedGroup.neg_le_iff_le"
- "REAL_LE_NEG" > "OrderedGroup.neg_le_iff_le"
+ "REAL_LE_NEGR" > "Groups.le_minus_self_iff"
+ "REAL_LE_NEGL" > "Groups.minus_le_self_iff"
+ "REAL_LE_NEG2" > "Groups.neg_le_iff_le"
+ "REAL_LE_NEG" > "Groups.neg_le_iff_le"
"REAL_LE_MUL2" > "HOL4Real.real.REAL_LE_MUL2"
- "REAL_LE_MUL" > "Ring_and_Field.mult_nonneg_nonneg"
+ "REAL_LE_MUL" > "Rings.mult_nonneg_nonneg"
"REAL_LE_LT" > "Orderings.order_le_less"
"REAL_LE_LNEG" > "RealDef.real_0_le_add_iff"
- "REAL_LE_LMUL_IMP" > "Ring_and_Field.pordered_comm_semiring_class.mult_mono"
+ "REAL_LE_LMUL_IMP" > "Rings.mult_mono"
"REAL_LE_LMUL" > "RealDef.real_mult_le_cancel_iff2"
- "REAL_LE_LDIV_EQ" > "Ring_and_Field.pos_divide_le_eq"
- "REAL_LE_LDIV" > "Ring_and_Field.mult_imp_div_pos_le"
- "REAL_LE_LADD_IMP" > "OrderedGroup.pordered_ab_semigroup_add_class.add_left_mono"
- "REAL_LE_LADD" > "OrderedGroup.add_le_cancel_left"
- "REAL_LE_INV_EQ" > "Ring_and_Field.inverse_nonnegative_iff_nonnegative"
+ "REAL_LE_LDIV_EQ" > "Rings.pos_divide_le_eq"
+ "REAL_LE_LDIV" > "Rings.mult_imp_div_pos_le"
+ "REAL_LE_LADD_IMP" > "Groups.add_left_mono"
+ "REAL_LE_LADD" > "Groups.add_le_cancel_left"
+ "REAL_LE_INV_EQ" > "Rings.inverse_nonnegative_iff_nonnegative"
"REAL_LE_INV" > "HOL4Real.real.REAL_LE_INV"
- "REAL_LE_DOUBLE" > "OrderedGroup.zero_le_double_add_iff_zero_le_single_add"
+ "REAL_LE_DOUBLE" > "Groups.zero_le_double_add_iff_zero_le_single_add"
"REAL_LE_DIV" > "HOL4Real.real.REAL_LE_DIV"
"REAL_LE_ANTISYM" > "Orderings.order_eq_iff"
"REAL_LE_ADDR" > "HOL4Real.real.REAL_LE_ADDR"
"REAL_LE_ADDL" > "HOL4Real.real.REAL_LE_ADDL"
- "REAL_LE_ADD2" > "OrderedGroup.add_mono"
- "REAL_LE_ADD" > "OrderedGroup.add_nonneg_nonneg"
- "REAL_LE_01" > "Ring_and_Field.zero_le_one"
+ "REAL_LE_ADD2" > "Groups.add_mono"
+ "REAL_LE_ADD" > "Groups.add_nonneg_nonneg"
+ "REAL_LE_01" > "Rings.zero_le_one"
"REAL_LET_TRANS" > "Set.basic_trans_rules_23"
"REAL_LET_TOTAL" > "Orderings.linorder_le_less_linear"
"REAL_LET_ANTISYM" > "HOL4Real.real.REAL_LET_ANTISYM"
- "REAL_LET_ADD2" > "OrderedGroup.add_le_less_mono"
- "REAL_LET_ADD" > "OrderedGroup.add_nonneg_pos"
+ "REAL_LET_ADD2" > "Groups.add_le_less_mono"
+ "REAL_LET_ADD" > "Groups.add_nonneg_pos"
"REAL_LE1_POW2" > "HOL4Real.real.REAL_LE1_POW2"
"REAL_LE" > "RealDef.real_of_nat_le_iff"
- "REAL_LDISTRIB" > "Ring_and_Field.ring_eq_simps_2"
- "REAL_INV_POS" > "Ring_and_Field.positive_imp_inverse_positive"
- "REAL_INV_NZ" > "Ring_and_Field.nonzero_imp_inverse_nonzero"
+ "REAL_LDISTRIB" > "Rings.ring_eq_simps_2"
+ "REAL_INV_POS" > "Rings.positive_imp_inverse_positive"
+ "REAL_INV_NZ" > "Rings.nonzero_imp_inverse_nonzero"
"REAL_INV_MUL" > "HOL4Real.real.REAL_INV_MUL"
"REAL_INV_LT1" > "RealDef.real_inverse_gt_one"
- "REAL_INV_INV" > "Ring_and_Field.inverse_inverse_eq"
- "REAL_INV_EQ_0" > "Ring_and_Field.inverse_nonzero_iff_nonzero"
- "REAL_INV_1OVER" > "Ring_and_Field.inverse_eq_divide"
- "REAL_INV_0" > "Ring_and_Field.division_by_zero_class.inverse_zero"
- "REAL_INVINV" > "Ring_and_Field.nonzero_inverse_inverse_eq"
- "REAL_INV1" > "Ring_and_Field.inverse_1"
+ "REAL_INV_INV" > "Rings.inverse_inverse_eq"
+ "REAL_INV_EQ_0" > "Rings.inverse_nonzero_iff_nonzero"
+ "REAL_INV_1OVER" > "Rings.inverse_eq_divide"
+ "REAL_INV_0" > "Rings.division_by_zero_class.inverse_zero"
+ "REAL_INVINV" > "Rings.nonzero_inverse_inverse_eq"
+ "REAL_INV1" > "Rings.inverse_1"
"REAL_INJ" > "RealDef.real_of_nat_inject"
"REAL_HALF_DOUBLE" > "RComplete.real_sum_of_halves"
"REAL_FACT_NZ" > "HOL4Real.real.REAL_FACT_NZ"
- "REAL_EQ_SUB_RADD" > "Ring_and_Field.ring_eq_simps_15"
- "REAL_EQ_SUB_LADD" > "Ring_and_Field.ring_eq_simps_16"
- "REAL_EQ_RMUL_IMP" > "Ring_and_Field.field_mult_cancel_right_lemma"
- "REAL_EQ_RMUL" > "Ring_and_Field.field_mult_cancel_right"
+ "REAL_EQ_SUB_RADD" > "Rings.ring_eq_simps_15"
+ "REAL_EQ_SUB_LADD" > "Rings.ring_eq_simps_16"
+ "REAL_EQ_RMUL_IMP" > "Rings.field_mult_cancel_right_lemma"
+ "REAL_EQ_RMUL" > "Rings.field_mult_cancel_right"
"REAL_EQ_RDIV_EQ" > "HOL4Real.real.REAL_EQ_RDIV_EQ"
- "REAL_EQ_RADD" > "OrderedGroup.add_right_cancel"
- "REAL_EQ_NEG" > "OrderedGroup.neg_equal_iff_equal"
- "REAL_EQ_MUL_LCANCEL" > "Ring_and_Field.field_mult_cancel_left"
+ "REAL_EQ_RADD" > "Groups.add_right_cancel"
+ "REAL_EQ_NEG" > "Groups.neg_equal_iff_equal"
+ "REAL_EQ_MUL_LCANCEL" > "Rings.field_mult_cancel_left"
"REAL_EQ_LMUL_IMP" > "HOL4Real.real.REAL_EQ_LMUL_IMP"
"REAL_EQ_LMUL2" > "RealDef.real_mult_left_cancel"
- "REAL_EQ_LMUL" > "Ring_and_Field.field_mult_cancel_left"
+ "REAL_EQ_LMUL" > "Rings.field_mult_cancel_left"
"REAL_EQ_LDIV_EQ" > "HOL4Real.real.REAL_EQ_LDIV_EQ"
- "REAL_EQ_LADD" > "OrderedGroup.add_left_cancel"
+ "REAL_EQ_LADD" > "Groups.add_left_cancel"
"REAL_EQ_IMP_LE" > "Orderings.order_eq_refl"
- "REAL_ENTIRE" > "Ring_and_Field.field_mult_eq_0_iff"
+ "REAL_ENTIRE" > "Rings.field_mult_eq_0_iff"
"REAL_DOWN2" > "RealDef.real_lbound_gt_zero"
"REAL_DOWN" > "HOL4Real.real.REAL_DOWN"
"REAL_DOUBLE" > "Int.mult_2"
"REAL_DIV_RMUL" > "HOL4Real.real.REAL_DIV_RMUL"
- "REAL_DIV_REFL" > "Ring_and_Field.divide_self"
+ "REAL_DIV_REFL" > "Rings.divide_self"
"REAL_DIV_MUL2" > "HOL4Real.real.REAL_DIV_MUL2"
- "REAL_DIV_LZERO" > "Ring_and_Field.divide_zero_left"
+ "REAL_DIV_LZERO" > "Rings.divide_zero_left"
"REAL_DIV_LMUL" > "HOL4Real.real.REAL_DIV_LMUL"
"REAL_DIFFSQ" > "HOL4Real.real.REAL_DIFFSQ"
"REAL_ARCH_LEAST" > "HOL4Real.real.REAL_ARCH_LEAST"
@@ -286,20 +286,20 @@
"REAL_ADD_SYM" > "Finite_Set.AC_add.f.AC_2"
"REAL_ADD_SUB2" > "HOL4Real.real.REAL_ADD_SUB2"
"REAL_ADD_SUB" > "HOL4Real.real.REAL_ADD_SUB"
- "REAL_ADD_RINV" > "OrderedGroup.right_minus"
+ "REAL_ADD_RINV" > "Groups.right_minus"
"REAL_ADD_RID_UNIQ" > "HOL4Real.real.REAL_ADD_RID_UNIQ"
"REAL_ADD_RID" > "Finite_Set.AC_add.f_e.ident"
- "REAL_ADD_RDISTRIB" > "Ring_and_Field.ring_eq_simps_1"
+ "REAL_ADD_RDISTRIB" > "Rings.ring_eq_simps_1"
"REAL_ADD_LINV" > "HOL4Compat.REAL_ADD_LINV"
"REAL_ADD_LID_UNIQ" > "HOL4Real.real.REAL_ADD_LID_UNIQ"
"REAL_ADD_LID" > "Finite_Set.AC_add.f_e.left_ident"
- "REAL_ADD_LDISTRIB" > "Ring_and_Field.ring_eq_simps_2"
+ "REAL_ADD_LDISTRIB" > "Rings.ring_eq_simps_2"
"REAL_ADD_ASSOC" > "HOL4Compat.REAL_ADD_ASSOC"
"REAL_ADD2_SUB2" > "HOL4Real.real.REAL_ADD2_SUB2"
"REAL_ADD" > "RealDef.real_of_nat_add"
- "REAL_ABS_TRIANGLE" > "OrderedGroup.abs_triangle_ineq"
- "REAL_ABS_POS" > "OrderedGroup.abs_ge_zero"
- "REAL_ABS_MUL" > "Ring_and_Field.abs_mult"
+ "REAL_ABS_TRIANGLE" > "Groups.abs_triangle_ineq"
+ "REAL_ABS_POS" > "Groups.abs_ge_zero"
+ "REAL_ABS_MUL" > "Rings.abs_mult"
"REAL_ABS_0" > "Int.bin_arith_simps_28"
"REAL_10" > "HOL4Compat.REAL_10"
"REAL_1" > "HOL4Real.real.REAL_1"
@@ -326,25 +326,25 @@
"POW_2" > "Nat_Numeral.power2_eq_square"
"POW_1" > "Power.power_one_right"
"POW_0" > "Power.power_0_Suc"
- "ABS_ZERO" > "OrderedGroup.abs_eq_0"
- "ABS_TRIANGLE" > "OrderedGroup.abs_triangle_ineq"
+ "ABS_ZERO" > "Groups.abs_eq_0"
+ "ABS_TRIANGLE" > "Groups.abs_triangle_ineq"
"ABS_SUM" > "HOL4Real.real.ABS_SUM"
- "ABS_SUB_ABS" > "OrderedGroup.abs_triangle_ineq3"
- "ABS_SUB" > "OrderedGroup.abs_minus_commute"
+ "ABS_SUB_ABS" > "Groups.abs_triangle_ineq3"
+ "ABS_SUB" > "Groups.abs_minus_commute"
"ABS_STILLNZ" > "HOL4Real.real.ABS_STILLNZ"
"ABS_SIGN2" > "HOL4Real.real.ABS_SIGN2"
"ABS_SIGN" > "HOL4Real.real.ABS_SIGN"
"ABS_REFL" > "HOL4Real.real.ABS_REFL"
"ABS_POW2" > "Nat_Numeral.abs_power2"
- "ABS_POS" > "OrderedGroup.abs_ge_zero"
- "ABS_NZ" > "OrderedGroup.zero_less_abs_iff"
- "ABS_NEG" > "OrderedGroup.abs_minus_cancel"
+ "ABS_POS" > "Groups.abs_ge_zero"
+ "ABS_NZ" > "Groups.zero_less_abs_iff"
+ "ABS_NEG" > "Groups.abs_minus_cancel"
"ABS_N" > "RealDef.abs_real_of_nat_cancel"
- "ABS_MUL" > "Ring_and_Field.abs_mult"
+ "ABS_MUL" > "Rings.abs_mult"
"ABS_LT_MUL2" > "HOL4Real.real.ABS_LT_MUL2"
- "ABS_LE" > "OrderedGroup.abs_ge_self"
- "ABS_INV" > "Ring_and_Field.nonzero_abs_inverse"
- "ABS_DIV" > "Ring_and_Field.nonzero_abs_divide"
+ "ABS_LE" > "Groups.abs_ge_self"
+ "ABS_INV" > "Rings.nonzero_abs_inverse"
+ "ABS_DIV" > "Rings.nonzero_abs_divide"
"ABS_CIRCLE" > "HOL4Real.real.ABS_CIRCLE"
"ABS_CASES" > "HOL4Real.real.ABS_CASES"
"ABS_BOUNDS" > "RealDef.abs_le_interval_iff"
@@ -352,7 +352,7 @@
"ABS_BETWEEN2" > "HOL4Real.real.ABS_BETWEEN2"
"ABS_BETWEEN1" > "HOL4Real.real.ABS_BETWEEN1"
"ABS_BETWEEN" > "HOL4Real.real.ABS_BETWEEN"
- "ABS_ABS" > "OrderedGroup.abs_idempotent"
+ "ABS_ABS" > "Groups.abs_idempotent"
"ABS_1" > "Int.bin_arith_simps_29"
"ABS_0" > "Int.bin_arith_simps_28"
--- a/src/HOL/Import/HOL/realax.imp Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/HOL/realax.imp Tue Feb 09 17:06:05 2010 +0100
@@ -98,10 +98,10 @@
"REAL_LT_TRANS" > "Set.basic_trans_rules_21"
"REAL_LT_TOTAL" > "HOL4Compat.REAL_LT_TOTAL"
"REAL_LT_REFL" > "Orderings.order_less_irrefl"
- "REAL_LT_MUL" > "Ring_and_Field.mult_pos_pos"
- "REAL_LT_IADD" > "OrderedGroup.add_strict_left_mono"
- "REAL_LDISTRIB" > "Ring_and_Field.ring_eq_simps_2"
- "REAL_INV_0" > "Ring_and_Field.division_by_zero_class.inverse_zero"
+ "REAL_LT_MUL" > "Rings.mult_pos_pos"
+ "REAL_LT_IADD" > "Groups.add_strict_left_mono"
+ "REAL_LDISTRIB" > "Rings.ring_eq_simps_2"
+ "REAL_INV_0" > "Rings.division_by_zero_class.inverse_zero"
"REAL_ADD_SYM" > "Finite_Set.AC_add.f.AC_2"
"REAL_ADD_LINV" > "HOL4Compat.REAL_ADD_LINV"
"REAL_ADD_LID" > "Finite_Set.AC_add.f_e.left_ident"
--- a/src/HOL/Import/hol4rews.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/hol4rews.ML Tue Feb 09 17:06:05 2010 +0100
@@ -371,7 +371,7 @@
fun process ((bthy,bthm), hth as (_,thm)) thy =
let
val thm1 = rewrite_rule (map (Thm.transfer thy) rews) (Thm.transfer thy thm);
- val thm2 = Drule.standard thm1;
+ val thm2 = Drule.export_without_context thm1;
in
thy
|> PureThy.store_thm (Binding.name bthm, thm2)
--- a/src/HOL/Import/shuffler.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Import/shuffler.ML Tue Feb 09 17:06:05 2010 +0100
@@ -100,7 +100,7 @@
val th4 = implies_elim_list (assume cPPQ) [th3,th3]
|> implies_intr_list [cPPQ,cP]
in
- equal_intr th4 th1 |> Drule.standard
+ equal_intr th4 th1 |> Drule.export_without_context
end
val imp_comm =
@@ -120,7 +120,7 @@
val th2 = implies_elim_list (assume cQPR) [assume cQ,assume cP]
|> implies_intr_list [cQPR,cP,cQ]
in
- equal_intr th1 th2 |> Drule.standard
+ equal_intr th1 th2 |> Drule.export_without_context
end
val def_norm =
@@ -147,7 +147,7 @@
|> forall_intr cv
|> implies_intr cPQ
in
- equal_intr th1 th2 |> Drule.standard
+ equal_intr th1 th2 |> Drule.export_without_context
end
val all_comm =
@@ -173,7 +173,7 @@
|> forall_intr_list [cy,cx]
|> implies_intr cl
in
- equal_intr th1 th2 |> Drule.standard
+ equal_intr th1 th2 |> Drule.export_without_context
end
val equiv_comm =
@@ -187,7 +187,7 @@
val th1 = assume ctu |> symmetric |> implies_intr ctu
val th2 = assume cut |> symmetric |> implies_intr cut
in
- equal_intr th1 th2 |> Drule.standard
+ equal_intr th1 th2 |> Drule.export_without_context
end
(* This simplification procedure rewrites !!x y. P x y
--- a/src/HOL/Int.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Int.thy Tue Feb 09 17:06:05 2010 +0100
@@ -208,7 +208,7 @@
end
-instance int :: pordered_cancel_ab_semigroup_add
+instance int :: ordered_cancel_ab_semigroup_add
proof
fix i j k :: int
show "i \<le> j \<Longrightarrow> k + i \<le> k + j"
@@ -245,7 +245,7 @@
done
text{*The integers form an ordered integral domain*}
-instance int :: ordered_idom
+instance int :: linordered_idom
proof
fix i j k :: int
show "i < j \<Longrightarrow> 0 < k \<Longrightarrow> k * i < k * j"
@@ -256,13 +256,6 @@
by (simp only: zsgn_def)
qed
-instance int :: lordered_ring
-proof
- fix k :: int
- show "abs k = sup k (- k)"
- by (auto simp add: sup_int_def zabs_def less_minus_self_iff [symmetric])
-qed
-
lemma zless_imp_add1_zle: "w < z \<Longrightarrow> w + (1\<Colon>int) \<le> z"
apply (cases w, cases z)
apply (simp add: less le add One_int_def)
@@ -314,7 +307,7 @@
by (cases z, simp add: algebra_simps of_int minus)
lemma of_int_diff [simp]: "of_int (w - z) = of_int w - of_int z"
-by (simp add: OrderedGroup.diff_minus diff_minus)
+by (simp add: diff_minus Groups.diff_minus)
lemma of_int_mult [simp]: "of_int (w*z) = of_int w * of_int z"
apply (cases w, cases z)
@@ -331,7 +324,7 @@
end
-context ordered_idom
+context linordered_idom
begin
lemma of_int_le_iff [simp]:
@@ -370,8 +363,8 @@
end
-text{*Every @{text ordered_idom} has characteristic zero.*}
-subclass (in ordered_idom) ring_char_0 by intro_locales
+text{*Every @{text linordered_idom} has characteristic zero.*}
+subclass (in linordered_idom) ring_char_0 by intro_locales
lemma of_int_eq_id [simp]: "of_int = id"
proof
@@ -526,10 +519,10 @@
text{*This version is proved for all ordered rings, not just integers!
It is proved here because attribute @{text arith_split} is not available
- in theory @{text Ring_and_Field}.
+ in theory @{text Rings}.
But is it really better than just rewriting with @{text abs_if}?*}
lemma abs_split [arith_split,noatp]:
- "P(abs(a::'a::ordered_idom)) = ((0 \<le> a --> P a) & (a < 0 --> P(-a)))"
+ "P(abs(a::'a::linordered_idom)) = ((0 \<le> a --> P a) & (a < 0 --> P(-a)))"
by (force dest: order_less_le_trans simp add: abs_if linorder_not_less)
lemma negD: "(x \<Colon> int) < 0 \<Longrightarrow> \<exists>n. x = - (of_nat (Suc n))"
@@ -804,7 +797,7 @@
text {* Preliminaries *}
lemma even_less_0_iff:
- "a + a < 0 \<longleftrightarrow> a < (0::'a::ordered_idom)"
+ "a + a < 0 \<longleftrightarrow> a < (0::'a::linordered_idom)"
proof -
have "a + a < 0 \<longleftrightarrow> (1+1)*a < 0" by (simp add: left_distrib)
also have "(1+1)*a < 0 \<longleftrightarrow> a < 0"
@@ -1147,7 +1140,7 @@
subsubsection {* The Less-Than Relation *}
lemma double_less_0_iff:
- "(a + a < 0) = (a < (0::'a::ordered_idom))"
+ "(a + a < 0) = (a < (0::'a::linordered_idom))"
proof -
have "(a + a < 0) = ((1+1)*a < 0)" by (simp add: left_distrib)
also have "... = (a < 0)"
@@ -1180,7 +1173,7 @@
text {* Absolute value (@{term abs}) *}
lemma abs_number_of:
- "abs(number_of x::'a::{ordered_idom,number_ring}) =
+ "abs(number_of x::'a::{linordered_idom,number_ring}) =
(if number_of x < (0::'a) then -number_of x else number_of x)"
by (simp add: abs_if)
@@ -1214,11 +1207,11 @@
text {* Simplification of relational operations *}
lemma less_number_of [simp]:
- "(number_of x::'a::{ordered_idom,number_ring}) < number_of y \<longleftrightarrow> x < y"
+ "(number_of x::'a::{linordered_idom,number_ring}) < number_of y \<longleftrightarrow> x < y"
unfolding number_of_eq by (rule of_int_less_iff)
lemma le_number_of [simp]:
- "(number_of x::'a::{ordered_idom,number_ring}) \<le> number_of y \<longleftrightarrow> x \<le> y"
+ "(number_of x::'a::{linordered_idom,number_ring}) \<le> number_of y \<longleftrightarrow> x \<le> y"
unfolding number_of_eq by (rule of_int_le_iff)
lemma eq_number_of [simp]:
@@ -1362,7 +1355,7 @@
lemma Ints_odd_less_0:
assumes in_Ints: "a \<in> Ints"
- shows "(1 + a + a < 0) = (a < (0::'a::ordered_idom))"
+ shows "(1 + a + a < 0) = (a < (0::'a::linordered_idom))"
proof -
from in_Ints have "a \<in> range of_int" unfolding Ints_def [symmetric] .
then obtain z where a: "a = of_int z" ..
@@ -1519,11 +1512,11 @@
finally show ?thesis .
qed
-lemma abs_minus_one [simp]: "abs (-1) = (1::'a::{ordered_idom,number_ring})"
+lemma abs_minus_one [simp]: "abs (-1) = (1::'a::{linordered_idom,number_ring})"
by (simp add: abs_if)
lemma abs_power_minus_one [simp]:
- "abs(-1 ^ n) = (1::'a::{ordered_idom,number_ring})"
+ "abs(-1 ^ n) = (1::'a::{linordered_idom,number_ring})"
by (simp add: power_abs)
lemma of_int_number_of_eq [simp]:
@@ -1906,12 +1899,12 @@
text{*To Simplify Inequalities Where One Side is the Constant 1*}
lemma less_minus_iff_1 [simp,noatp]:
- fixes b::"'b::{ordered_idom,number_ring}"
+ fixes b::"'b::{linordered_idom,number_ring}"
shows "(1 < - b) = (b < -1)"
by auto
lemma le_minus_iff_1 [simp,noatp]:
- fixes b::"'b::{ordered_idom,number_ring}"
+ fixes b::"'b::{linordered_idom,number_ring}"
shows "(1 \<le> - b) = (b \<le> -1)"
by auto
@@ -1921,12 +1914,12 @@
by (subst equation_minus_iff, auto)
lemma minus_less_iff_1 [simp,noatp]:
- fixes a::"'b::{ordered_idom,number_ring}"
+ fixes a::"'b::{linordered_idom,number_ring}"
shows "(- a < 1) = (-1 < a)"
by auto
lemma minus_le_iff_1 [simp,noatp]:
- fixes a::"'b::{ordered_idom,number_ring}"
+ fixes a::"'b::{linordered_idom,number_ring}"
shows "(- a \<le> 1) = (-1 \<le> a)"
by auto
@@ -1990,7 +1983,7 @@
by (simp add: divide_inverse inverse_minus_eq)
lemma half_gt_zero_iff:
- "(0 < r/2) = (0 < (r::'a::{ordered_field,division_by_zero,number_ring}))"
+ "(0 < r/2) = (0 < (r::'a::{linordered_field,division_by_zero,number_ring}))"
by auto
lemmas half_gt_zero [simp] = half_gt_zero_iff [THEN iffD2, standard]
@@ -2324,9 +2317,9 @@
lemmas zadd_assoc = add_assoc [of "z1::int" "z2" "z3", standard]
lemmas zadd_left_commute = add_left_commute [of "x::int" "y" "z", standard]
lemmas zadd_ac = zadd_assoc zadd_commute zadd_left_commute
-lemmas zmult_ac = OrderedGroup.mult_ac
-lemmas zadd_0 = OrderedGroup.add_0_left [of "z::int", standard]
-lemmas zadd_0_right = OrderedGroup.add_0_left [of "z::int", standard]
+lemmas zmult_ac = mult_ac
+lemmas zadd_0 = add_0_left [of "z::int", standard]
+lemmas zadd_0_right = add_0_right [of "z::int", standard]
lemmas zadd_zminus_inverse2 = left_minus [of "z::int", standard]
lemmas zmult_zminus = mult_minus_left [of "z::int" "w", standard]
lemmas zmult_commute = mult_commute [of "z::int" "w", standard]
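For orientation, a minimal sketch of the renamed sort at the user level (illustrative only, not part of this changeset; the lemma name is hypothetical):

  (* hypothetical sketch: the sort ordered_idom is now spelt linordered_idom *)
  lemma abs_mult_sketch:
    fixes a b :: "'a::linordered_idom"
    shows "abs (a * b) = abs a * abs b"
    by (rule abs_mult)
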
--- a/src/HOL/IsaMakefile Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/IsaMakefile Tue Feb 09 17:06:05 2010 +0100
@@ -145,15 +145,16 @@
Complete_Lattice.thy \
Datatype.thy \
Extraction.thy \
+ Fields.thy \
Finite_Set.thy \
Fun.thy \
FunDef.thy \
+ Groups.thy \
Inductive.thy \
Lattices.thy \
Nat.thy \
Nitpick.thy \
Option.thy \
- OrderedGroup.thy \
Orderings.thy \
Plain.thy \
Power.thy \
@@ -162,7 +163,7 @@
Record.thy \
Refute.thy \
Relation.thy \
- Ring_and_Field.thy \
+ Rings.thy \
SAT.thy \
Set.thy \
Sum_Type.thy \
@@ -385,6 +386,7 @@
Library/Permutations.thy Library/Bit.thy Library/FrechetDeriv.thy \
Library/Fraction_Field.thy Library/Fundamental_Theorem_Algebra.thy \
Library/Inner_Product.thy Library/Kleene_Algebra.thy \
+ Library/Lattice_Algebras.thy \
Library/Lattice_Syntax.thy Library/Library.thy \
Library/List_Prefix.thy Library/List_Set.thy Library/State_Monad.thy \
Library/Nat_Int_Bij.thy Library/Multiset.thy Library/Permutation.thy \
@@ -395,7 +397,7 @@
Library/Library/document/root.tex Library/Library/document/root.bib \
Library/Transitive_Closure_Table.thy Library/While_Combinator.thy \
Library/Product_ord.thy Library/Char_nat.thy \
- Library/Char_ord.thy Library/Option_ord.thy \
+ Library/Structure_Syntax.thy \
Library/Sublist_Order.thy Library/List_lexord.thy \
Library/Coinductive_List.thy Library/AssocList.thy \
Library/Formal_Power_Series.thy Library/Binomial.thy \
@@ -787,18 +789,21 @@
Decision_Procs/Approximation.thy \
Decision_Procs/Commutative_Ring.thy \
Decision_Procs/Commutative_Ring_Complete.thy \
- Decision_Procs/commutative_ring_tac.ML \
Decision_Procs/Cooper.thy \
- Decision_Procs/cooper_tac.ML \
+ Decision_Procs/Decision_Procs.thy \
Decision_Procs/Dense_Linear_Order.thy \
Decision_Procs/Ferrack.thy \
- Decision_Procs/ferrack_tac.ML \
Decision_Procs/MIR.thy \
- Decision_Procs/mir_tac.ML \
- Decision_Procs/Decision_Procs.thy \
- Decision_Procs/ex/Dense_Linear_Order_Ex.thy \
+ Decision_Procs/Parametric_Ferrante_Rackoff.thy \
+ Decision_Procs/Polynomial_List.thy \
+ Decision_Procs/Reflected_Multivariate_Polynomial.thy \
+ Decision_Procs/commutative_ring_tac.ML \
+ Decision_Procs/cooper_tac.ML \
Decision_Procs/ex/Approximation_Ex.thy \
Decision_Procs/ex/Commutative_Ring_Ex.thy \
+ Decision_Procs/ex/Dense_Linear_Order_Ex.thy \
+ Decision_Procs/ferrack_tac.ML \
+ Decision_Procs/mir_tac.ML \
Decision_Procs/ROOT.ML
@$(ISABELLE_TOOL) usedir $(OUT)/HOL Decision_Procs
--- a/src/HOL/Isar_Examples/Hoare.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Isar_Examples/Hoare.thy Tue Feb 09 17:06:05 2010 +0100
@@ -228,11 +228,11 @@
"_Assert" :: "'a => 'a set" ("(\<lbrace>_\<rbrace>)" [0] 1000)
translations
- ".{b}." => "Collect .(b)."
+ ".{b}." => "CONST Collect .(b)."
"B [a/\<acute>x]" => ".{\<acute>(_update_name x (\<lambda>_. a)) \<in> B}."
- "\<acute>x := a" => "Basic .(\<acute>(_update_name x (\<lambda>_. a)))."
- "IF b THEN c1 ELSE c2 FI" => "Cond .{b}. c1 c2"
- "WHILE b INV i DO c OD" => "While .{b}. i c"
+ "\<acute>x := a" => "CONST Basic .(\<acute>(_update_name x (\<lambda>_. a)))."
+ "IF b THEN c1 ELSE c2 FI" => "CONST Cond .{b}. c1 c2"
+ "WHILE b INV i DO c OD" => "CONST While .{b}. i c"
"WHILE b DO c OD" == "WHILE b INV CONST undefined DO c OD"
parse_translation {*
--- a/src/HOL/Lattices.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Lattices.thy Tue Feb 09 17:06:05 2010 +0100
@@ -16,13 +16,13 @@
top ("\<top>") and
bot ("\<bottom>")
-class lower_semilattice = order +
+class semilattice_inf = order +
fixes inf :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<sqinter>" 70)
assumes inf_le1 [simp]: "x \<sqinter> y \<sqsubseteq> x"
and inf_le2 [simp]: "x \<sqinter> y \<sqsubseteq> y"
and inf_greatest: "x \<sqsubseteq> y \<Longrightarrow> x \<sqsubseteq> z \<Longrightarrow> x \<sqsubseteq> y \<sqinter> z"
-class upper_semilattice = order +
+class semilattice_sup = order +
fixes sup :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<squnion>" 65)
assumes sup_ge1 [simp]: "x \<sqsubseteq> x \<squnion> y"
and sup_ge2 [simp]: "y \<sqsubseteq> x \<squnion> y"
@@ -32,18 +32,18 @@
text {* Dual lattice *}
lemma dual_semilattice:
- "lower_semilattice (op \<ge>) (op >) sup"
-by (rule lower_semilattice.intro, rule dual_order)
+ "semilattice_inf (op \<ge>) (op >) sup"
+by (rule semilattice_inf.intro, rule dual_order)
(unfold_locales, simp_all add: sup_least)
end
-class lattice = lower_semilattice + upper_semilattice
+class lattice = semilattice_inf + semilattice_sup
subsubsection {* Intro and elim rules*}
-context lower_semilattice
+context semilattice_inf
begin
lemma le_infI1:
@@ -69,13 +69,13 @@
by (auto intro: le_infI1 antisym dest: eq_iff [THEN iffD1])
lemma mono_inf:
- fixes f :: "'a \<Rightarrow> 'b\<Colon>lower_semilattice"
+ fixes f :: "'a \<Rightarrow> 'b\<Colon>semilattice_inf"
shows "mono f \<Longrightarrow> f (A \<sqinter> B) \<sqsubseteq> f A \<sqinter> f B"
by (auto simp add: mono_def intro: Lattices.inf_greatest)
end
-context upper_semilattice
+context semilattice_sup
begin
lemma le_supI1:
@@ -103,7 +103,7 @@
by (auto intro: le_supI2 antisym dest: eq_iff [THEN iffD1])
lemma mono_sup:
- fixes f :: "'a \<Rightarrow> 'b\<Colon>upper_semilattice"
+ fixes f :: "'a \<Rightarrow> 'b\<Colon>semilattice_sup"
shows "mono f \<Longrightarrow> f A \<squnion> f B \<sqsubseteq> f (A \<squnion> B)"
by (auto simp add: mono_def intro: Lattices.sup_least)
@@ -112,7 +112,7 @@
subsubsection {* Equational laws *}
-sublocale lower_semilattice < inf!: semilattice inf
+sublocale semilattice_inf < inf!: semilattice inf
proof
fix a b c
show "(a \<sqinter> b) \<sqinter> c = a \<sqinter> (b \<sqinter> c)"
@@ -123,7 +123,7 @@
by (rule antisym) auto
qed
-context lower_semilattice
+context semilattice_inf
begin
lemma inf_assoc: "(x \<sqinter> y) \<sqinter> z = x \<sqinter> (y \<sqinter> z)"
@@ -151,7 +151,7 @@
end
-sublocale upper_semilattice < sup!: semilattice sup
+sublocale semilattice_sup < sup!: semilattice sup
proof
fix a b c
show "(a \<squnion> b) \<squnion> c = a \<squnion> (b \<squnion> c)"
@@ -162,7 +162,7 @@
by (rule antisym) auto
qed
-context upper_semilattice
+context semilattice_sup
begin
lemma sup_assoc: "(x \<squnion> y) \<squnion> z = x \<squnion> (y \<squnion> z)"
@@ -195,7 +195,7 @@
lemma dual_lattice:
"lattice (op \<ge>) (op >) sup inf"
- by (rule lattice.intro, rule dual_semilattice, rule upper_semilattice.intro, rule dual_order)
+ by (rule lattice.intro, rule dual_semilattice, rule semilattice_sup.intro, rule dual_order)
(unfold_locales, auto)
lemma inf_sup_absorb: "x \<sqinter> (x \<squnion> y) = x"
@@ -246,7 +246,7 @@
subsubsection {* Strict order *}
-context lower_semilattice
+context semilattice_inf
begin
lemma less_infI1:
@@ -259,13 +259,13 @@
end
-context upper_semilattice
+context semilattice_sup
begin
lemma less_supI1:
"x \<sqsubset> a \<Longrightarrow> x \<sqsubset> a \<squnion> b"
proof -
- interpret dual: lower_semilattice "op \<ge>" "op >" sup
+ interpret dual: semilattice_inf "op \<ge>" "op >" sup
by (fact dual_semilattice)
assume "x \<sqsubset> a"
then show "x \<sqsubset> a \<squnion> b"
@@ -275,7 +275,7 @@
lemma less_supI2:
"x \<sqsubset> b \<Longrightarrow> x \<sqsubset> a \<squnion> b"
proof -
- interpret dual: lower_semilattice "op \<ge>" "op >" sup
+ interpret dual: semilattice_inf "op \<ge>" "op >" sup
by (fact dual_semilattice)
assume "x \<sqsubset> b"
then show "x \<sqsubset> a \<squnion> b"
@@ -492,7 +492,7 @@
subsection {* Uniqueness of inf and sup *}
-lemma (in lower_semilattice) inf_unique:
+lemma (in semilattice_inf) inf_unique:
fixes f (infixl "\<triangle>" 70)
assumes le1: "\<And>x y. x \<triangle> y \<sqsubseteq> x" and le2: "\<And>x y. x \<triangle> y \<sqsubseteq> y"
and greatest: "\<And>x y z. x \<sqsubseteq> y \<Longrightarrow> x \<sqsubseteq> z \<Longrightarrow> x \<sqsubseteq> y \<triangle> z"
@@ -504,7 +504,7 @@
show "x \<sqinter> y \<sqsubseteq> x \<triangle> y" by (rule leI) simp_all
qed
-lemma (in upper_semilattice) sup_unique:
+lemma (in semilattice_sup) sup_unique:
fixes f (infixl "\<nabla>" 70)
assumes ge1 [simp]: "\<And>x y. x \<sqsubseteq> x \<nabla> y" and ge2: "\<And>x y. y \<sqsubseteq> x \<nabla> y"
and least: "\<And>x y z. y \<sqsubseteq> x \<Longrightarrow> z \<sqsubseteq> x \<Longrightarrow> y \<nabla> z \<sqsubseteq> x"
@@ -527,10 +527,10 @@
by (auto simp add: min_def max_def)
qed (auto simp add: min_def max_def not_le less_imp_le)
-lemma inf_min: "inf = (min \<Colon> 'a\<Colon>{lower_semilattice, linorder} \<Rightarrow> 'a \<Rightarrow> 'a)"
+lemma inf_min: "inf = (min \<Colon> 'a\<Colon>{semilattice_inf, linorder} \<Rightarrow> 'a \<Rightarrow> 'a)"
by (rule ext)+ (auto intro: antisym)
-lemma sup_max: "sup = (max \<Colon> 'a\<Colon>{upper_semilattice, linorder} \<Rightarrow> 'a \<Rightarrow> 'a)"
+lemma sup_max: "sup = (max \<Colon> 'a\<Colon>{semilattice_sup, linorder} \<Rightarrow> 'a \<Rightarrow> 'a)"
by (rule ext)+ (auto intro: antisym)
lemmas le_maxI1 = min_max.sup_ge1
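Similarly, a minimal sketch using the renamed semilattice classes (illustrative only, not part of this changeset; the lemma name is hypothetical):

  (* hypothetical sketch: lower_semilattice is now spelt semilattice_inf *)
  lemma inf_lower_sketch:
    fixes x y :: "'a::semilattice_inf"
    shows "inf x y \<le> x"
    by (rule inf_le1)
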
--- a/src/HOL/Library/Abstract_Rat.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Abstract_Rat.thy Tue Feb 09 17:06:05 2010 +0100
@@ -332,7 +332,7 @@
lemma Ndiv[simp]: "INum (x \<div>\<^sub>N y) = INum x / (INum y ::'a :: {ring_char_0, division_by_zero,field})" by (simp add: Ndiv_def)
lemma Nlt0_iff[simp]: assumes nx: "isnormNum x"
- shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field})< 0) = 0>\<^sub>N x "
+ shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})< 0) = 0>\<^sub>N x "
proof-
have " \<exists> a b. x = (a,b)" by simp
then obtain a b where x[simp]:"x = (a,b)" by blast
@@ -345,7 +345,7 @@
qed
lemma Nle0_iff[simp]:assumes nx: "isnormNum x"
- shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field}) \<le> 0) = 0\<ge>\<^sub>N x"
+ shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) \<le> 0) = 0\<ge>\<^sub>N x"
proof-
have " \<exists> a b. x = (a,b)" by simp
then obtain a b where x[simp]:"x = (a,b)" by blast
@@ -357,7 +357,7 @@
ultimately show ?thesis by blast
qed
-lemma Ngt0_iff[simp]:assumes nx: "isnormNum x" shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field})> 0) = 0<\<^sub>N x"
+lemma Ngt0_iff[simp]:assumes nx: "isnormNum x" shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})> 0) = 0<\<^sub>N x"
proof-
have " \<exists> a b. x = (a,b)" by simp
then obtain a b where x[simp]:"x = (a,b)" by blast
@@ -369,7 +369,7 @@
ultimately show ?thesis by blast
qed
lemma Nge0_iff[simp]:assumes nx: "isnormNum x"
- shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field}) \<ge> 0) = 0\<le>\<^sub>N x"
+ shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) \<ge> 0) = 0\<le>\<^sub>N x"
proof-
have " \<exists> a b. x = (a,b)" by simp
then obtain a b where x[simp]:"x = (a,b)" by blast
@@ -382,7 +382,7 @@
qed
lemma Nlt_iff[simp]: assumes nx: "isnormNum x" and ny: "isnormNum y"
- shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field}) < INum y) = (x <\<^sub>N y)"
+ shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field}) < INum y) = (x <\<^sub>N y)"
proof-
let ?z = "0::'a"
have "((INum x ::'a) < INum y) = (INum (x -\<^sub>N y) < ?z)" using nx ny by simp
@@ -391,7 +391,7 @@
qed
lemma Nle_iff[simp]: assumes nx: "isnormNum x" and ny: "isnormNum y"
- shows "((INum x :: 'a :: {ring_char_0,division_by_zero,ordered_field})\<le> INum y) = (x \<le>\<^sub>N y)"
+ shows "((INum x :: 'a :: {ring_char_0,division_by_zero,linordered_field})\<le> INum y) = (x \<le>\<^sub>N y)"
proof-
have "((INum x ::'a) \<le> INum y) = (INum (x -\<^sub>N y) \<le> (0::'a))" using nx ny by simp
also have "\<dots> = (0\<ge>\<^sub>N (x -\<^sub>N y))" using Nle0_iff[OF Nsub_normN[OF ny]] by simp
--- a/src/HOL/Library/BigO.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/BigO.thy Tue Feb 09 17:06:05 2010 +0100
@@ -38,11 +38,11 @@
subsection {* Definitions *}
definition
- bigo :: "('a => 'b::ordered_idom) => ('a => 'b) set" ("(1O'(_'))") where
+ bigo :: "('a => 'b::linordered_idom) => ('a => 'b) set" ("(1O'(_'))") where
"O(f::('a => 'b)) =
{h. EX c. ALL x. abs (h x) <= c * abs (f x)}"
-lemma bigo_pos_const: "(EX (c::'a::ordered_idom).
+lemma bigo_pos_const: "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -352,7 +352,7 @@
done
lemma bigo_mult5: "ALL x. f x ~= 0 ==>
- O(f * g) <= (f::'a => ('b::ordered_field)) *o O(g)"
+ O(f * g) <= (f::'a => ('b::linordered_field)) *o O(g)"
proof -
assume "ALL x. f x ~= 0"
show "O(f * g) <= f *o O(g)"
@@ -381,14 +381,14 @@
qed
lemma bigo_mult6: "ALL x. f x ~= 0 ==>
- O(f * g) = (f::'a => ('b::ordered_field)) *o O(g)"
+ O(f * g) = (f::'a => ('b::linordered_field)) *o O(g)"
apply (rule equalityI)
apply (erule bigo_mult5)
apply (rule bigo_mult2)
done
lemma bigo_mult7: "ALL x. f x ~= 0 ==>
- O(f * g) <= O(f::'a => ('b::ordered_field)) \<otimes> O(g)"
+ O(f * g) <= O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
apply (subst bigo_mult6)
apply assumption
apply (rule set_times_mono3)
@@ -396,7 +396,7 @@
done
lemma bigo_mult8: "ALL x. f x ~= 0 ==>
- O(f * g) = O(f::'a => ('b::ordered_field)) \<otimes> O(g)"
+ O(f * g) = O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
apply (rule equalityI)
apply (erule bigo_mult7)
apply (rule bigo_mult)
@@ -481,16 +481,16 @@
apply (rule bigo_const1)
done
-lemma bigo_const3: "(c::'a::ordered_field) ~= 0 ==> (%x. 1) : O(%x. c)"
+lemma bigo_const3: "(c::'a::linordered_field) ~= 0 ==> (%x. 1) : O(%x. c)"
apply (simp add: bigo_def)
apply (rule_tac x = "abs(inverse c)" in exI)
apply (simp add: abs_mult [symmetric])
done
-lemma bigo_const4: "(c::'a::ordered_field) ~= 0 ==> O(%x. 1) <= O(%x. c)"
+lemma bigo_const4: "(c::'a::linordered_field) ~= 0 ==> O(%x. 1) <= O(%x. c)"
by (rule bigo_elt_subset, rule bigo_const3, assumption)
-lemma bigo_const [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const [simp]: "(c::'a::linordered_field) ~= 0 ==>
O(%x. c) = O(%x. 1)"
by (rule equalityI, rule bigo_const2, rule bigo_const4, assumption)
@@ -503,21 +503,21 @@
lemma bigo_const_mult2: "O(%x. c * f x) <= O(f)"
by (rule bigo_elt_subset, rule bigo_const_mult1)
-lemma bigo_const_mult3: "(c::'a::ordered_field) ~= 0 ==> f : O(%x. c * f x)"
+lemma bigo_const_mult3: "(c::'a::linordered_field) ~= 0 ==> f : O(%x. c * f x)"
apply (simp add: bigo_def)
apply (rule_tac x = "abs(inverse c)" in exI)
apply (simp add: abs_mult [symmetric] mult_assoc [symmetric])
done
-lemma bigo_const_mult4: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult4: "(c::'a::linordered_field) ~= 0 ==>
O(f) <= O(%x. c * f x)"
by (rule bigo_elt_subset, rule bigo_const_mult3, assumption)
-lemma bigo_const_mult [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult [simp]: "(c::'a::linordered_field) ~= 0 ==>
O(%x. c * f x) = O(f)"
by (rule equalityI, rule bigo_const_mult2, erule bigo_const_mult4)
-lemma bigo_const_mult5 [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult5 [simp]: "(c::'a::linordered_field) ~= 0 ==>
(%x. c) *o O(f) = O(f)"
apply (auto del: subsetI)
apply (rule order_trans)
@@ -688,7 +688,7 @@
apply assumption+
done
-lemma bigo_useful_const_mult: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_useful_const_mult: "(c::'a::linordered_field) ~= 0 ==>
(%x. c) * f =o O(h) ==> f =o O(h)"
apply (rule subsetD)
apply (subgoal_tac "(%x. 1 / c) *o O(h) <= O(h)")
@@ -733,7 +733,7 @@
subsection {* Less than or equal to *}
definition
- lesso :: "('a => 'b::ordered_idom) => ('a => 'b) => ('a => 'b)"
+ lesso :: "('a => 'b::linordered_idom) => ('a => 'b) => ('a => 'b)"
(infixl "<o" 70) where
"f <o g = (%x. max (f x - g x) 0)"
@@ -833,7 +833,7 @@
apply (simp add: algebra_simps)
done
-lemma bigo_lesso4: "f <o g =o O(k::'a=>'b::ordered_field) ==>
+lemma bigo_lesso4: "f <o g =o O(k::'a=>'b::linordered_field) ==>
g =o h +o O(k) ==> f <o h =o O(k)"
apply (unfold lesso_def)
apply (drule set_plus_imp_minus)
--- a/src/HOL/Library/Float.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Float.thy Tue Feb 09 17:06:05 2010 +0100
@@ -6,7 +6,7 @@
header {* Floating-Point Numbers *}
theory Float
-imports Complex_Main
+imports Complex_Main Lattice_Algebras
begin
definition
--- a/src/HOL/Library/Infinite_Set.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Infinite_Set.thy Tue Feb 09 17:06:05 2010 +0100
@@ -192,10 +192,11 @@
by (auto simp add: infinite_nat_iff_unbounded)
qed
-lemma nat_infinite [simp]: "infinite (UNIV :: nat set)"
+(* duplicates Finite_Set.infinite_UNIV_nat *)
+lemma nat_infinite: "infinite (UNIV :: nat set)"
by (auto simp add: infinite_nat_iff_unbounded)
-lemma nat_not_finite [elim]: "finite (UNIV::nat set) \<Longrightarrow> R"
+lemma nat_not_finite: "finite (UNIV::nat set) \<Longrightarrow> R"
by simp
text {*
--- a/src/HOL/Library/Kleene_Algebra.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Kleene_Algebra.thy Tue Feb 09 17:06:05 2010 +0100
@@ -72,7 +72,7 @@
class pre_kleene = semiring_1 + order_by_add
begin
-subclass pordered_semiring proof
+subclass ordered_semiring proof
fix x y z :: 'a
assume "x \<le> y"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Library/Lattice_Algebras.thy Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,557 @@
+(* Author: Steven Obua, TU Muenchen *)
+
+header {* Various algebraic structures combined with a lattice *}
+
+theory Lattice_Algebras
+imports Complex_Main
+begin
+
+class semilattice_inf_ab_group_add = ordered_ab_group_add + semilattice_inf
+begin
+
+lemma add_inf_distrib_left:
+ "a + inf b c = inf (a + b) (a + c)"
+apply (rule antisym)
+apply (simp_all add: le_infI)
+apply (rule add_le_imp_le_left [of "uminus a"])
+apply (simp only: add_assoc [symmetric], simp)
+apply rule
+apply (rule add_le_imp_le_left[of "a"], simp only: add_assoc[symmetric], simp)+
+done
+
+lemma add_inf_distrib_right:
+ "inf a b + c = inf (a + c) (b + c)"
+proof -
+ have "c + inf a b = inf (c+a) (c+b)" by (simp add: add_inf_distrib_left)
+ thus ?thesis by (simp add: add_commute)
+qed
+
+end
+
+class semilattice_sup_ab_group_add = ordered_ab_group_add + semilattice_sup
+begin
+
+lemma add_sup_distrib_left:
+ "a + sup b c = sup (a + b) (a + c)"
+apply (rule antisym)
+apply (rule add_le_imp_le_left [of "uminus a"])
+apply (simp only: add_assoc[symmetric], simp)
+apply rule
+apply (rule add_le_imp_le_left [of "a"], simp only: add_assoc[symmetric], simp)+
+apply (rule le_supI)
+apply (simp_all)
+done
+
+lemma add_sup_distrib_right:
+ "sup a b + c = sup (a+c) (b+c)"
+proof -
+ have "c + sup a b = sup (c+a) (c+b)" by (simp add: add_sup_distrib_left)
+ thus ?thesis by (simp add: add_commute)
+qed
+
+end
+
+class lattice_ab_group_add = ordered_ab_group_add + lattice
+begin
+
+subclass semilattice_inf_ab_group_add ..
+subclass semilattice_sup_ab_group_add ..
+
+lemmas add_sup_inf_distribs = add_inf_distrib_right add_inf_distrib_left add_sup_distrib_right add_sup_distrib_left
+
+lemma inf_eq_neg_sup: "inf a b = - sup (-a) (-b)"
+proof (rule inf_unique)
+ fix a b :: 'a
+ show "- sup (-a) (-b) \<le> a"
+ by (rule add_le_imp_le_right [of _ "sup (uminus a) (uminus b)"])
+ (simp, simp add: add_sup_distrib_left)
+next
+ fix a b :: 'a
+ show "- sup (-a) (-b) \<le> b"
+ by (rule add_le_imp_le_right [of _ "sup (uminus a) (uminus b)"])
+ (simp, simp add: add_sup_distrib_left)
+next
+ fix a b c :: 'a
+ assume "a \<le> b" "a \<le> c"
+ then show "a \<le> - sup (-b) (-c)" by (subst neg_le_iff_le [symmetric])
+ (simp add: le_supI)
+qed
+
+lemma sup_eq_neg_inf: "sup a b = - inf (-a) (-b)"
+proof (rule sup_unique)
+ fix a b :: 'a
+ show "a \<le> - inf (-a) (-b)"
+ by (rule add_le_imp_le_right [of _ "inf (uminus a) (uminus b)"])
+ (simp, simp add: add_inf_distrib_left)
+next
+ fix a b :: 'a
+ show "b \<le> - inf (-a) (-b)"
+ by (rule add_le_imp_le_right [of _ "inf (uminus a) (uminus b)"])
+ (simp, simp add: add_inf_distrib_left)
+next
+ fix a b c :: 'a
+ assume "a \<le> c" "b \<le> c"
+ then show "- inf (-a) (-b) \<le> c" by (subst neg_le_iff_le [symmetric])
+ (simp add: le_infI)
+qed
+
+lemma neg_inf_eq_sup: "- inf a b = sup (-a) (-b)"
+by (simp add: inf_eq_neg_sup)
+
+lemma neg_sup_eq_inf: "- sup a b = inf (-a) (-b)"
+by (simp add: sup_eq_neg_inf)
+
+lemma add_eq_inf_sup: "a + b = sup a b + inf a b"
+proof -
+ have "0 = - inf 0 (a-b) + inf (a-b) 0" by (simp add: inf_commute)
+ hence "0 = sup 0 (b-a) + inf (a-b) 0" by (simp add: inf_eq_neg_sup)
+ hence "0 = (-a + sup a b) + (inf a b + (-b))"
+ by (simp add: add_sup_distrib_left add_inf_distrib_right)
+ (simp add: algebra_simps)
+ thus ?thesis by (simp add: algebra_simps)
+qed
+
+subsection {* Positive Part, Negative Part, Absolute Value *}
+
+definition
+ nprt :: "'a \<Rightarrow> 'a" where
+ "nprt x = inf x 0"
+
+definition
+ pprt :: "'a \<Rightarrow> 'a" where
+ "pprt x = sup x 0"
+
+lemma pprt_neg: "pprt (- x) = - nprt x"
+proof -
+ have "sup (- x) 0 = sup (- x) (- 0)" unfolding minus_zero ..
+ also have "\<dots> = - inf x 0" unfolding neg_inf_eq_sup ..
+ finally have "sup (- x) 0 = - inf x 0" .
+ then show ?thesis unfolding pprt_def nprt_def .
+qed
+
+lemma nprt_neg: "nprt (- x) = - pprt x"
+proof -
+ from pprt_neg have "pprt (- (- x)) = - nprt (- x)" .
+ then have "pprt x = - nprt (- x)" by simp
+ then show ?thesis by simp
+qed
+
+lemma prts: "a = pprt a + nprt a"
+by (simp add: pprt_def nprt_def add_eq_inf_sup[symmetric])
+
+lemma zero_le_pprt[simp]: "0 \<le> pprt a"
+by (simp add: pprt_def)
+
+lemma nprt_le_zero[simp]: "nprt a \<le> 0"
+by (simp add: nprt_def)
+
+lemma le_eq_neg: "a \<le> - b \<longleftrightarrow> a + b \<le> 0" (is "?l = ?r")
+proof -
+ have a: "?l \<longrightarrow> ?r"
+ apply (auto)
+ apply (rule add_le_imp_le_right[of _ "uminus b" _])
+ apply (simp add: add_assoc)
+ done
+ have b: "?r \<longrightarrow> ?l"
+ apply (auto)
+ apply (rule add_le_imp_le_right[of _ "b" _])
+ apply (simp)
+ done
+ from a b show ?thesis by blast
+qed
+
+lemma pprt_0[simp]: "pprt 0 = 0" by (simp add: pprt_def)
+lemma nprt_0[simp]: "nprt 0 = 0" by (simp add: nprt_def)
+
+lemma pprt_eq_id [simp, noatp]: "0 \<le> x \<Longrightarrow> pprt x = x"
+ by (simp add: pprt_def sup_aci sup_absorb1)
+
+lemma nprt_eq_id [simp, noatp]: "x \<le> 0 \<Longrightarrow> nprt x = x"
+ by (simp add: nprt_def inf_aci inf_absorb1)
+
+lemma pprt_eq_0 [simp, noatp]: "x \<le> 0 \<Longrightarrow> pprt x = 0"
+ by (simp add: pprt_def sup_aci sup_absorb2)
+
+lemma nprt_eq_0 [simp, noatp]: "0 \<le> x \<Longrightarrow> nprt x = 0"
+ by (simp add: nprt_def inf_aci inf_absorb2)
+
+lemma sup_0_imp_0: "sup a (- a) = 0 \<Longrightarrow> a = 0"
+proof -
+ {
+ fix a::'a
+ assume hyp: "sup a (-a) = 0"
+ hence "sup a (-a) + a = a" by (simp)
+ hence "sup (a+a) 0 = a" by (simp add: add_sup_distrib_right)
+ hence "sup (a+a) 0 <= a" by (simp)
+ hence "0 <= a" by (blast intro: order_trans inf_sup_ord)
+ }
+ note p = this
+ assume hyp:"sup a (-a) = 0"
+ hence hyp2:"sup (-a) (-(-a)) = 0" by (simp add: sup_commute)
+ from p[OF hyp] p[OF hyp2] show "a = 0" by simp
+qed
+
+lemma inf_0_imp_0: "inf a (-a) = 0 \<Longrightarrow> a = 0"
+apply (simp add: inf_eq_neg_sup)
+apply (simp add: sup_commute)
+apply (erule sup_0_imp_0)
+done
+
+lemma inf_0_eq_0 [simp, noatp]: "inf a (- a) = 0 \<longleftrightarrow> a = 0"
+by (rule, erule inf_0_imp_0) simp
+
+lemma sup_0_eq_0 [simp, noatp]: "sup a (- a) = 0 \<longleftrightarrow> a = 0"
+by (rule, erule sup_0_imp_0) simp
+
+lemma zero_le_double_add_iff_zero_le_single_add [simp]:
+ "0 \<le> a + a \<longleftrightarrow> 0 \<le> a"
+proof
+ assume "0 <= a + a"
+ hence a:"inf (a+a) 0 = 0" by (simp add: inf_commute inf_absorb1)
+ have "(inf a 0)+(inf a 0) = inf (inf (a+a) 0) a" (is "?l=_")
+ by (simp add: add_sup_inf_distribs inf_aci)
+ hence "?l = 0 + inf a 0" by (simp add: a, simp add: inf_commute)
+ hence "inf a 0 = 0" by (simp only: add_right_cancel)
+ then show "0 <= a" unfolding le_iff_inf by (simp add: inf_commute)
+next
+ assume a: "0 <= a"
+ show "0 <= a + a" by (simp add: add_mono[OF a a, simplified])
+qed
+
+lemma double_zero [simp]:
+ "a + a = 0 \<longleftrightarrow> a = 0"
+proof
+ assume assm: "a + a = 0"
+ then have "a + a + - a = - a" by simp
+ then have "a + (a + - a) = - a" by (simp only: add_assoc)
+ then have a: "- a = a" by simp
+ show "a = 0" apply (rule antisym)
+ apply (unfold neg_le_iff_le [symmetric, of a])
+ unfolding a apply simp
+ unfolding zero_le_double_add_iff_zero_le_single_add [symmetric, of a]
+ unfolding assm unfolding le_less apply simp_all done
+next
+ assume "a = 0" then show "a + a = 0" by simp
+qed
+
+lemma zero_less_double_add_iff_zero_less_single_add [simp]:
+ "0 < a + a \<longleftrightarrow> 0 < a"
+proof (cases "a = 0")
+ case True then show ?thesis by auto
+next
+ case False then show ?thesis (*FIXME tune proof*)
+ unfolding less_le apply simp apply rule
+ apply clarify
+ apply rule
+ apply assumption
+ apply (rule notI)
+ unfolding double_zero [symmetric, of a] apply simp
+ done
+qed
+
+lemma double_add_le_zero_iff_single_add_le_zero [simp]:
+ "a + a \<le> 0 \<longleftrightarrow> a \<le> 0"
+proof -
+ have "a + a \<le> 0 \<longleftrightarrow> 0 \<le> - (a + a)" by (subst le_minus_iff, simp)
+ moreover have "\<dots> \<longleftrightarrow> a \<le> 0" by (simp add: zero_le_double_add_iff_zero_le_single_add)
+ ultimately show ?thesis by blast
+qed
+
+lemma double_add_less_zero_iff_single_less_zero [simp]:
+ "a + a < 0 \<longleftrightarrow> a < 0"
+proof -
+ have "a + a < 0 \<longleftrightarrow> 0 < - (a + a)" by (subst less_minus_iff, simp)
+ moreover have "\<dots> \<longleftrightarrow> a < 0" by (simp add: zero_less_double_add_iff_zero_less_single_add)
+ ultimately show ?thesis by blast
+qed
+
+declare neg_inf_eq_sup [simp] neg_sup_eq_inf [simp]
+
+lemma le_minus_self_iff: "a \<le> - a \<longleftrightarrow> a \<le> 0"
+proof -
+ from add_le_cancel_left [of "uminus a" "plus a a" zero]
+ have "(a <= -a) = (a+a <= 0)"
+ by (simp add: add_assoc[symmetric])
+ thus ?thesis by simp
+qed
+
+lemma minus_le_self_iff: "- a \<le> a \<longleftrightarrow> 0 \<le> a"
+proof -
+ from add_le_cancel_left [of "uminus a" zero "plus a a"]
+ have "(-a <= a) = (0 <= a+a)"
+ by (simp add: add_assoc[symmetric])
+ thus ?thesis by simp
+qed
+
+lemma zero_le_iff_zero_nprt: "0 \<le> a \<longleftrightarrow> nprt a = 0"
+unfolding le_iff_inf by (simp add: nprt_def inf_commute)
+
+lemma le_zero_iff_zero_pprt: "a \<le> 0 \<longleftrightarrow> pprt a = 0"
+unfolding le_iff_sup by (simp add: pprt_def sup_commute)
+
+lemma le_zero_iff_pprt_id: "0 \<le> a \<longleftrightarrow> pprt a = a"
+unfolding le_iff_sup by (simp add: pprt_def sup_commute)
+
+lemma zero_le_iff_nprt_id: "a \<le> 0 \<longleftrightarrow> nprt a = a"
+unfolding le_iff_inf by (simp add: nprt_def inf_commute)
+
+lemma pprt_mono [simp, noatp]: "a \<le> b \<Longrightarrow> pprt a \<le> pprt b"
+unfolding le_iff_sup by (simp add: pprt_def sup_aci sup_assoc [symmetric, of a])
+
+lemma nprt_mono [simp, noatp]: "a \<le> b \<Longrightarrow> nprt a \<le> nprt b"
+unfolding le_iff_inf by (simp add: nprt_def inf_aci inf_assoc [symmetric, of a])
+
+end
+
+lemmas add_sup_inf_distribs = add_inf_distrib_right add_inf_distrib_left add_sup_distrib_right add_sup_distrib_left
+
+
+class lattice_ab_group_add_abs = lattice_ab_group_add + abs +
+ assumes abs_lattice: "\<bar>a\<bar> = sup a (- a)"
+begin
+
+lemma abs_prts: "\<bar>a\<bar> = pprt a - nprt a"
+proof -
+ have "0 \<le> \<bar>a\<bar>"
+ proof -
+ have a: "a \<le> \<bar>a\<bar>" and b: "- a \<le> \<bar>a\<bar>" by (auto simp add: abs_lattice)
+ show ?thesis by (rule add_mono [OF a b, simplified])
+ qed
+ then have "0 \<le> sup a (- a)" unfolding abs_lattice .
+ then have "sup (sup a (- a)) 0 = sup a (- a)" by (rule sup_absorb1)
+ then show ?thesis
+ by (simp add: add_sup_inf_distribs sup_aci
+ pprt_def nprt_def diff_minus abs_lattice)
+qed
+
+subclass ordered_ab_group_add_abs
+proof
+ have abs_ge_zero [simp]: "\<And>a. 0 \<le> \<bar>a\<bar>"
+ proof -
+ fix a b
+ have a: "a \<le> \<bar>a\<bar>" and b: "- a \<le> \<bar>a\<bar>" by (auto simp add: abs_lattice)
+ show "0 \<le> \<bar>a\<bar>" by (rule add_mono [OF a b, simplified])
+ qed
+ have abs_leI: "\<And>a b. a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b"
+ by (simp add: abs_lattice le_supI)
+ fix a b
+ show "0 \<le> \<bar>a\<bar>" by simp
+ show "a \<le> \<bar>a\<bar>"
+ by (auto simp add: abs_lattice)
+ show "\<bar>-a\<bar> = \<bar>a\<bar>"
+ by (simp add: abs_lattice sup_commute)
+ show "a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b" by (fact abs_leI)
+ show "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
+ proof -
+ have g:"abs a + abs b = sup (a+b) (sup (-a-b) (sup (-a+b) (a + (-b))))" (is "_=sup ?m ?n")
+ by (simp add: abs_lattice add_sup_inf_distribs sup_aci diff_minus)
+ have a:"a+b <= sup ?m ?n" by (simp)
+ have b:"-a-b <= ?n" by (simp)
+ have c:"?n <= sup ?m ?n" by (simp)
+ from b c have d: "-a-b <= sup ?m ?n" by(rule order_trans)
+ have e:"-a-b = -(a+b)" by (simp add: diff_minus)
+ from a d e have "abs(a+b) <= sup ?m ?n"
+ by (drule_tac abs_leI, auto)
+ with g[symmetric] show ?thesis by simp
+ qed
+qed
+
+end
+
+lemma sup_eq_if:
+ fixes a :: "'a\<Colon>{lattice_ab_group_add, linorder}"
+ shows "sup a (- a) = (if a < 0 then - a else a)"
+proof -
+ note add_le_cancel_right [of a a "- a", symmetric, simplified]
+ moreover note add_le_cancel_right [of "-a" a a, symmetric, simplified]
+ then show ?thesis by (auto simp: sup_max min_max.sup_absorb1 min_max.sup_absorb2)
+qed
+
+lemma abs_if_lattice:
+ fixes a :: "'a\<Colon>{lattice_ab_group_add_abs, linorder}"
+ shows "\<bar>a\<bar> = (if a < 0 then - a else a)"
+by auto
+
+lemma estimate_by_abs:
+ "a + b <= (c::'a::lattice_ab_group_add_abs) \<Longrightarrow> a <= c + abs b"
+proof -
+ assume "a+b <= c"
+ hence 2: "a <= c+(-b)" by (simp add: algebra_simps)
+ have 3: "(-b) <= abs b" by (rule abs_ge_minus_self)
+ show ?thesis by (rule le_add_right_mono[OF 2 3])
+qed
+
+class lattice_ring = ordered_ring + lattice_ab_group_add_abs
+begin
+
+subclass semilattice_inf_ab_group_add ..
+subclass semilattice_sup_ab_group_add ..
+
+end
+
+lemma abs_le_mult: "abs (a * b) \<le> (abs a) * (abs (b::'a::lattice_ring))"
+proof -
+ let ?x = "pprt a * pprt b - pprt a * nprt b - nprt a * pprt b + nprt a * nprt b"
+ let ?y = "pprt a * pprt b + pprt a * nprt b + nprt a * pprt b + nprt a * nprt b"
+ have a: "(abs a) * (abs b) = ?x"
+ by (simp only: abs_prts[of a] abs_prts[of b] algebra_simps)
+ {
+ fix u v :: 'a
+ have bh: "\<lbrakk>u = a; v = b\<rbrakk> \<Longrightarrow>
+ u * v = pprt a * pprt b + pprt a * nprt b +
+ nprt a * pprt b + nprt a * nprt b"
+ apply (subst prts[of u], subst prts[of v])
+ apply (simp add: algebra_simps)
+ done
+ }
+ note b = this[OF refl[of a] refl[of b]]
+ note addm = add_mono[of "0::'a" _ "0::'a", simplified]
+ note addm2 = add_mono[of _ "0::'a" _ "0::'a", simplified]
+ have xy: "- ?x <= ?y"
+ apply (simp)
+ apply (rule_tac y="0::'a" in order_trans)
+ apply (rule addm2)
+ apply (simp_all add: mult_nonneg_nonneg mult_nonpos_nonpos)
+ apply (rule addm)
+ apply (simp_all add: mult_nonneg_nonneg mult_nonpos_nonpos)
+ done
+ have yx: "?y <= ?x"
+ apply (simp add:diff_def)
+ apply (rule_tac y=0 in order_trans)
+ apply (rule addm2, (simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)+)
+ apply (rule addm, (simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)+)
+ done
+ have i1: "a*b <= abs a * abs b" by (simp only: a b yx)
+ have i2: "- (abs a * abs b) <= a*b" by (simp only: a b xy)
+ show ?thesis
+ apply (rule abs_leI)
+ apply (simp add: i1)
+ apply (simp add: i2[simplified minus_le_iff])
+ done
+qed
+
+instance lattice_ring \<subseteq> ordered_ring_abs
+proof
+ fix a b :: "'a\<Colon> lattice_ring"
+ assume "(0 \<le> a \<or> a \<le> 0) \<and> (0 \<le> b \<or> b \<le> 0)"
+ show "abs (a*b) = abs a * abs b"
+ proof -
+ have s: "(0 <= a*b) | (a*b <= 0)"
+ apply (auto)
+ apply (rule_tac split_mult_pos_le)
+ apply (rule_tac contrapos_np[of "a*b <= 0"])
+ apply (simp)
+ apply (rule_tac split_mult_neg_le)
+ apply (insert prems)
+ apply (blast)
+ done
+ have mulprts: "a * b = (pprt a + nprt a) * (pprt b + nprt b)"
+ by (simp add: prts[symmetric])
+ show ?thesis
+ proof cases
+ assume "0 <= a * b"
+ then show ?thesis
+ apply (simp_all add: mulprts abs_prts)
+ apply (insert prems)
+ apply (auto simp add:
+ algebra_simps
+ iffD1[OF zero_le_iff_zero_nprt] iffD1[OF le_zero_iff_zero_pprt]
+ iffD1[OF le_zero_iff_pprt_id] iffD1[OF zero_le_iff_nprt_id])
+ apply(drule (1) mult_nonneg_nonpos[of a b], simp)
+ apply(drule (1) mult_nonneg_nonpos2[of b a], simp)
+ done
+ next
+ assume "~(0 <= a*b)"
+ with s have "a*b <= 0" by simp
+ then show ?thesis
+ apply (simp_all add: mulprts abs_prts)
+ apply (insert prems)
+ apply (auto simp add: algebra_simps)
+ apply(drule (1) mult_nonneg_nonneg[of a b],simp)
+ apply(drule (1) mult_nonpos_nonpos[of a b],simp)
+ done
+ qed
+ qed
+qed
+
+lemma mult_le_prts:
+ assumes
+ "a1 <= (a::'a::lattice_ring)"
+ "a <= a2"
+ "b1 <= b"
+ "b <= b2"
+ shows
+ "a * b <= pprt a2 * pprt b2 + pprt a1 * nprt b2 + nprt a2 * pprt b1 + nprt a1 * nprt b1"
+proof -
+ have "a * b = (pprt a + nprt a) * (pprt b + nprt b)"
+ apply (subst prts[symmetric])+
+ apply simp
+ done
+ then have "a * b = pprt a * pprt b + pprt a * nprt b + nprt a * pprt b + nprt a * nprt b"
+ by (simp add: algebra_simps)
+ moreover have "pprt a * pprt b <= pprt a2 * pprt b2"
+ by (simp_all add: prems mult_mono)
+ moreover have "pprt a * nprt b <= pprt a1 * nprt b2"
+ proof -
+ have "pprt a * nprt b <= pprt a * nprt b2"
+ by (simp add: mult_left_mono prems)
+ moreover have "pprt a * nprt b2 <= pprt a1 * nprt b2"
+ by (simp add: mult_right_mono_neg prems)
+ ultimately show ?thesis
+ by simp
+ qed
+ moreover have "nprt a * pprt b <= nprt a2 * pprt b1"
+ proof -
+ have "nprt a * pprt b <= nprt a2 * pprt b"
+ by (simp add: mult_right_mono prems)
+ moreover have "nprt a2 * pprt b <= nprt a2 * pprt b1"
+ by (simp add: mult_left_mono_neg prems)
+ ultimately show ?thesis
+ by simp
+ qed
+ moreover have "nprt a * nprt b <= nprt a1 * nprt b1"
+ proof -
+ have "nprt a * nprt b <= nprt a * nprt b1"
+ by (simp add: mult_left_mono_neg prems)
+ moreover have "nprt a * nprt b1 <= nprt a1 * nprt b1"
+ by (simp add: mult_right_mono_neg prems)
+ ultimately show ?thesis
+ by simp
+ qed
+ ultimately show ?thesis
+ by - (rule add_mono | simp)+
+qed
+
+lemma mult_ge_prts:
+ assumes
+ "a1 <= (a::'a::lattice_ring)"
+ "a <= a2"
+ "b1 <= b"
+ "b <= b2"
+ shows
+ "a * b >= nprt a1 * pprt b2 + nprt a2 * nprt b2 + pprt a1 * pprt b1 + pprt a2 * nprt b1"
+proof -
+ from prems have a1:"- a2 <= -a" by auto
+ from prems have a2: "-a <= -a1" by auto
+ from mult_le_prts[of "-a2" "-a" "-a1" "b1" b "b2", OF a1 a2 prems(3) prems(4), simplified nprt_neg pprt_neg]
+ have le: "- (a * b) <= - nprt a1 * pprt b2 + - nprt a2 * nprt b2 + - pprt a1 * pprt b1 + - pprt a2 * nprt b1" by simp
+ then have "-(- nprt a1 * pprt b2 + - nprt a2 * nprt b2 + - pprt a1 * pprt b1 + - pprt a2 * nprt b1) <= a * b"
+ by (simp only: minus_le_iff)
+ then show ?thesis by simp
+qed
+
+instance int :: lattice_ring
+proof
+ fix k :: int
+ show "abs k = sup k (- k)"
+ by (auto simp add: sup_int_def)
+qed
+
+instance real :: lattice_ring
+proof
+ fix a :: real
+ show "abs a = sup a (- a)"
+ by (auto simp add: sup_real_def)
+qed
+
+end
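A minimal usage sketch for the new theory (illustrative only, not part of this changeset; the theory name is hypothetical and the plain import assumes the HOL-Library sources are on the theory load path):

  theory Lattice_Ring_Sketch
  imports Lattice_Algebras
  begin

  (* abs_le_mult is provided by Lattice_Algebras for sort lattice_ring *)
  lemma "abs (a * b) \<le> abs a * abs (b::'a::lattice_ring)"
    by (rule abs_le_mult)

  end
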
--- a/src/HOL/Library/Library.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Library.thy Tue Feb 09 17:06:05 2010 +0100
@@ -28,6 +28,7 @@
Fundamental_Theorem_Algebra
Infinite_Set
Inner_Product
+ Lattice_Algebras
Lattice_Syntax
ListVector
Kleene_Algebra
@@ -50,6 +51,7 @@
RBT
SML_Quickcheck
State_Monad
+ Structure_Syntax
Sum_Of_Squares
Transitive_Closure_Table
Univ_Poly
--- a/src/HOL/Library/Multiset.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Multiset.thy Tue Feb 09 17:06:05 2010 +0100
@@ -415,11 +415,11 @@
mset_le_trans simp: mset_less_def)
interpretation mset_order_cancel_semigroup:
- pordered_cancel_ab_semigroup_add "op +" "op \<le>#" "op <#"
+ ordered_cancel_ab_semigroup_add "op +" "op \<le>#" "op <#"
proof qed (erule mset_le_mono_add [OF mset_le_refl])
interpretation mset_order_semigroup_cancel:
- pordered_ab_semigroup_add_imp_le "op +" "op \<le>#" "op <#"
+ ordered_ab_semigroup_add_imp_le "op +" "op \<le>#" "op <#"
proof qed simp
lemma mset_lessD: "A \<subset># B \<Longrightarrow> x \<in># A \<Longrightarrow> x \<in># B"
@@ -1348,7 +1348,7 @@
lemma union_upper2: "B <= A + (B::'a::order multiset)"
by (subst add_commute) (rule union_upper1)
-instance multiset :: (order) pordered_ab_semigroup_add
+instance multiset :: (order) ordered_ab_semigroup_add
apply intro_classes
apply (erule union_le_mono[OF mult_le_refl])
done
--- a/src/HOL/Library/Nat_Infinity.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Nat_Infinity.thy Tue Feb 09 17:06:05 2010 +0100
@@ -234,7 +234,7 @@
subsection {* Ordering *}
-instantiation inat :: ordered_ab_semigroup_add
+instantiation inat :: linordered_ab_semigroup_add
begin
definition
@@ -268,7 +268,7 @@
end
-instance inat :: pordered_comm_semiring
+instance inat :: ordered_comm_semiring
proof
fix a b c :: inat
assume "a \<le> b" and "0 \<le> c"
--- a/src/HOL/Library/Poly_Deriv.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Poly_Deriv.thy Tue Feb 09 17:06:05 2010 +0100
@@ -139,7 +139,7 @@
lemma dvd_add_cancel1:
fixes a b c :: "'a::comm_ring_1"
shows "a dvd b + c \<Longrightarrow> a dvd b \<Longrightarrow> a dvd c"
- by (drule (1) Ring_and_Field.dvd_diff, simp)
+ by (drule (1) Rings.dvd_diff, simp)
lemma lemma_order_pderiv [rule_format]:
"\<forall>p q a. 0 < n &
--- a/src/HOL/Library/Polynomial.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Polynomial.thy Tue Feb 09 17:06:05 2010 +0100
@@ -706,7 +706,7 @@
subsection {* Polynomials form an ordered integral domain *}
definition
- pos_poly :: "'a::ordered_idom poly \<Rightarrow> bool"
+ pos_poly :: "'a::linordered_idom poly \<Rightarrow> bool"
where
"pos_poly p \<longleftrightarrow> 0 < coeff p (degree p)"
@@ -732,7 +732,7 @@
lemma pos_poly_total: "p = 0 \<or> pos_poly p \<or> pos_poly (- p)"
by (induct p) (auto simp add: pos_poly_pCons)
-instantiation poly :: (ordered_idom) ordered_idom
+instantiation poly :: (linordered_idom) linordered_idom
begin
definition
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Library/Structure_Syntax.thy Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,14 @@
+(* Author: Florian Haftmann, TU Muenchen *)
+
+header {* Index syntax for structures *}
+
+theory Structure_Syntax
+imports Pure
+begin
+
+syntax
+ "_index1" :: index ("\<^sub>1")
+translations
+ (index) "\<^sub>1" => (index) "\<^bsub>\<struct>\<^esub>"
+
+end
--- a/src/HOL/Library/Sum_Of_Squares/sos_wrapper.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Sum_Of_Squares/sos_wrapper.ML Tue Feb 09 17:06:05 2010 +0100
@@ -54,10 +54,12 @@
(* call solver *)
val output_file = filename dir "sos_out"
- val (output, rv) = system_out (
- if File.exists cmd then space_implode " "
- [File.shell_path cmd, File.shell_path input_file, File.shell_path output_file]
- else error ("Bad executable: " ^ File.platform_path cmd))
+ val (output, rv) =
+ bash_output
+ (if File.exists cmd then
+ space_implode " "
+ [File.shell_path cmd, File.shell_path input_file, File.shell_path output_file]
+ else error ("Bad executable: " ^ File.platform_path cmd))
(* read and analyze output *)
val (res, res_msg) = find_failure rv
--- a/src/HOL/Library/Univ_Poly.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/Univ_Poly.thy Tue Feb 09 17:06:05 2010 +0100
@@ -990,7 +990,7 @@
text{*bound for polynomial.*}
-lemma poly_mono: "abs(x) \<le> k ==> abs(poly p (x::'a::{ordered_idom})) \<le> poly (map abs p) k"
+lemma poly_mono: "abs(x) \<le> k ==> abs(poly p (x::'a::{linordered_idom})) \<le> poly (map abs p) k"
apply (induct "p", auto)
apply (rule_tac y = "abs a + abs (x * poly p x)" in order_trans)
apply (rule abs_triangle_ineq)
--- a/src/HOL/Library/positivstellensatz.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Library/positivstellensatz.ML Tue Feb 09 17:06:05 2010 +0100
@@ -275,7 +275,7 @@
"((a + b + min x y + c > r) = (a + b + x + c > r & a + b + y + c > r))"
by auto};
-val abs_split' = @{lemma "P (abs (x::'a::ordered_idom)) == (x >= 0 & P x | x < 0 & P (-x))"
+val abs_split' = @{lemma "P (abs (x::'a::linordered_idom)) == (x >= 0 & P x | x < 0 & P (-x))"
by (atomize (full)) (auto split add: abs_split)};
val max_split = @{lemma "P (max x y) == ((x::'a::linorder) <= y & P y | x > y & P x)"
--- a/src/HOL/List.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/List.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2500,12 +2500,12 @@
by (induct xs, simp_all add: algebra_simps)
lemma listsum_abs:
- fixes xs :: "'a::pordered_ab_group_add_abs list"
+ fixes xs :: "'a::ordered_ab_group_add_abs list"
shows "\<bar>listsum xs\<bar> \<le> listsum (map abs xs)"
by (induct xs, simp, simp add: order_trans [OF abs_triangle_ineq])
lemma listsum_mono:
- fixes f g :: "'a \<Rightarrow> 'b::{comm_monoid_add, pordered_ab_semigroup_add}"
+ fixes f g :: "'a \<Rightarrow> 'b::{comm_monoid_add, ordered_ab_semigroup_add}"
shows "(\<And>x. x \<in> set xs \<Longrightarrow> f x \<le> g x) \<Longrightarrow> (\<Sum>x\<leftarrow>xs. f x) \<le> (\<Sum>x\<leftarrow>xs. g x)"
by (induct xs, simp, simp add: add_mono)
--- a/src/HOL/Matrix/ComputeFloat.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/ComputeFloat.thy Tue Feb 09 17:06:05 2010 +0100
@@ -5,7 +5,7 @@
header {* Floating Point Representation of the Reals *}
theory ComputeFloat
-imports Complex_Main
+imports Complex_Main Lattice_Algebras
uses "~~/src/Tools/float.ML" ("~~/src/HOL/Tools/float_arith.ML")
begin
--- a/src/HOL/Matrix/ComputeNumeral.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/ComputeNumeral.thy Tue Feb 09 17:06:05 2010 +0100
@@ -109,22 +109,22 @@
lemmas compute_natarith = bitarith natnorm natsuc natadd natsub natmul nateq natless natle natfac.simps
-lemma number_eq: "(((number_of x)::'a::{number_ring, ordered_idom}) = (number_of y)) = (x = y)"
+lemma number_eq: "(((number_of x)::'a::{number_ring, linordered_idom}) = (number_of y)) = (x = y)"
unfolding number_of_eq
apply simp
done
-lemma number_le: "(((number_of x)::'a::{number_ring, ordered_idom}) \<le> (number_of y)) = (x \<le> y)"
+lemma number_le: "(((number_of x)::'a::{number_ring, linordered_idom}) \<le> (number_of y)) = (x \<le> y)"
unfolding number_of_eq
apply simp
done
-lemma number_less: "(((number_of x)::'a::{number_ring, ordered_idom}) < (number_of y)) = (x < y)"
+lemma number_less: "(((number_of x)::'a::{number_ring, linordered_idom}) < (number_of y)) = (x < y)"
unfolding number_of_eq
apply simp
done
-lemma number_diff: "((number_of x)::'a::{number_ring, ordered_idom}) - number_of y = number_of (x + (- y))"
+lemma number_diff: "((number_of x)::'a::{number_ring, linordered_idom}) - number_of y = number_of (x + (- y))"
apply (subst diff_number_of_eq)
apply simp
done
--- a/src/HOL/Matrix/LP.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/LP.thy Tue Feb 09 17:06:05 2010 +0100
@@ -3,12 +3,12 @@
*)
theory LP
-imports Main
+imports Main Lattice_Algebras
begin
lemma linprog_dual_estimate:
assumes
- "A * x \<le> (b::'a::lordered_ring)"
+ "A * x \<le> (b::'a::lattice_ring)"
"0 \<le> y"
"abs (A - A') \<le> \<delta>A"
"b \<le> b'"
@@ -57,7 +57,7 @@
lemma le_ge_imp_abs_diff_1:
assumes
- "A1 <= (A::'a::lordered_ring)"
+ "A1 <= (A::'a::lattice_ring)"
"A <= A2"
shows "abs (A-A1) <= A2-A1"
proof -
@@ -72,7 +72,7 @@
lemma mult_le_prts:
assumes
- "a1 <= (a::'a::lordered_ring)"
+ "a1 <= (a::'a::lattice_ring)"
"a <= a2"
"b1 <= b"
"b <= b2"
@@ -120,7 +120,7 @@
lemma mult_le_dual_prts:
assumes
- "A * x \<le> (b::'a::lordered_ring)"
+ "A * x \<le> (b::'a::lattice_ring)"
"0 \<le> y"
"A1 \<le> A"
"A \<le> A2"
--- a/src/HOL/Matrix/Matrix.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/Matrix.thy Tue Feb 09 17:06:05 2010 +0100
@@ -3,7 +3,7 @@
*)
theory Matrix
-imports Main
+imports Main Lattice_Algebras
begin
types 'a infmatrix = "nat \<Rightarrow> nat \<Rightarrow> 'a"
@@ -1545,7 +1545,7 @@
by (simp add: plus_matrix_def diff_matrix_def minus_matrix_def Rep_matrix_inject[symmetric] ext)
qed
-instance matrix :: (pordered_ab_group_add) pordered_ab_group_add
+instance matrix :: (ordered_ab_group_add) ordered_ab_group_add
proof
fix A B C :: "'a matrix"
assume "A <= B"
@@ -1556,8 +1556,8 @@
done
qed
-instance matrix :: (lordered_ab_group_add) lordered_ab_group_add_meet ..
-instance matrix :: (lordered_ab_group_add) lordered_ab_group_add_join ..
+instance matrix :: (lattice_ab_group_add) semilattice_inf_ab_group_add ..
+instance matrix :: (lattice_ab_group_add) semilattice_sup_ab_group_add ..
instance matrix :: (semiring_0) semiring_0
proof
@@ -1583,7 +1583,7 @@
instance matrix :: (ring) ring ..
-instance matrix :: (pordered_ring) pordered_ring
+instance matrix :: (ordered_ring) ordered_ring
proof
fix A B C :: "'a matrix"
assume a: "A \<le> B"
@@ -1600,9 +1600,9 @@
done
qed
-instance matrix :: (lordered_ring) lordered_ring
+instance matrix :: (lattice_ring) lattice_ring
proof
- fix A B C :: "('a :: lordered_ring) matrix"
+ fix A B C :: "('a :: lattice_ring) matrix"
show "abs A = sup A (-A)"
by (simp add: abs_matrix_def)
qed
@@ -1738,7 +1738,7 @@
by auto
lemma Rep_matrix_zero_imp_mult_zero:
- "! j i k. (Rep_matrix A j k = 0) | (Rep_matrix B k i) = 0 \<Longrightarrow> A * B = (0::('a::lordered_ring) matrix)"
+ "! j i k. (Rep_matrix A j k = 0) | (Rep_matrix B k i) = 0 \<Longrightarrow> A * B = (0::('a::lattice_ring) matrix)"
apply (subst Rep_matrix_inject[symmetric])
apply (rule ext)+
apply (auto simp add: Rep_matrix_mult foldseq_zero zero_imp_mult_zero)
@@ -1803,7 +1803,7 @@
lemma Rep_minus[simp]: "Rep_matrix (-(A::_::group_add)) x y = - (Rep_matrix A x y)"
by (simp add: minus_matrix_def)
-lemma Rep_abs[simp]: "Rep_matrix (abs (A::_::lordered_ab_group_add)) x y = abs (Rep_matrix A x y)"
+lemma Rep_abs[simp]: "Rep_matrix (abs (A::_::lattice_ab_group_add)) x y = abs (Rep_matrix A x y)"
by (simp add: abs_lattice sup_matrix_def)
end
--- a/src/HOL/Matrix/SparseMatrix.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/SparseMatrix.thy Tue Feb 09 17:06:05 2010 +0100
@@ -103,7 +103,7 @@
"minus_spvec [] = []"
| "minus_spvec (a#as) = (fst a, -(snd a))#(minus_spvec as)"
-primrec abs_spvec :: "('a::lordered_ab_group_add_abs) spvec \<Rightarrow> 'a spvec" where
+primrec abs_spvec :: "('a::lattice_ab_group_add_abs) spvec \<Rightarrow> 'a spvec" where
"abs_spvec [] = []"
| "abs_spvec (a#as) = (fst a, abs (snd a))#(abs_spvec as)"
@@ -116,12 +116,12 @@
apply simp
done
-instance matrix :: (lordered_ab_group_add_abs) lordered_ab_group_add_abs
+instance matrix :: (lattice_ab_group_add_abs) lattice_ab_group_add_abs
apply default
unfolding abs_matrix_def .. (*FIXME move*)
lemma sparse_row_vector_abs:
- "sorted_spvec (v :: 'a::lordered_ring spvec) \<Longrightarrow> sparse_row_vector (abs_spvec v) = abs (sparse_row_vector v)"
+ "sorted_spvec (v :: 'a::lattice_ring spvec) \<Longrightarrow> sparse_row_vector (abs_spvec v) = abs (sparse_row_vector v)"
apply (induct v)
apply simp_all
apply (frule_tac sorted_spvec_cons1, simp)
@@ -174,7 +174,7 @@
lemma addmult_spvec_empty2[simp]: "addmult_spvec y a [] = a"
by (induct a) auto
-lemma sparse_row_vector_map: "(! x y. f (x+y) = (f x) + (f y)) \<Longrightarrow> (f::'a\<Rightarrow>('a::lordered_ring)) 0 = 0 \<Longrightarrow>
+lemma sparse_row_vector_map: "(! x y. f (x+y) = (f x) + (f y)) \<Longrightarrow> (f::'a\<Rightarrow>('a::lattice_ring)) 0 = 0 \<Longrightarrow>
sparse_row_vector (map (% x. (fst x, f (snd x))) a) = apply_matrix f (sparse_row_vector a)"
apply (induct a)
apply (simp_all add: apply_matrix_add)
@@ -185,7 +185,7 @@
apply (simp_all add: smult_spvec_cons scalar_mult_add)
done
-lemma sparse_row_vector_addmult_spvec: "sparse_row_vector (addmult_spvec (y::'a::lordered_ring) a b) =
+lemma sparse_row_vector_addmult_spvec: "sparse_row_vector (addmult_spvec (y::'a::lattice_ring) a b) =
(sparse_row_vector a) + (scalar_mult y (sparse_row_vector b))"
apply (induct y a b rule: addmult_spvec.induct)
apply (simp add: scalar_mult_add smult_spvec_cons sparse_row_vector_smult singleton_matrix_add)+
@@ -235,7 +235,7 @@
apply (simp_all add: sorted_spvec_addmult_spvec_helper3)
done
-fun mult_spvec_spmat :: "('a::lordered_ring) spvec \<Rightarrow> 'a spvec \<Rightarrow> 'a spmat \<Rightarrow> 'a spvec" where
+fun mult_spvec_spmat :: "('a::lattice_ring) spvec \<Rightarrow> 'a spvec \<Rightarrow> 'a spmat \<Rightarrow> 'a spvec" where
(* recdef mult_spvec_spmat "measure (% (c, arr, brr). (length arr) + (length brr))" *)
"mult_spvec_spmat c [] brr = c" |
"mult_spvec_spmat c arr [] = c" |
@@ -244,7 +244,7 @@
else if (j < i) then mult_spvec_spmat c ((i,a)#arr) brr
else mult_spvec_spmat (addmult_spvec a c b) arr brr)"
-lemma sparse_row_mult_spvec_spmat[rule_format]: "sorted_spvec (a::('a::lordered_ring) spvec) \<longrightarrow> sorted_spvec B \<longrightarrow>
+lemma sparse_row_mult_spvec_spmat[rule_format]: "sorted_spvec (a::('a::lattice_ring) spvec) \<longrightarrow> sorted_spvec B \<longrightarrow>
sparse_row_vector (mult_spvec_spmat c a B) = (sparse_row_vector c) + (sparse_row_vector a) * (sparse_row_matrix B)"
proof -
have comp_1: "!! a b. a < b \<Longrightarrow> Suc 0 <= nat ((int b)-(int a))" by arith
@@ -337,13 +337,13 @@
qed
lemma sorted_mult_spvec_spmat[rule_format]:
- "sorted_spvec (c::('a::lordered_ring) spvec) \<longrightarrow> sorted_spmat B \<longrightarrow> sorted_spvec (mult_spvec_spmat c a B)"
+ "sorted_spvec (c::('a::lattice_ring) spvec) \<longrightarrow> sorted_spmat B \<longrightarrow> sorted_spvec (mult_spvec_spmat c a B)"
apply (induct c a B rule: mult_spvec_spmat.induct)
apply (simp_all add: sorted_addmult_spvec)
done
consts
- mult_spmat :: "('a::lordered_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
+ mult_spmat :: "('a::lattice_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
primrec
"mult_spmat [] A = []"
@@ -357,7 +357,7 @@
done
lemma sorted_spvec_mult_spmat[rule_format]:
- "sorted_spvec (A::('a::lordered_ring) spmat) \<longrightarrow> sorted_spvec (mult_spmat A B)"
+ "sorted_spvec (A::('a::lattice_ring) spmat) \<longrightarrow> sorted_spvec (mult_spmat A B)"
apply (induct A)
apply (auto)
apply (drule sorted_spvec_cons1, simp)
@@ -366,13 +366,13 @@
done
lemma sorted_spmat_mult_spmat:
- "sorted_spmat (B::('a::lordered_ring) spmat) \<Longrightarrow> sorted_spmat (mult_spmat A B)"
+ "sorted_spmat (B::('a::lattice_ring) spmat) \<Longrightarrow> sorted_spmat (mult_spmat A B)"
apply (induct A)
apply (auto simp add: sorted_mult_spvec_spmat)
done
-fun add_spvec :: "('a::lordered_ab_group_add) spvec \<Rightarrow> 'a spvec \<Rightarrow> 'a spvec" where
+fun add_spvec :: "('a::lattice_ab_group_add) spvec \<Rightarrow> 'a spvec \<Rightarrow> 'a spvec" where
(* "measure (% (a, b). length a + (length b))" *)
"add_spvec arr [] = arr" |
"add_spvec [] brr = brr" |
@@ -389,7 +389,7 @@
apply (simp_all add: singleton_matrix_add)
done
-fun add_spmat :: "('a::lordered_ab_group_add) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat" where
+fun add_spmat :: "('a::lattice_ab_group_add) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat" where
(* "measure (% (A,B). (length A)+(length B))" *)
"add_spmat [] bs = bs" |
"add_spmat as [] = as" |
@@ -532,7 +532,7 @@
apply (simp_all add: sorted_spvec_add_spvec)
done
-fun le_spvec :: "('a::lordered_ab_group_add) spvec \<Rightarrow> 'a spvec \<Rightarrow> bool" where
+fun le_spvec :: "('a::lattice_ab_group_add) spvec \<Rightarrow> 'a spvec \<Rightarrow> bool" where
(* "measure (% (a,b). (length a) + (length b))" *)
"le_spvec [] [] = True" |
"le_spvec ((_,a)#as) [] = (a <= 0 & le_spvec as [])" |
@@ -542,7 +542,7 @@
else if (j < i) then 0 <= b & le_spvec ((i,a)#as) bs
else a <= b & le_spvec as bs)"
-fun le_spmat :: "('a::lordered_ab_group_add) spmat \<Rightarrow> 'a spmat \<Rightarrow> bool" where
+fun le_spmat :: "('a::lattice_ab_group_add) spmat \<Rightarrow> 'a spmat \<Rightarrow> bool" where
(* "measure (% (a,b). (length a) + (length b))" *)
"le_spmat [] [] = True" |
"le_spmat ((i,a)#as) [] = (le_spvec a [] & le_spmat as [])" |
@@ -566,7 +566,7 @@
lemma disj_matrices_add: "disj_matrices A B \<Longrightarrow> disj_matrices C D \<Longrightarrow> disj_matrices A D \<Longrightarrow> disj_matrices B C \<Longrightarrow>
- (A + B <= C + D) = (A <= C & B <= (D::('a::lordered_ab_group_add) matrix))"
+ (A + B <= C + D) = (A <= C & B <= (D::('a::lattice_ab_group_add) matrix))"
apply (auto)
apply (simp (no_asm_use) only: le_matrix_def disj_matrices_def)
apply (intro strip)
@@ -596,19 +596,19 @@
by (auto simp add: disj_matrices_def)
lemma disj_matrices_add_le_zero: "disj_matrices A B \<Longrightarrow>
- (A + B <= 0) = (A <= 0 & (B::('a::lordered_ab_group_add) matrix) <= 0)"
+ (A + B <= 0) = (A <= 0 & (B::('a::lattice_ab_group_add) matrix) <= 0)"
by (rule disj_matrices_add[of A B 0 0, simplified])
lemma disj_matrices_add_zero_le: "disj_matrices A B \<Longrightarrow>
- (0 <= A + B) = (0 <= A & 0 <= (B::('a::lordered_ab_group_add) matrix))"
+ (0 <= A + B) = (0 <= A & 0 <= (B::('a::lattice_ab_group_add) matrix))"
by (rule disj_matrices_add[of 0 0 A B, simplified])
lemma disj_matrices_add_x_le: "disj_matrices A B \<Longrightarrow> disj_matrices B C \<Longrightarrow>
- (A <= B + C) = (A <= C & 0 <= (B::('a::lordered_ab_group_add) matrix))"
+ (A <= B + C) = (A <= C & 0 <= (B::('a::lattice_ab_group_add) matrix))"
by (auto simp add: disj_matrices_add[of 0 A B C, simplified])
lemma disj_matrices_add_le_x: "disj_matrices A B \<Longrightarrow> disj_matrices B C \<Longrightarrow>
- (B + A <= C) = (A <= C & (B::('a::lordered_ab_group_add) matrix) <= 0)"
+ (B + A <= C) = (A <= C & (B::('a::lattice_ab_group_add) matrix) <= 0)"
by (auto simp add: disj_matrices_add[of B A 0 C,simplified] disj_matrices_commute)
lemma disj_sparse_row_singleton: "i <= j \<Longrightarrow> sorted_spvec((j,y)#v) \<Longrightarrow> disj_matrices (sparse_row_vector v) (singleton_matrix 0 i x)"
@@ -624,7 +624,7 @@
apply (simp_all)
done
-lemma disj_matrices_x_add: "disj_matrices A B \<Longrightarrow> disj_matrices A C \<Longrightarrow> disj_matrices (A::('a::lordered_ab_group_add) matrix) (B+C)"
+lemma disj_matrices_x_add: "disj_matrices A B \<Longrightarrow> disj_matrices A C \<Longrightarrow> disj_matrices (A::('a::lattice_ab_group_add) matrix) (B+C)"
apply (simp add: disj_matrices_def)
apply (auto)
apply (drule_tac j=j and i=i in spec2)+
@@ -633,7 +633,7 @@
apply (simp_all)
done
-lemma disj_matrices_add_x: "disj_matrices A B \<Longrightarrow> disj_matrices A C \<Longrightarrow> disj_matrices (B+C) (A::('a::lordered_ab_group_add) matrix)"
+lemma disj_matrices_add_x: "disj_matrices A B \<Longrightarrow> disj_matrices A C \<Longrightarrow> disj_matrices (B+C) (A::('a::lattice_ab_group_add) matrix)"
by (simp add: disj_matrices_x_add disj_matrices_commute)
lemma disj_singleton_matrices[simp]: "disj_matrices (singleton_matrix j i x) (singleton_matrix u v y) = (j \<noteq> u | i \<noteq> v | x = 0 | y = 0)"
@@ -731,11 +731,11 @@
declare [[simp_depth_limit = 999]]
-primrec abs_spmat :: "('a::lordered_ring) spmat \<Rightarrow> 'a spmat" where
+primrec abs_spmat :: "('a::lattice_ring) spmat \<Rightarrow> 'a spmat" where
"abs_spmat [] = []" |
"abs_spmat (a#as) = (fst a, abs_spvec (snd a))#(abs_spmat as)"
-primrec minus_spmat :: "('a::lordered_ring) spmat \<Rightarrow> 'a spmat" where
+primrec minus_spmat :: "('a::lattice_ring) spmat \<Rightarrow> 'a spmat" where
"minus_spmat [] = []" |
"minus_spmat (a#as) = (fst a, minus_spvec (snd a))#(minus_spmat as)"
@@ -803,7 +803,7 @@
done
constdefs
- diff_spmat :: "('a::lordered_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
+ diff_spmat :: "('a::lattice_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
"diff_spmat A B == add_spmat A (minus_spmat B)"
lemma sorted_spmat_diff_spmat: "sorted_spmat A \<Longrightarrow> sorted_spmat B \<Longrightarrow> sorted_spmat (diff_spmat A B)"
@@ -845,10 +845,10 @@
lemma if_case_eq: "(if b then x else y) = (case b of True => x | False => y)" by simp
consts
- pprt_spvec :: "('a::{lordered_ab_group_add}) spvec \<Rightarrow> 'a spvec"
- nprt_spvec :: "('a::{lordered_ab_group_add}) spvec \<Rightarrow> 'a spvec"
- pprt_spmat :: "('a::{lordered_ab_group_add}) spmat \<Rightarrow> 'a spmat"
- nprt_spmat :: "('a::{lordered_ab_group_add}) spmat \<Rightarrow> 'a spmat"
+ pprt_spvec :: "('a::{lattice_ab_group_add}) spvec \<Rightarrow> 'a spvec"
+ nprt_spvec :: "('a::{lattice_ab_group_add}) spvec \<Rightarrow> 'a spvec"
+ pprt_spmat :: "('a::{lattice_ab_group_add}) spmat \<Rightarrow> 'a spmat"
+ nprt_spmat :: "('a::{lattice_ab_group_add}) spmat \<Rightarrow> 'a spmat"
primrec
"pprt_spvec [] = []"
@@ -869,7 +869,7 @@
(*case (nprt_spvec (snd a)) of [] \<Rightarrow> (nprt_spmat as) | y#ys \<Rightarrow> (fst a, y#ys)#(nprt_spmat as))"*)
-lemma pprt_add: "disj_matrices A (B::(_::lordered_ring) matrix) \<Longrightarrow> pprt (A+B) = pprt A + pprt B"
+lemma pprt_add: "disj_matrices A (B::(_::lattice_ring) matrix) \<Longrightarrow> pprt (A+B) = pprt A + pprt B"
apply (simp add: pprt_def sup_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
apply (rule ext)+
@@ -878,7 +878,7 @@
apply (simp_all add: disj_matrices_contr1)
done
-lemma nprt_add: "disj_matrices A (B::(_::lordered_ring) matrix) \<Longrightarrow> nprt (A+B) = nprt A + nprt B"
+lemma nprt_add: "disj_matrices A (B::(_::lattice_ring) matrix) \<Longrightarrow> nprt (A+B) = nprt A + nprt B"
apply (simp add: nprt_def inf_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
apply (rule ext)+
@@ -887,14 +887,14 @@
apply (simp_all add: disj_matrices_contr1)
done
-lemma pprt_singleton[simp]: "pprt (singleton_matrix j i (x::_::lordered_ring)) = singleton_matrix j i (pprt x)"
+lemma pprt_singleton[simp]: "pprt (singleton_matrix j i (x::_::lattice_ring)) = singleton_matrix j i (pprt x)"
apply (simp add: pprt_def sup_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
apply (rule ext)+
apply simp
done
-lemma nprt_singleton[simp]: "nprt (singleton_matrix j i (x::_::lordered_ring)) = singleton_matrix j i (nprt x)"
+lemma nprt_singleton[simp]: "nprt (singleton_matrix j i (x::_::lattice_ring)) = singleton_matrix j i (nprt x)"
apply (simp add: nprt_def inf_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
apply (rule ext)+
@@ -903,7 +903,7 @@
lemma less_imp_le: "a < b \<Longrightarrow> a <= (b::_::order)" by (simp add: less_def)
-lemma sparse_row_vector_pprt: "sorted_spvec (v :: 'a::lordered_ring spvec) \<Longrightarrow> sparse_row_vector (pprt_spvec v) = pprt (sparse_row_vector v)"
+lemma sparse_row_vector_pprt: "sorted_spvec (v :: 'a::lattice_ring spvec) \<Longrightarrow> sparse_row_vector (pprt_spvec v) = pprt (sparse_row_vector v)"
apply (induct v)
apply (simp_all)
apply (frule sorted_spvec_cons1, auto)
@@ -913,7 +913,7 @@
apply auto
done
-lemma sparse_row_vector_nprt: "sorted_spvec (v :: 'a::lordered_ring spvec) \<Longrightarrow> sparse_row_vector (nprt_spvec v) = nprt (sparse_row_vector v)"
+lemma sparse_row_vector_nprt: "sorted_spvec (v :: 'a::lattice_ring spvec) \<Longrightarrow> sparse_row_vector (nprt_spvec v) = nprt (sparse_row_vector v)"
apply (induct v)
apply (simp_all)
apply (frule sorted_spvec_cons1, auto)
@@ -924,7 +924,7 @@
done
-lemma pprt_move_matrix: "pprt (move_matrix (A::('a::lordered_ring) matrix) j i) = move_matrix (pprt A) j i"
+lemma pprt_move_matrix: "pprt (move_matrix (A::('a::lattice_ring) matrix) j i) = move_matrix (pprt A) j i"
apply (simp add: pprt_def)
apply (simp add: sup_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
@@ -932,7 +932,7 @@
apply (simp)
done
-lemma nprt_move_matrix: "nprt (move_matrix (A::('a::lordered_ring) matrix) j i) = move_matrix (nprt A) j i"
+lemma nprt_move_matrix: "nprt (move_matrix (A::('a::lattice_ring) matrix) j i) = move_matrix (nprt A) j i"
apply (simp add: nprt_def)
apply (simp add: inf_matrix_def)
apply (simp add: Rep_matrix_inject[symmetric])
@@ -940,7 +940,7 @@
apply (simp)
done
-lemma sparse_row_matrix_pprt: "sorted_spvec (m :: 'a::lordered_ring spmat) \<Longrightarrow> sorted_spmat m \<Longrightarrow> sparse_row_matrix (pprt_spmat m) = pprt (sparse_row_matrix m)"
+lemma sparse_row_matrix_pprt: "sorted_spvec (m :: 'a::lattice_ring spmat) \<Longrightarrow> sorted_spmat m \<Longrightarrow> sparse_row_matrix (pprt_spmat m) = pprt (sparse_row_matrix m)"
apply (induct m)
apply simp
apply simp
@@ -956,7 +956,7 @@
apply (simp add: pprt_move_matrix)
done
-lemma sparse_row_matrix_nprt: "sorted_spvec (m :: 'a::lordered_ring spmat) \<Longrightarrow> sorted_spmat m \<Longrightarrow> sparse_row_matrix (nprt_spmat m) = nprt (sparse_row_matrix m)"
+lemma sparse_row_matrix_nprt: "sorted_spvec (m :: 'a::lattice_ring spmat) \<Longrightarrow> sorted_spmat m \<Longrightarrow> sparse_row_matrix (nprt_spmat m) = nprt (sparse_row_matrix m)"
apply (induct m)
apply simp
apply simp
@@ -1015,7 +1015,7 @@
done
constdefs
- mult_est_spmat :: "('a::lordered_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
+ mult_est_spmat :: "('a::lattice_ring) spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat \<Rightarrow> 'a spmat"
"mult_est_spmat r1 r2 s1 s2 ==
add_spmat (mult_spmat (pprt_spmat s2) (pprt_spmat r2)) (add_spmat (mult_spmat (pprt_spmat s1) (nprt_spmat r2))
(add_spmat (mult_spmat (nprt_spmat s2) (pprt_spmat r1)) (mult_spmat (nprt_spmat s1) (nprt_spmat r1))))"
@@ -1057,7 +1057,7 @@
"sorted_spvec b"
"sorted_spvec r"
"le_spmat ([], y)"
- "A * x \<le> sparse_row_matrix (b::('a::lordered_ring) spmat)"
+ "A * x \<le> sparse_row_matrix (b::('a::lattice_ring) spmat)"
"sparse_row_matrix A1 <= A"
"A <= sparse_row_matrix A2"
"sparse_row_matrix c1 <= c"
--- a/src/HOL/Matrix/cplex/Cplex.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/cplex/Cplex.thy Tue Feb 09 17:06:05 2010 +0100
@@ -25,7 +25,7 @@
"c \<le> sparse_row_matrix c2"
"sparse_row_matrix r1 \<le> x"
"x \<le> sparse_row_matrix r2"
- "A * x \<le> sparse_row_matrix (b::('a::lordered_ring) spmat)"
+ "A * x \<le> sparse_row_matrix (b::('a::lattice_ring) spmat)"
shows
"c * x \<le> sparse_row_matrix (add_spmat (mult_spmat y b)
(let s1 = diff_spmat c1 (mult_spmat y A2); s2 = diff_spmat c2 (mult_spmat y A1) in
@@ -55,7 +55,7 @@
"c \<le> sparse_row_matrix c2"
"sparse_row_matrix r1 \<le> x"
"x \<le> sparse_row_matrix r2"
- "A * x \<le> sparse_row_matrix (b::('a::lordered_ring) spmat)"
+ "A * x \<le> sparse_row_matrix (b::('a::lattice_ring) spmat)"
shows
"c * x \<le> sparse_row_matrix (add_spmat (mult_spmat y b)
(mult_est_spmat r1 r2 (diff_spmat c1 (mult_spmat y A2)) (diff_spmat c2 (mult_spmat y A1))))"
--- a/src/HOL/Matrix/cplex/Cplex_tools.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/cplex/Cplex_tools.ML Tue Feb 09 17:06:05 2010 +0100
@@ -1145,7 +1145,7 @@
val cplex_path = getenv "GLPK_PATH"
val cplex = if cplex_path = "" then "glpsol" else cplex_path
val command = (wrap cplex)^" --lpt "^(wrap lpname)^" --output "^(wrap resultname)
- val answer = #1 (system_out command)
+ val answer = #1 (bash_output command)
in
let
val result = load_glpkResult resultname
@@ -1178,7 +1178,7 @@
val cplex = if cplex_path = "" then "cplex" else cplex_path
val _ = write_script scriptname lpname resultname
val command = (wrap cplex)^" < "^(wrap scriptname)^" > /dev/null"
- val answer = "return code "^(Int.toString (system command))
+ val answer = "return code "^(Int.toString (bash command))
in
let
val result = load_cplexResult resultname
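For the system_out ~> bash_output switch in Cplex_tools.ML above, a minimal sketch, assuming bash_output returns the captured output paired with the return code (consistent with the #1 projection above) and that bash returns the return code alone. The scratch theory and the echo/true commands are illustrative only.

theory Bash_Sketch
imports Main
begin

ML {*
  (* illustrative only: bash_output runs a shell command and is assumed to
     return (output, return code); bash is assumed to return just the code *)
  val (output, rc) = bash_output "echo hello";
  val rc' = bash "true";
*}

end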
--- a/src/HOL/Matrix/cplex/matrixlp.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Matrix/cplex/matrixlp.ML Tue Feb 09 17:06:05 2010 +0100
@@ -18,7 +18,7 @@
fun inst_real thm =
let val certT = ctyp_of (Thm.theory_of_thm thm) in
- Drule.standard (Thm.instantiate
+ Drule.export_without_context (Thm.instantiate
([(certT (TVar (hd (OldTerm.term_tvars (prop_of thm)))), certT HOLogic.realT)], []) thm)
end
@@ -59,7 +59,7 @@
val ord = prod_ord (prod_ord string_ord int_ord) (list_ord string_ord)
val v = TVar (hd (sort ord (OldTerm.term_tvars (prop_of thm))))
in
- Drule.standard (Thm.instantiate ([(certT v, certT ty)], []) thm)
+ Drule.export_without_context (Thm.instantiate ([(certT v, certT ty)], []) thm)
end
fun inst_tvars [] thms = thms
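For the Drule.standard ~> Drule.export_without_context rename in matrixlp.ML above, a minimal hypothetical sketch; it assumes only that export_without_context, like standard before it, maps a thm to a thm.

theory Export_Sketch
imports Main
begin

ML {*
  (* illustrative only: export_without_context replaces Drule.standard,
     turning a theorem into its exported, context-independent form *)
  val refl' = Drule.export_without_context @{thm refl};
*}

end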
--- a/src/HOL/Metis_Examples/BigO.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Metis_Examples/BigO.thy Tue Feb 09 17:06:05 2010 +0100
@@ -12,11 +12,11 @@
subsection {* Definitions *}
-definition bigo :: "('a => 'b::ordered_idom) => ('a => 'b) set" ("(1O'(_'))") where
+definition bigo :: "('a => 'b::linordered_idom) => ('a => 'b) set" ("(1O'(_'))") where
"O(f::('a => 'b)) == {h. EX c. ALL x. abs (h x) <= c * abs (f x)}"
declare [[ atp_problem_prefix = "BigO__bigo_pos_const" ]]
-lemma bigo_pos_const: "(EX (c::'a::ordered_idom).
+lemma bigo_pos_const: "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -30,7 +30,7 @@
declare [[sledgehammer_modulus = 1]]
-lemma (*bigo_pos_const:*) "(EX (c::'a::ordered_idom).
+lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -39,59 +39,59 @@
apply (rule_tac x = "abs c" in exI, auto)
proof (neg_clausify)
fix c x
-have 0: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<bar>X1 * X2\<bar> = \<bar>X2 * X1\<bar>"
+have 0: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<bar>X1 * X2\<bar> = \<bar>X2 * X1\<bar>"
by (metis abs_mult mult_commute)
-have 1: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- X1 \<le> (0\<Colon>'a\<Colon>ordered_idom) \<or> \<bar>X2\<bar> * X1 = \<bar>X2 * X1\<bar>"
+have 1: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ X1 \<le> (0\<Colon>'a\<Colon>linordered_idom) \<or> \<bar>X2\<bar> * X1 = \<bar>X2 * X1\<bar>"
by (metis abs_mult_pos linorder_linear)
-have 2: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- \<not> (0\<Colon>'a\<Colon>ordered_idom) < X1 * X2 \<or>
- \<not> (0\<Colon>'a\<Colon>ordered_idom) \<le> X2 \<or> \<not> X1 \<le> (0\<Colon>'a\<Colon>ordered_idom)"
+have 2: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ \<not> (0\<Colon>'a\<Colon>linordered_idom) < X1 * X2 \<or>
+ \<not> (0\<Colon>'a\<Colon>linordered_idom) \<le> X2 \<or> \<not> X1 \<le> (0\<Colon>'a\<Colon>linordered_idom)"
by (metis linorder_not_less mult_nonneg_nonpos2)
assume 3: "\<And>x\<Colon>'b\<Colon>type.
- \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-assume 4: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
- \<le> \<bar>c\<Colon>'a\<Colon>ordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-have 5: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
+ \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+assume 4: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
+ \<le> \<bar>c\<Colon>'a\<Colon>linordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+have 5: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
by (metis 4 abs_mult)
-have 6: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- \<not> X1 \<le> (0\<Colon>'a\<Colon>ordered_idom) \<or> X1 \<le> \<bar>X2\<bar>"
+have 6: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ \<not> X1 \<le> (0\<Colon>'a\<Colon>linordered_idom) \<or> X1 \<le> \<bar>X2\<bar>"
by (metis abs_ge_zero xt1(6))
-have 7: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- X1 \<le> \<bar>X2\<bar> \<or> (0\<Colon>'a\<Colon>ordered_idom) < X1"
+have 7: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ X1 \<le> \<bar>X2\<bar> \<or> (0\<Colon>'a\<Colon>linordered_idom) < X1"
by (metis not_leE 6)
-have 8: "(0\<Colon>'a\<Colon>ordered_idom) < \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
+have 8: "(0\<Colon>'a\<Colon>linordered_idom) < \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
by (metis 5 7)
-have 9: "\<And>X1\<Colon>'a\<Colon>ordered_idom.
- \<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar> \<le> X1 \<or>
- (0\<Colon>'a\<Colon>ordered_idom) < X1"
+have 9: "\<And>X1\<Colon>'a\<Colon>linordered_idom.
+ \<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar> \<le> X1 \<or>
+ (0\<Colon>'a\<Colon>linordered_idom) < X1"
by (metis 8 order_less_le_trans)
-have 10: "(0\<Colon>'a\<Colon>ordered_idom)
-< (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
+have 10: "(0\<Colon>'a\<Colon>linordered_idom)
+< (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
by (metis 3 9)
-have 11: "\<not> (c\<Colon>'a\<Colon>ordered_idom) \<le> (0\<Colon>'a\<Colon>ordered_idom)"
+have 11: "\<not> (c\<Colon>'a\<Colon>linordered_idom) \<le> (0\<Colon>'a\<Colon>linordered_idom)"
by (metis abs_ge_zero 2 10)
-have 12: "\<And>X1\<Colon>'a\<Colon>ordered_idom. (c\<Colon>'a\<Colon>ordered_idom) * \<bar>X1\<bar> = \<bar>X1 * c\<bar>"
+have 12: "\<And>X1\<Colon>'a\<Colon>linordered_idom. (c\<Colon>'a\<Colon>linordered_idom) * \<bar>X1\<bar> = \<bar>X1 * c\<bar>"
by (metis mult_commute 1 11)
have 13: "\<And>X1\<Colon>'b\<Colon>type.
- - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1
- \<le> (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1\<bar>"
+ - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1
+ \<le> (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1\<bar>"
by (metis 3 abs_le_D2)
have 14: "\<And>X1\<Colon>'b\<Colon>type.
- - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1\<bar>"
+ - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1\<bar>"
by (metis 0 12 13)
-have 15: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
+have 15: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
by (metis abs_mult abs_mult_pos abs_ge_zero)
-have 16: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. X1 \<le> \<bar>X2\<bar> \<or> \<not> X1 \<le> X2"
+have 16: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. X1 \<le> \<bar>X2\<bar> \<or> \<not> X1 \<le> X2"
by (metis xt1(6) abs_ge_self)
-have 17: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<not> \<bar>X1\<bar> \<le> X2 \<or> X1 \<le> \<bar>X2\<bar>"
+have 17: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<not> \<bar>X1\<bar> \<le> X2 \<or> X1 \<le> \<bar>X2\<bar>"
by (metis 16 abs_le_D1)
have 18: "\<And>X1\<Colon>'b\<Colon>type.
- (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1\<bar>"
+ (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1\<bar>"
by (metis 17 3 15)
show "False"
by (metis abs_le_iff 5 18 14)
@@ -99,7 +99,7 @@
declare [[sledgehammer_modulus = 2]]
-lemma (*bigo_pos_const:*) "(EX (c::'a::ordered_idom).
+lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -108,31 +108,31 @@
apply (rule_tac x = "abs c" in exI, auto);
proof (neg_clausify)
fix c x
-have 0: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<bar>X1 * X2\<bar> = \<bar>X2 * X1\<bar>"
+have 0: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<bar>X1 * X2\<bar> = \<bar>X2 * X1\<bar>"
by (metis abs_mult mult_commute)
assume 1: "\<And>x\<Colon>'b\<Colon>type.
- \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-assume 2: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
- \<le> \<bar>c\<Colon>'a\<Colon>ordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-have 3: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
+ \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+assume 2: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
+ \<le> \<bar>c\<Colon>'a\<Colon>linordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+have 3: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
by (metis 2 abs_mult)
-have 4: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- \<not> X1 \<le> (0\<Colon>'a\<Colon>ordered_idom) \<or> X1 \<le> \<bar>X2\<bar>"
+have 4: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ \<not> X1 \<le> (0\<Colon>'a\<Colon>linordered_idom) \<or> X1 \<le> \<bar>X2\<bar>"
by (metis abs_ge_zero xt1(6))
-have 5: "(0\<Colon>'a\<Colon>ordered_idom) < \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
+have 5: "(0\<Colon>'a\<Colon>linordered_idom) < \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
by (metis not_leE 4 3)
-have 6: "(0\<Colon>'a\<Colon>ordered_idom)
-< (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
+have 6: "(0\<Colon>'a\<Colon>linordered_idom)
+< (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>"
by (metis 1 order_less_le_trans 5)
-have 7: "\<And>X1\<Colon>'a\<Colon>ordered_idom. (c\<Colon>'a\<Colon>ordered_idom) * \<bar>X1\<bar> = \<bar>X1 * c\<bar>"
+have 7: "\<And>X1\<Colon>'a\<Colon>linordered_idom. (c\<Colon>'a\<Colon>linordered_idom) * \<bar>X1\<bar> = \<bar>X1 * c\<bar>"
by (metis abs_ge_zero linorder_not_less mult_nonneg_nonpos2 6 linorder_linear abs_mult_pos mult_commute)
have 8: "\<And>X1\<Colon>'b\<Colon>type.
- - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1\<bar>"
+ - (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1\<bar>"
by (metis 0 7 abs_le_D2 1)
-have 9: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<not> \<bar>X1\<bar> \<le> X2 \<or> X1 \<le> \<bar>X2\<bar>"
+have 9: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<not> \<bar>X1\<bar> \<le> X2 \<or> X1 \<le> \<bar>X2\<bar>"
by (metis abs_ge_self xt1(6) abs_le_D1)
show "False"
by (metis 8 abs_ge_zero abs_mult_pos abs_mult 1 9 3 abs_le_iff)
@@ -140,7 +140,7 @@
declare [[sledgehammer_modulus = 3]]
-lemma (*bigo_pos_const:*) "(EX (c::'a::ordered_idom).
+lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -150,20 +150,20 @@
proof (neg_clausify)
fix c x
assume 0: "\<And>x\<Colon>'b\<Colon>type.
- \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'a\<Colon>ordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-assume 1: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
- \<le> \<bar>c\<Colon>'a\<Colon>ordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) x\<bar>"
-have 2: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom.
- X1 \<le> \<bar>X2\<bar> \<or> (0\<Colon>'a\<Colon>ordered_idom) < X1"
+ \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'a\<Colon>linordered_idom) * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+assume 1: "\<not> \<bar>(h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) (x\<Colon>'b\<Colon>type)\<bar>
+ \<le> \<bar>c\<Colon>'a\<Colon>linordered_idom\<bar> * \<bar>(f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) x\<bar>"
+have 2: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom.
+ X1 \<le> \<bar>X2\<bar> \<or> (0\<Colon>'a\<Colon>linordered_idom) < X1"
by (metis abs_ge_zero xt1(6) not_leE)
-have 3: "\<not> (c\<Colon>'a\<Colon>ordered_idom) \<le> (0\<Colon>'a\<Colon>ordered_idom)"
+have 3: "\<not> (c\<Colon>'a\<Colon>linordered_idom) \<le> (0\<Colon>'a\<Colon>linordered_idom)"
by (metis abs_ge_zero mult_nonneg_nonpos2 linorder_not_less order_less_le_trans 1 abs_mult 2 0)
-have 4: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2\<Colon>'a\<Colon>ordered_idom. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
+have 4: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2\<Colon>'a\<Colon>linordered_idom. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
by (metis abs_ge_zero abs_mult_pos abs_mult)
have 5: "\<And>X1\<Colon>'b\<Colon>type.
- (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1
- \<le> \<bar>(c\<Colon>'a\<Colon>ordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>ordered_idom) X1\<bar>"
+ (h\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1
+ \<le> \<bar>(c\<Colon>'a\<Colon>linordered_idom) * (f\<Colon>'b\<Colon>type \<Rightarrow> 'a\<Colon>linordered_idom) X1\<bar>"
by (metis 4 0 xt1(6) abs_ge_self abs_le_D1)
show "False"
by (metis abs_mult mult_commute 3 abs_mult_pos linorder_linear 0 abs_le_D2 5 1 abs_le_iff)
@@ -172,7 +172,7 @@
declare [[sledgehammer_modulus = 1]]
-lemma (*bigo_pos_const:*) "(EX (c::'a::ordered_idom).
+lemma (*bigo_pos_const:*) "(EX (c::'a::linordered_idom).
ALL x. (abs (h x)) <= (c * (abs (f x))))
= (EX c. 0 < c & (ALL x. (abs(h x)) <= (c * (abs (f x)))))"
apply auto
@@ -181,7 +181,7 @@
apply (rule_tac x = "abs c" in exI, auto);
proof (neg_clausify)
fix c x (*sort/type constraint inserted by hand!*)
-have 0: "\<And>(X1\<Colon>'a\<Colon>ordered_idom) X2. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
+have 0: "\<And>(X1\<Colon>'a\<Colon>linordered_idom) X2. \<bar>X1 * \<bar>X2\<bar>\<bar> = \<bar>X1 * X2\<bar>"
by (metis abs_ge_zero abs_mult_pos abs_mult)
assume 1: "\<And>A. \<bar>h A\<bar> \<le> c * \<bar>f A\<bar>"
have 2: "\<And>X1 X2. \<not> \<bar>X1\<bar> \<le> X2 \<or> (0\<Colon>'a) \<le> X2"
@@ -368,7 +368,7 @@
f : O(g)"
apply (auto simp add: bigo_def)
(*Version 1: one-shot proof*)
- apply (metis OrderedGroup.abs_le_D1 linorder_class.not_less order_less_le Orderings.xt1(12) Ring_and_Field.abs_mult)
+ apply (metis abs_le_D1 linorder_class.not_less order_less_le Orderings.xt1(12) abs_mult)
done
lemma (*bigo_bounded_alt:*) "ALL x. 0 <= f x ==> ALL x. f x <= c * g x ==>
@@ -383,11 +383,11 @@
by (metis 0 order_antisym_conv)
have 3: "\<And>X3. \<not> f (x \<bar>X3\<bar>) \<le> \<bar>X3 * g (x \<bar>X3\<bar>)\<bar>"
by (metis 1 abs_mult)
-have 4: "\<And>X1 X3\<Colon>'b\<Colon>ordered_idom. X3 \<le> X1 \<or> X1 \<le> \<bar>X3\<bar>"
+have 4: "\<And>X1 X3\<Colon>'b\<Colon>linordered_idom. X3 \<le> X1 \<or> X1 \<le> \<bar>X3\<bar>"
by (metis linorder_linear abs_le_D1)
have 5: "\<And>X3::'b. \<bar>X3\<bar> * \<bar>X3\<bar> = X3 * X3"
by (metis abs_mult_self)
-have 6: "\<And>X3. \<not> X3 * X3 < (0\<Colon>'b\<Colon>ordered_idom)"
+have 6: "\<And>X3. \<not> X3 * X3 < (0\<Colon>'b\<Colon>linordered_idom)"
by (metis not_square_less_zero)
have 7: "\<And>X1 X3::'b. \<bar>X1\<bar> * \<bar>X3\<bar> = \<bar>X3 * X1\<bar>"
by (metis abs_mult mult_commute)
@@ -438,26 +438,26 @@
proof (neg_clausify)
fix x
assume 0: "\<And>A\<Colon>'a\<Colon>type.
- (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) A
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) A"
-assume 1: "\<And>A\<Colon>'b\<Colon>ordered_idom.
- \<not> (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) ((x\<Colon>'b\<Colon>ordered_idom \<Rightarrow> 'a\<Colon>type) A)
- \<le> A * \<bar>(g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) (x A)\<bar>"
+ (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) A
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) A"
+assume 1: "\<And>A\<Colon>'b\<Colon>linordered_idom.
+ \<not> (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) ((x\<Colon>'b\<Colon>linordered_idom \<Rightarrow> 'a\<Colon>type) A)
+ \<le> A * \<bar>(g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) (x A)\<bar>"
have 2: "\<And>X2\<Colon>'a\<Colon>type.
- \<not> (c\<Colon>'b\<Colon>ordered_idom) * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) X2
- < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) X2"
+ \<not> (c\<Colon>'b\<Colon>linordered_idom) * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) X2
+ < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) X2"
by (metis 0 linorder_not_le)
-have 3: "\<And>X2\<Colon>'b\<Colon>ordered_idom.
- \<not> (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) ((x\<Colon>'b\<Colon>ordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)
- \<le> \<bar>X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) (x \<bar>X2\<bar>)\<bar>"
+have 3: "\<And>X2\<Colon>'b\<Colon>linordered_idom.
+ \<not> (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) ((x\<Colon>'b\<Colon>linordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)
+ \<le> \<bar>X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) (x \<bar>X2\<bar>)\<bar>"
by (metis abs_mult 1)
-have 4: "\<And>X2\<Colon>'b\<Colon>ordered_idom.
- \<bar>X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) ((x\<Colon>'b\<Colon>ordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)\<bar>
- < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) (x \<bar>X2\<bar>)"
+have 4: "\<And>X2\<Colon>'b\<Colon>linordered_idom.
+ \<bar>X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) ((x\<Colon>'b\<Colon>linordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)\<bar>
+ < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) (x \<bar>X2\<bar>)"
by (metis 3 linorder_not_less)
-have 5: "\<And>X2\<Colon>'b\<Colon>ordered_idom.
- X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) ((x\<Colon>'b\<Colon>ordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)
- < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) (x \<bar>X2\<bar>)"
+have 5: "\<And>X2\<Colon>'b\<Colon>linordered_idom.
+ X2 * (g\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) ((x\<Colon>'b\<Colon>linordered_idom \<Rightarrow> 'a\<Colon>type) \<bar>X2\<bar>)
+ < (f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) (x \<bar>X2\<bar>)"
by (metis abs_less_iff 4)
show "False"
by (metis 2 5)
@@ -595,62 +595,62 @@
using [[ atp_problem_prefix = "BigO__bigo_mult_simpler" ]]
prefer 2
apply (metis mult_assoc mult_left_commute
- OrderedGroup.abs_of_pos OrderedGroup.mult_left_commute
- Ring_and_Field.abs_mult Ring_and_Field.mult_pos_pos)
+ abs_of_pos mult_left_commute
+ abs_mult mult_pos_pos)
apply (erule ssubst)
apply (subst abs_mult)
(*not quite BigO__bigo_mult_simpler_1 (a hard problem!) as abs_mult has
just been done*)
proof (neg_clausify)
fix a c b ca x
-assume 0: "(0\<Colon>'b\<Colon>ordered_idom) < (c\<Colon>'b\<Colon>ordered_idom)"
-assume 1: "\<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar>
-\<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>"
-assume 2: "\<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar>
-\<le> (ca\<Colon>'b\<Colon>ordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>"
-assume 3: "\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar> *
- \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> *
- ((ca\<Colon>'b\<Colon>ordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>)"
-have 4: "\<bar>c\<Colon>'b\<Colon>ordered_idom\<bar> = c"
- by (metis OrderedGroup.abs_of_pos 0)
-have 5: "\<And>X1\<Colon>'b\<Colon>ordered_idom. (c\<Colon>'b\<Colon>ordered_idom) * \<bar>X1\<bar> = \<bar>c * X1\<bar>"
- by (metis Ring_and_Field.abs_mult 4)
-have 6: "(0\<Colon>'b\<Colon>ordered_idom) = (1\<Colon>'b\<Colon>ordered_idom) \<or>
-(0\<Colon>'b\<Colon>ordered_idom) < (1\<Colon>'b\<Colon>ordered_idom)"
- by (metis OrderedGroup.abs_not_less_zero Ring_and_Field.abs_one Ring_and_Field.linorder_neqE_ordered_idom)
-have 7: "(0\<Colon>'b\<Colon>ordered_idom) < (1\<Colon>'b\<Colon>ordered_idom)"
- by (metis 6 Ring_and_Field.one_neq_zero)
-have 8: "\<bar>1\<Colon>'b\<Colon>ordered_idom\<bar> = (1\<Colon>'b\<Colon>ordered_idom)"
- by (metis OrderedGroup.abs_of_pos 7)
-have 9: "\<And>X1\<Colon>'b\<Colon>ordered_idom. (0\<Colon>'b\<Colon>ordered_idom) \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>X1\<bar>"
- by (metis OrderedGroup.abs_ge_zero 5)
-have 10: "\<And>X1\<Colon>'b\<Colon>ordered_idom. X1 * (1\<Colon>'b\<Colon>ordered_idom) = X1"
- by (metis Ring_and_Field.mult_cancel_right2 mult_commute)
-have 11: "\<And>X1\<Colon>'b\<Colon>ordered_idom. \<bar>\<bar>X1\<bar>\<bar> = \<bar>X1\<bar> * \<bar>1\<Colon>'b\<Colon>ordered_idom\<bar>"
- by (metis Ring_and_Field.abs_mult OrderedGroup.abs_idempotent 10)
-have 12: "\<And>X1\<Colon>'b\<Colon>ordered_idom. \<bar>\<bar>X1\<bar>\<bar> = \<bar>X1\<bar>"
+assume 0: "(0\<Colon>'b\<Colon>linordered_idom) < (c\<Colon>'b\<Colon>linordered_idom)"
+assume 1: "\<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar>
+\<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>"
+assume 2: "\<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar>
+\<le> (ca\<Colon>'b\<Colon>linordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>"
+assume 3: "\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar> *
+ \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> *
+ ((ca\<Colon>'b\<Colon>linordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>)"
+have 4: "\<bar>c\<Colon>'b\<Colon>linordered_idom\<bar> = c"
+ by (metis abs_of_pos 0)
+have 5: "\<And>X1\<Colon>'b\<Colon>linordered_idom. (c\<Colon>'b\<Colon>linordered_idom) * \<bar>X1\<bar> = \<bar>c * X1\<bar>"
+ by (metis abs_mult 4)
+have 6: "(0\<Colon>'b\<Colon>linordered_idom) = (1\<Colon>'b\<Colon>linordered_idom) \<or>
+(0\<Colon>'b\<Colon>linordered_idom) < (1\<Colon>'b\<Colon>linordered_idom)"
+ by (metis abs_not_less_zero abs_one linorder_neqE_linordered_idom)
+have 7: "(0\<Colon>'b\<Colon>linordered_idom) < (1\<Colon>'b\<Colon>linordered_idom)"
+ by (metis 6 one_neq_zero)
+have 8: "\<bar>1\<Colon>'b\<Colon>linordered_idom\<bar> = (1\<Colon>'b\<Colon>linordered_idom)"
+ by (metis abs_of_pos 7)
+have 9: "\<And>X1\<Colon>'b\<Colon>linordered_idom. (0\<Colon>'b\<Colon>linordered_idom) \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>X1\<bar>"
+ by (metis abs_ge_zero 5)
+have 10: "\<And>X1\<Colon>'b\<Colon>linordered_idom. X1 * (1\<Colon>'b\<Colon>linordered_idom) = X1"
+ by (metis mult_cancel_right2 mult_commute)
+have 11: "\<And>X1\<Colon>'b\<Colon>linordered_idom. \<bar>\<bar>X1\<bar>\<bar> = \<bar>X1\<bar> * \<bar>1\<Colon>'b\<Colon>linordered_idom\<bar>"
+ by (metis abs_mult abs_idempotent 10)
+have 12: "\<And>X1\<Colon>'b\<Colon>linordered_idom. \<bar>\<bar>X1\<bar>\<bar> = \<bar>X1\<bar>"
by (metis 11 8 10)
-have 13: "\<And>X1\<Colon>'b\<Colon>ordered_idom. (0\<Colon>'b\<Colon>ordered_idom) \<le> \<bar>X1\<bar>"
- by (metis OrderedGroup.abs_ge_zero 12)
-have 14: "\<not> (0\<Colon>'b\<Colon>ordered_idom)
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar> \<or>
-\<not> (0\<Colon>'b\<Colon>ordered_idom) \<le> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> \<or>
-\<not> \<bar>b x\<bar> \<le> (ca\<Colon>'b\<Colon>ordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> \<or>
-\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> \<le> c * \<bar>f x\<bar>"
- by (metis 3 Ring_and_Field.mult_mono)
-have 15: "\<not> (0\<Colon>'b\<Colon>ordered_idom) \<le> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar> \<or>
-\<not> \<bar>b x\<bar> \<le> (ca\<Colon>'b\<Colon>ordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> \<or>
-\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>"
+have 13: "\<And>X1\<Colon>'b\<Colon>linordered_idom. (0\<Colon>'b\<Colon>linordered_idom) \<le> \<bar>X1\<bar>"
+ by (metis abs_ge_zero 12)
+have 14: "\<not> (0\<Colon>'b\<Colon>linordered_idom)
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar> \<or>
+\<not> (0\<Colon>'b\<Colon>linordered_idom) \<le> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> \<or>
+\<not> \<bar>b x\<bar> \<le> (ca\<Colon>'b\<Colon>linordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> \<or>
+\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> \<le> c * \<bar>f x\<bar>"
+ by (metis 3 mult_mono)
+have 15: "\<not> (0\<Colon>'b\<Colon>linordered_idom) \<le> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar> \<or>
+\<not> \<bar>b x\<bar> \<le> (ca\<Colon>'b\<Colon>linordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> \<or>
+\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>"
by (metis 14 9)
-have 16: "\<not> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar>
- \<le> (ca\<Colon>'b\<Colon>ordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar> \<or>
-\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>"
+have 16: "\<not> \<bar>(b\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar>
+ \<le> (ca\<Colon>'b\<Colon>linordered_idom) * \<bar>(g\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar> \<or>
+\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>"
by (metis 15 13)
-have 17: "\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) (x\<Colon>'a)\<bar>
- \<le> (c\<Colon>'b\<Colon>ordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>ordered_idom) x\<bar>"
+have 17: "\<not> \<bar>(a\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) (x\<Colon>'a)\<bar>
+ \<le> (c\<Colon>'b\<Colon>linordered_idom) * \<bar>(f\<Colon>'a \<Rightarrow> 'b\<Colon>linordered_idom) x\<bar>"
by (metis 16 2)
show 18: "False"
by (metis 17 1)
@@ -682,7 +682,7 @@
lemma bigo_mult5: "ALL x. f x ~= 0 ==>
- O(f * g) <= (f::'a => ('b::ordered_field)) *o O(g)"
+ O(f * g) <= (f::'a => ('b::linordered_field)) *o O(g)"
proof -
assume "ALL x. f x ~= 0"
show "O(f * g) <= f *o O(g)"
@@ -712,14 +712,14 @@
declare [[ atp_problem_prefix = "BigO__bigo_mult6" ]]
lemma bigo_mult6: "ALL x. f x ~= 0 ==>
- O(f * g) = (f::'a => ('b::ordered_field)) *o O(g)"
+ O(f * g) = (f::'a => ('b::linordered_field)) *o O(g)"
by (metis bigo_mult2 bigo_mult5 order_antisym)
(*proof requires relaxing relevance: 2007-01-25*)
declare [[ atp_problem_prefix = "BigO__bigo_mult7" ]]
declare bigo_mult6 [simp]
lemma bigo_mult7: "ALL x. f x ~= 0 ==>
- O(f * g) <= O(f::'a => ('b::ordered_field)) \<otimes> O(g)"
+ O(f * g) <= O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
(*sledgehammer*)
apply (subst bigo_mult6)
apply assumption
@@ -731,7 +731,7 @@
declare [[ atp_problem_prefix = "BigO__bigo_mult8" ]]
declare bigo_mult7[intro!]
lemma bigo_mult8: "ALL x. f x ~= 0 ==>
- O(f * g) = O(f::'a => ('b::ordered_field)) \<otimes> O(g)"
+ O(f * g) = O(f::'a => ('b::linordered_field)) \<otimes> O(g)"
by (metis bigo_mult bigo_mult7 order_antisym_conv)
lemma bigo_minus [intro]: "f : O(g) ==> - f : O(g)"
@@ -810,11 +810,11 @@
lemma (*bigo_const2 [intro]:*) "O(%x. c) <= O(%x. 1)"
by (metis bigo_const1 bigo_elt_subset);
-lemma bigo_const2 [intro]: "O(%x. c::'b::ordered_idom) <= O(%x. 1)";
+lemma bigo_const2 [intro]: "O(%x. c::'b::linordered_idom) <= O(%x. 1)";
(*??FAILS because the two occurrences of COMBK have different polymorphic types
proof (neg_clausify)
-assume 0: "\<not> O(COMBK (c\<Colon>'b\<Colon>ordered_idom)) \<subseteq> O(COMBK (1\<Colon>'b\<Colon>ordered_idom))"
-have 1: "COMBK (c\<Colon>'b\<Colon>ordered_idom) \<notin> O(COMBK (1\<Colon>'b\<Colon>ordered_idom))"
+assume 0: "\<not> O(COMBK (c\<Colon>'b\<Colon>linordered_idom)) \<subseteq> O(COMBK (1\<Colon>'b\<Colon>linordered_idom))"
+have 1: "COMBK (c\<Colon>'b\<Colon>linordered_idom) \<notin> O(COMBK (1\<Colon>'b\<Colon>linordered_idom))"
apply (rule notI)
apply (rule 0 [THEN notE])
apply (rule bigo_elt_subset)
@@ -830,26 +830,26 @@
done
declare [[ atp_problem_prefix = "BigO__bigo_const3" ]]
-lemma bigo_const3: "(c::'a::ordered_field) ~= 0 ==> (%x. 1) : O(%x. c)"
+lemma bigo_const3: "(c::'a::linordered_field) ~= 0 ==> (%x. 1) : O(%x. c)"
apply (simp add: bigo_def)
proof (neg_clausify)
-assume 0: "(c\<Colon>'a\<Colon>ordered_field) \<noteq> (0\<Colon>'a\<Colon>ordered_field)"
-assume 1: "\<And>A\<Colon>'a\<Colon>ordered_field. \<not> (1\<Colon>'a\<Colon>ordered_field) \<le> A * \<bar>c\<Colon>'a\<Colon>ordered_field\<bar>"
-have 2: "(0\<Colon>'a\<Colon>ordered_field) = \<bar>c\<Colon>'a\<Colon>ordered_field\<bar> \<or>
-\<not> (1\<Colon>'a\<Colon>ordered_field) \<le> (1\<Colon>'a\<Colon>ordered_field)"
+assume 0: "(c\<Colon>'a\<Colon>linordered_field) \<noteq> (0\<Colon>'a\<Colon>linordered_field)"
+assume 1: "\<And>A\<Colon>'a\<Colon>linordered_field. \<not> (1\<Colon>'a\<Colon>linordered_field) \<le> A * \<bar>c\<Colon>'a\<Colon>linordered_field\<bar>"
+have 2: "(0\<Colon>'a\<Colon>linordered_field) = \<bar>c\<Colon>'a\<Colon>linordered_field\<bar> \<or>
+\<not> (1\<Colon>'a\<Colon>linordered_field) \<le> (1\<Colon>'a\<Colon>linordered_field)"
by (metis 1 field_inverse)
-have 3: "\<bar>c\<Colon>'a\<Colon>ordered_field\<bar> = (0\<Colon>'a\<Colon>ordered_field)"
+have 3: "\<bar>c\<Colon>'a\<Colon>linordered_field\<bar> = (0\<Colon>'a\<Colon>linordered_field)"
by (metis linorder_neq_iff linorder_antisym_conv1 2)
-have 4: "(0\<Colon>'a\<Colon>ordered_field) = (c\<Colon>'a\<Colon>ordered_field)"
+have 4: "(0\<Colon>'a\<Colon>linordered_field) = (c\<Colon>'a\<Colon>linordered_field)"
by (metis 3 abs_eq_0)
show "False"
by (metis 0 4)
qed
-lemma bigo_const4: "(c::'a::ordered_field) ~= 0 ==> O(%x. 1) <= O(%x. c)"
+lemma bigo_const4: "(c::'a::linordered_field) ~= 0 ==> O(%x. 1) <= O(%x. c)"
by (rule bigo_elt_subset, rule bigo_const3, assumption)
-lemma bigo_const [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const [simp]: "(c::'a::linordered_field) ~= 0 ==>
O(%x. c) = O(%x. 1)"
by (rule equalityI, rule bigo_const2, rule bigo_const4, assumption)
@@ -858,9 +858,9 @@
apply (simp add: bigo_def abs_mult)
proof (neg_clausify)
fix x
-assume 0: "\<And>xa\<Colon>'b\<Colon>ordered_idom.
- \<not> \<bar>c\<Colon>'b\<Colon>ordered_idom\<bar> *
- \<bar>(f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>ordered_idom) ((x\<Colon>'b\<Colon>ordered_idom \<Rightarrow> 'a\<Colon>type) xa)\<bar>
+assume 0: "\<And>xa\<Colon>'b\<Colon>linordered_idom.
+ \<not> \<bar>c\<Colon>'b\<Colon>linordered_idom\<bar> *
+ \<bar>(f\<Colon>'a\<Colon>type \<Rightarrow> 'b\<Colon>linordered_idom) ((x\<Colon>'b\<Colon>linordered_idom \<Rightarrow> 'a\<Colon>type) xa)\<bar>
\<le> xa * \<bar>f (x xa)\<bar>"
show "False"
by (metis linorder_neq_iff linorder_antisym_conv1 0)
@@ -870,7 +870,7 @@
by (rule bigo_elt_subset, rule bigo_const_mult1)
declare [[ atp_problem_prefix = "BigO__bigo_const_mult3" ]]
-lemma bigo_const_mult3: "(c::'a::ordered_field) ~= 0 ==> f : O(%x. c * f x)"
+lemma bigo_const_mult3: "(c::'a::linordered_field) ~= 0 ==> f : O(%x. c * f x)"
apply (simp add: bigo_def)
(*sledgehammer [no luck]*);
apply (rule_tac x = "abs(inverse c)" in exI)
@@ -879,16 +879,16 @@
apply (auto );
done
-lemma bigo_const_mult4: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult4: "(c::'a::linordered_field) ~= 0 ==>
O(f) <= O(%x. c * f x)"
by (rule bigo_elt_subset, rule bigo_const_mult3, assumption)
-lemma bigo_const_mult [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult [simp]: "(c::'a::linordered_field) ~= 0 ==>
O(%x. c * f x) = O(f)"
by (rule equalityI, rule bigo_const_mult2, erule bigo_const_mult4)
declare [[ atp_problem_prefix = "BigO__bigo_const_mult5" ]]
-lemma bigo_const_mult5 [simp]: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_const_mult5 [simp]: "(c::'a::linordered_field) ~= 0 ==>
(%x. c) *o O(f) = O(f)"
apply (auto del: subsetI)
apply (rule order_trans)
@@ -1057,7 +1057,7 @@
apply assumption+
done
-lemma bigo_useful_const_mult: "(c::'a::ordered_field) ~= 0 ==>
+lemma bigo_useful_const_mult: "(c::'a::linordered_field) ~= 0 ==>
(%x. c) * f =o O(h) ==> f =o O(h)"
apply (rule subsetD)
apply (subgoal_tac "(%x. 1 / c) *o O(h) <= O(h)")
@@ -1078,7 +1078,7 @@
apply (rule_tac x = c in exI)
apply safe
apply (case_tac "x = 0")
-apply (metis OrderedGroup.abs_ge_zero OrderedGroup.abs_zero order_less_le Ring_and_Field.split_mult_pos_le)
+apply (metis abs_ge_zero abs_zero order_less_le split_mult_pos_le)
apply (subgoal_tac "x = Suc (x - 1)")
apply metis
apply simp
@@ -1100,7 +1100,7 @@
subsection {* Less than or equal to *}
constdefs
- lesso :: "('a => 'b::ordered_idom) => ('a => 'b) => ('a => 'b)"
+ lesso :: "('a => 'b::linordered_idom) => ('a => 'b) => ('a => 'b)"
(infixl "<o" 70)
"f <o g == (%x. max (f x - g x) 0)"
@@ -1165,7 +1165,7 @@
proof (neg_clausify)
fix x
assume 0: "\<And>A. k A \<le> f A"
-have 1: "\<And>(X1\<Colon>'b\<Colon>ordered_idom) X2. \<not> max X1 X2 < X1"
+have 1: "\<And>(X1\<Colon>'b\<Colon>linordered_idom) X2. \<not> max X1 X2 < X1"
by (metis linorder_not_less le_maxI1) (*sort inserted by hand*)
assume 2: "(0\<Colon>'b) \<le> k x - g x"
have 3: "\<not> k x - g x < (0\<Colon>'b)"
@@ -1206,7 +1206,7 @@
apply (metis abs_ge_zero linorder_linear min_max.sup_absorb1 min_max.sup_commute)
done
-lemma bigo_lesso4: "f <o g =o O(k::'a=>'b::ordered_field) ==>
+lemma bigo_lesso4: "f <o g =o O(k::'a=>'b::linordered_field) ==>
g =o h +o O(k) ==> f <o h =o O(k)"
apply (unfold lesso_def)
apply (drule set_plus_imp_minus)
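For the ordered_idom ~> linordered_idom renaming running through BigO.thy above, a minimal scratch-theory sketch of a statement over the renamed class, discharged with abs_mult just as the metis calls above use it. The theory header and the free names h, f, x are illustrative only.

theory Linordered_Sketch
imports Main
begin

(* illustrative only: the renamed class linordered_idom as a sort constraint;
   abs_mult distributes abs over multiplication *)
lemma "abs (h x * f x) = abs (h x) * abs (f x :: 'a::linordered_idom)"
  by (simp add: abs_mult)

end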
--- a/src/HOL/Metis_Examples/Message.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Metis_Examples/Message.thy Tue Feb 09 17:06:05 2010 +0100
@@ -52,7 +52,7 @@
translations
"{|x, y, z|}" == "{|x, {|y, z|}|}"
- "{|x, y|}" == "MPair x y"
+ "{|x, y|}" == "CONST MPair x y"
constdefs
--- a/src/HOL/Metis_Examples/Tarski.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Metis_Examples/Tarski.thy Tue Feb 09 17:06:05 2010 +0100
@@ -78,11 +78,9 @@
{S. S \<subseteq> pset cl &
(| pset = S, order = induced S (order cl) |): CompleteLattice }"
-syntax
- "@SL" :: "['a set, 'a potype] => bool" ("_ <<= _" [51,50]50)
-
-translations
- "S <<= cl" == "S : sublattice `` {cl}"
+abbreviation
+ sublattice_syntax :: "['a set, 'a potype] => bool" ("_ <<= _" [51, 50] 50)
+ where "S <<= cl \<equiv> S : sublattice `` {cl}"
constdefs
dual :: "'a potype => 'a potype"
--- a/src/HOL/Modelcheck/EindhovenSyn.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Modelcheck/EindhovenSyn.thy Tue Feb 09 17:06:05 2010 +0100
@@ -40,7 +40,7 @@
val pmu =
if eindhoven_home = "" then error "Environment variable EINDHOVEN_HOME not set"
else eindhoven_home ^ "/pmu";
- in #1 (system_out ("echo \"" ^ s ^ "\" | " ^ pmu ^ " -w")) end;
+ in #1 (bash_output ("echo \"" ^ s ^ "\" | " ^ pmu ^ " -w")) end;
in
fn cgoal =>
let
--- a/src/HOL/Modelcheck/mucke_oracle.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Modelcheck/mucke_oracle.ML Tue Feb 09 17:06:05 2010 +0100
@@ -498,7 +498,7 @@
else mucke_home ^ "/mucke";
val mucke_input_file = File.tmp_path (Path.basic "tmp.mu");
val _ = File.write mucke_input_file s;
- val (result, _) = system_out (mucke ^ " -nb -res " ^ File.shell_path mucke_input_file);
+ val (result, _) = bash_output (mucke ^ " -nb -res " ^ File.shell_path mucke_input_file);
in
if not (!trace_mc) then (File.rm mucke_input_file) else ();
result
--- a/src/HOL/Multivariate_Analysis/Derivative.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Multivariate_Analysis/Derivative.thy Tue Feb 09 17:06:05 2010 +0100
@@ -1183,7 +1183,7 @@
fix y assume as:"y \<in> s" "0 < dist y x" "dist y x < e / (B * C * D)"
have "norm (h (f' (y - x)) (g' (y - x))) \<le> norm (f' (y - x)) * norm (g' (y - x)) * B" using B by auto
also have "\<dots> \<le> (norm (y - x) * C) * (D * norm (y - x)) * B" apply(rule mult_right_mono)
- apply(rule pordered_semiring_class.mult_mono) using B C D by (auto simp add: field_simps intro!:mult_nonneg_nonneg)
+ apply(rule mult_mono) using B C D by (auto simp add: field_simps intro!:mult_nonneg_nonneg)
also have "\<dots> = (B * C * D * norm (y - x)) * norm (y - x)" by(auto simp add:field_simps)
also have "\<dots> < e * norm (y - x)" apply(rule mult_strict_right_mono)
using as(3)[unfolded vector_dist_norm] and as(2) unfolding pos_less_divide_eq[OF bcd] by (auto simp add:field_simps)
--- a/src/HOL/Multivariate_Analysis/Determinants.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Multivariate_Analysis/Determinants.thy Tue Feb 09 17:06:05 2010 +0100
@@ -44,7 +44,7 @@
else setprod f {m..n})"
by (auto simp add: atLeastAtMostSuc_conv)
-lemma setprod_le: assumes fS: "finite S" and fg: "\<forall>x\<in>S. f x \<ge> 0 \<and> f x \<le> (g x :: 'a::ordered_idom)"
+lemma setprod_le: assumes fS: "finite S" and fg: "\<forall>x\<in>S. f x \<ge> 0 \<and> f x \<le> (g x :: 'a::linordered_idom)"
shows "setprod f S \<le> setprod g S"
using fS fg
apply(induct S)
@@ -61,7 +61,7 @@
apply simp
done
-lemma setprod_le_1: assumes fS: "finite S" and f: "\<forall>x\<in>S. f x \<ge> 0 \<and> f x \<le> (1::'a::ordered_idom)"
+lemma setprod_le_1: assumes fS: "finite S" and f: "\<forall>x\<in>S. f x \<ge> 0 \<and> f x \<le> (1::'a::linordered_idom)"
shows "setprod f S \<le> 1"
using setprod_le[OF fS f] unfolding setprod_1 .
@@ -277,7 +277,7 @@
qed
lemma det_identical_rows:
- fixes A :: "'a::ordered_idom^'n^'n"
+ fixes A :: "'a::linordered_idom^'n^'n"
assumes ij: "i \<noteq> j"
and r: "row i A = row j A"
shows "det A = 0"
@@ -295,7 +295,7 @@
qed
lemma det_identical_columns:
- fixes A :: "'a::ordered_idom^'n^'n"
+ fixes A :: "'a::linordered_idom^'n^'n"
assumes ij: "i \<noteq> j"
and r: "column i A = column j A"
shows "det A = 0"
@@ -407,7 +407,7 @@
unfolding vector_smult_lzero .
lemma det_row_operation:
- fixes A :: "'a::ordered_idom^'n^'n"
+ fixes A :: "'a::linordered_idom^'n^'n"
assumes ij: "i \<noteq> j"
shows "det (\<chi> k. if k = i then row i A + c *s row j A else row k A) = det A"
proof-
@@ -421,7 +421,7 @@
qed
lemma det_row_span:
- fixes A :: "'a:: ordered_idom^'n^'n"
+ fixes A :: "'a:: linordered_idom^'n^'n"
assumes x: "x \<in> span {row j A |j. j \<noteq> i}"
shows "det (\<chi> k. if k = i then row i A + x else row k A) = det A"
proof-
@@ -462,7 +462,7 @@
(* ------------------------------------------------------------------------- *)
lemma det_dependent_rows:
- fixes A:: "'a::ordered_idom^'n^'n"
+ fixes A:: "'a::linordered_idom^'n^'n"
assumes d: "dependent (rows A)"
shows "det A = 0"
proof-
@@ -488,7 +488,7 @@
ultimately show ?thesis by blast
qed
-lemma det_dependent_columns: assumes d: "dependent(columns (A::'a::ordered_idom^'n^'n))" shows "det A = 0"
+lemma det_dependent_columns: assumes d: "dependent(columns (A::'a::linordered_idom^'n^'n))" shows "det A = 0"
by (metis d det_dependent_rows rows_transp det_transp)
(* ------------------------------------------------------------------------- *)
@@ -608,7 +608,7 @@
qed
lemma det_mul:
- fixes A B :: "'a::ordered_idom^'n^'n"
+ fixes A B :: "'a::linordered_idom^'n^'n"
shows "det (A ** B) = det A * det B"
proof-
let ?U = "UNIV :: 'n set"
@@ -761,7 +761,7 @@
(* ------------------------------------------------------------------------- *)
lemma cramer_lemma_transp:
- fixes A:: "'a::ordered_idom^'n^'n" and x :: "'a ^'n"
+ fixes A:: "'a::linordered_idom^'n^'n" and x :: "'a ^'n"
shows "det ((\<chi> i. if i = k then setsum (\<lambda>i. x$i *s row i A) (UNIV::'n set)
else row i A)::'a^'n^'n) = x$k * det A"
(is "?lhs = ?rhs")
@@ -797,7 +797,7 @@
qed
lemma cramer_lemma:
- fixes A :: "'a::ordered_idom ^'n^'n"
+ fixes A :: "'a::linordered_idom ^'n^'n"
shows "det((\<chi> i j. if j = k then (A *v x)$i else A$i$j):: 'a^'n^'n) = x$k * det A"
proof-
let ?U = "UNIV :: 'n set"
@@ -893,7 +893,7 @@
qed
lemma det_orthogonal_matrix:
- fixes Q:: "'a::ordered_idom^'n^'n"
+ fixes Q:: "'a::linordered_idom^'n^'n"
assumes oQ: "orthogonal_matrix Q"
shows "det Q = 1 \<or> det Q = - 1"
proof-
@@ -1034,7 +1034,7 @@
definition "rotoinversion_matrix Q \<longleftrightarrow> orthogonal_matrix Q \<and> det Q = - 1"
lemma orthogonal_rotation_or_rotoinversion:
- fixes Q :: "'a::ordered_idom^'n^'n"
+ fixes Q :: "'a::linordered_idom^'n^'n"
shows " orthogonal_matrix Q \<longleftrightarrow> rotation_matrix Q \<or> rotoinversion_matrix Q"
by (metis rotoinversion_matrix_def rotation_matrix_def det_orthogonal_matrix)
(* ------------------------------------------------------------------------- *)
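The determinant lemmas above only swap the sort constraint, so statements over the renamed class go through unchanged. A minimal sketch, assuming abs_mult is still available under that name:

  lemma
    fixes a b :: "'a::linordered_idom"
    shows "\<bar>a\<bar> * \<bar>b\<bar> = \<bar>a * b\<bar>"
    by (simp add: abs_mult)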
--- a/src/HOL/Multivariate_Analysis/Euclidean_Space.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Multivariate_Analysis/Euclidean_Space.thy Tue Feb 09 17:06:05 2010 +0100
@@ -459,7 +459,7 @@
done
lemma setsum_nonneg_eq_0_iff:
- fixes f :: "'a \<Rightarrow> 'b::pordered_ab_group_add"
+ fixes f :: "'a \<Rightarrow> 'b::ordered_ab_group_add"
shows "\<lbrakk>finite A; \<forall>x\<in>A. 0 \<le> f x\<rbrakk> \<Longrightarrow> setsum f A = 0 \<longleftrightarrow> (\<forall>x\<in>A. f x = 0)"
apply (induct set: finite, simp)
apply (simp add: add_nonneg_eq_0_iff setsum_nonneg)
@@ -836,10 +836,10 @@
lemma dot_rneg: "(x::'a::ring ^ 'n) \<bullet> (-y) = -(x \<bullet> y)" by vector
lemma dot_lzero[simp]: "0 \<bullet> x = (0::'a::{comm_monoid_add, mult_zero})" by vector
lemma dot_rzero[simp]: "x \<bullet> 0 = (0::'a::{comm_monoid_add, mult_zero})" by vector
-lemma dot_pos_le[simp]: "(0::'a\<Colon>ordered_ring_strict) <= x \<bullet> x"
+lemma dot_pos_le[simp]: "(0::'a\<Colon>linordered_ring_strict) <= x \<bullet> x"
by (simp add: dot_def setsum_nonneg)
-lemma setsum_squares_eq_0_iff: assumes fS: "finite F" and fp: "\<forall>x \<in> F. f x \<ge> (0 ::'a::pordered_ab_group_add)" shows "setsum f F = 0 \<longleftrightarrow> (ALL x:F. f x = 0)"
+lemma setsum_squares_eq_0_iff: assumes fS: "finite F" and fp: "\<forall>x \<in> F. f x \<ge> (0 ::'a::ordered_ab_group_add)" shows "setsum f F = 0 \<longleftrightarrow> (ALL x:F. f x = 0)"
using fS fp setsum_nonneg[OF fp]
proof (induct set: finite)
case empty thus ?case by simp
@@ -852,10 +852,10 @@
show ?case by (simp add: h)
qed
-lemma dot_eq_0: "x \<bullet> x = 0 \<longleftrightarrow> (x::'a::{ordered_ring_strict,ring_no_zero_divisors} ^ 'n) = 0"
+lemma dot_eq_0: "x \<bullet> x = 0 \<longleftrightarrow> (x::'a::{linordered_ring_strict,ring_no_zero_divisors} ^ 'n) = 0"
by (simp add: dot_def setsum_squares_eq_0_iff Cart_eq)
-lemma dot_pos_lt[simp]: "(0 < x \<bullet> x) \<longleftrightarrow> (x::'a::{ordered_ring_strict,ring_no_zero_divisors} ^ 'n) \<noteq> 0" using dot_eq_0[of x] dot_pos_le[of x]
+lemma dot_pos_lt[simp]: "(0 < x \<bullet> x) \<longleftrightarrow> (x::'a::{linordered_ring_strict,ring_no_zero_divisors} ^ 'n) \<noteq> 0" using dot_eq_0[of x] dot_pos_le[of x]
by (auto simp add: le_less)
subsection{* The collapse of the general concepts to dimension one. *}
@@ -1146,7 +1146,7 @@
shows "norm x1 \<le> b1 \<Longrightarrow> norm x2 \<le> b2 \<Longrightarrow> norm (x1 + x2) \<le> b1 + b2"
by (rule order_trans [OF norm_triangle_ineq add_mono])
-lemma ge_iff_diff_ge_0: "(a::'a::ordered_ring) \<ge> b == a - b \<ge> 0"
+lemma ge_iff_diff_ge_0: "(a::'a::linordered_ring) \<ge> b == a - b \<ge> 0"
by (simp add: ring_simps)
lemma pth_1:
@@ -3827,7 +3827,7 @@
(* FIXME : Move to some general theory ?*)
definition "pairwise R S \<longleftrightarrow> (\<forall>x \<in> S. \<forall>y\<in> S. x\<noteq>y \<longrightarrow> R x y)"
-lemma vector_sub_project_orthogonal: "(b::'a::ordered_field^'n) \<bullet> (x - ((b \<bullet> x) / (b\<bullet>b)) *s b) = 0"
+lemma vector_sub_project_orthogonal: "(b::'a::linordered_field^'n) \<bullet> (x - ((b \<bullet> x) / (b\<bullet>b)) *s b) = 0"
apply (cases "b = 0", simp)
apply (simp add: dot_rsub dot_rmult)
unfolding times_divide_eq_right[symmetric]
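The renamed ordered_ab_group_add sort still covers the concrete types these lemmas are used at. A hedged sketch instantiating setsum_nonneg_eq_0_iff at type int:

  lemma
    fixes f :: "'a \<Rightarrow> int"
    shows "finite A \<Longrightarrow> \<forall>x\<in>A. 0 \<le> f x \<Longrightarrow> setsum f A = 0 \<longleftrightarrow> (\<forall>x\<in>A. f x = 0)"
    by (rule setsum_nonneg_eq_0_iff)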
--- a/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Multivariate_Analysis/Topology_Euclidean_Space.thy Tue Feb 09 17:06:05 2010 +0100
@@ -5696,27 +5696,27 @@
by (simp add: vector_smult_lneg[symmetric] vector_smult_assoc vector_sneg_minus1[symmetric])
lemma real_affinity_le:
- "0 < (m::'a::ordered_field) ==> (m * x + c \<le> y \<longleftrightarrow> x \<le> inverse(m) * y + -(c / m))"
+ "0 < (m::'a::linordered_field) ==> (m * x + c \<le> y \<longleftrightarrow> x \<le> inverse(m) * y + -(c / m))"
by (simp add: field_simps inverse_eq_divide)
lemma real_le_affinity:
- "0 < (m::'a::ordered_field) ==> (y \<le> m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) \<le> x)"
+ "0 < (m::'a::linordered_field) ==> (y \<le> m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) \<le> x)"
by (simp add: field_simps inverse_eq_divide)
lemma real_affinity_lt:
- "0 < (m::'a::ordered_field) ==> (m * x + c < y \<longleftrightarrow> x < inverse(m) * y + -(c / m))"
+ "0 < (m::'a::linordered_field) ==> (m * x + c < y \<longleftrightarrow> x < inverse(m) * y + -(c / m))"
by (simp add: field_simps inverse_eq_divide)
lemma real_lt_affinity:
- "0 < (m::'a::ordered_field) ==> (y < m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) < x)"
+ "0 < (m::'a::linordered_field) ==> (y < m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) < x)"
by (simp add: field_simps inverse_eq_divide)
lemma real_affinity_eq:
- "(m::'a::ordered_field) \<noteq> 0 ==> (m * x + c = y \<longleftrightarrow> x = inverse(m) * y + -(c / m))"
+ "(m::'a::linordered_field) \<noteq> 0 ==> (m * x + c = y \<longleftrightarrow> x = inverse(m) * y + -(c / m))"
by (simp add: field_simps inverse_eq_divide)
lemma real_eq_affinity:
- "(m::'a::ordered_field) \<noteq> 0 ==> (y = m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) = x)"
+ "(m::'a::linordered_field) \<noteq> 0 ==> (y = m * x + c \<longleftrightarrow> inverse(m) * y + -(c / m) = x)"
by (simp add: field_simps inverse_eq_divide)
lemma vector_affinity_eq:
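These affinity rules are now stated over linordered_field, with real as the typical instance. A hedged numeric sketch:

  lemma "2 * x + 3 \<le> (y::real) \<longleftrightarrow> x \<le> inverse 2 * y + - (3 / 2)"
    by (rule real_affinity_le) simp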
--- a/src/HOL/NSA/HyperDef.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/NSA/HyperDef.thy Tue Feb 09 17:06:05 2010 +0100
@@ -394,7 +394,7 @@
by (simp only: hypreal_three_squares_add_zero_iff hrealpow_two)
(*FIXME: This and RealPow.abs_realpow_two should be replaced by an abstract
- result proved in Ring_and_Field*)
+ result proved in Rings or Fields*)
lemma hrabs_hrealpow_two [simp]:
"abs(x ^ Suc (Suc 0)) = (x::hypreal) ^ Suc (Suc 0)"
by (simp add: abs_mult)
@@ -459,7 +459,7 @@
by transfer (rule power_inverse)
lemma hyperpow_hrabs:
- "\<And>r n. abs (r::'a::{ordered_idom} star) pow n = abs (r pow n)"
+ "\<And>r n. abs (r::'a::{linordered_idom} star) pow n = abs (r pow n)"
by transfer (rule power_abs [symmetric])
lemma hyperpow_add:
@@ -475,15 +475,15 @@
by transfer simp
lemma hyperpow_gt_zero:
- "\<And>r n. (0::'a::{ordered_semidom} star) < r \<Longrightarrow> 0 < r pow n"
+ "\<And>r n. (0::'a::{linordered_semidom} star) < r \<Longrightarrow> 0 < r pow n"
by transfer (rule zero_less_power)
lemma hyperpow_ge_zero:
- "\<And>r n. (0::'a::{ordered_semidom} star) \<le> r \<Longrightarrow> 0 \<le> r pow n"
+ "\<And>r n. (0::'a::{linordered_semidom} star) \<le> r \<Longrightarrow> 0 \<le> r pow n"
by transfer (rule zero_le_power)
lemma hyperpow_le:
- "\<And>x y n. \<lbrakk>(0::'a::{ordered_semidom} star) < x; x \<le> y\<rbrakk>
+ "\<And>x y n. \<lbrakk>(0::'a::{linordered_semidom} star) < x; x \<le> y\<rbrakk>
\<Longrightarrow> x pow n \<le> y pow n"
by transfer (rule power_mono [OF _ order_less_imp_le])
@@ -492,7 +492,7 @@
by transfer (rule power_one)
lemma hrabs_hyperpow_minus_one [simp]:
- "\<And>n. abs(-1 pow n) = (1::'a::{number_ring,ordered_idom} star)"
+ "\<And>n. abs(-1 pow n) = (1::'a::{number_ring,linordered_idom} star)"
by transfer (rule abs_power_minus_one)
lemma hyperpow_mult:
@@ -501,29 +501,29 @@
by transfer (rule power_mult_distrib)
lemma hyperpow_two_le [simp]:
- "(0::'a::{monoid_mult,ordered_ring_strict} star) \<le> r pow (1 + 1)"
+ "(0::'a::{monoid_mult,linordered_ring_strict} star) \<le> r pow (1 + 1)"
by (auto simp add: hyperpow_two zero_le_mult_iff)
lemma hrabs_hyperpow_two [simp]:
"abs(x pow (1 + 1)) =
- (x::'a::{monoid_mult,ordered_ring_strict} star) pow (1 + 1)"
+ (x::'a::{monoid_mult,linordered_ring_strict} star) pow (1 + 1)"
by (simp only: abs_of_nonneg hyperpow_two_le)
lemma hyperpow_two_hrabs [simp]:
- "abs(x::'a::{ordered_idom} star) pow (1 + 1) = x pow (1 + 1)"
+ "abs(x::'a::{linordered_idom} star) pow (1 + 1) = x pow (1 + 1)"
by (simp add: hyperpow_hrabs)
text{*The precondition could be weakened to @{term "0\<le>x"}*}
lemma hypreal_mult_less_mono:
"[| u<v; x<y; (0::hypreal) < v; 0 < x |] ==> u*x < v* y"
- by (simp add: Ring_and_Field.mult_strict_mono order_less_imp_le)
+ by (simp add: mult_strict_mono order_less_imp_le)
lemma hyperpow_two_gt_one:
- "\<And>r::'a::{ordered_semidom} star. 1 < r \<Longrightarrow> 1 < r pow (1 + 1)"
+ "\<And>r::'a::{linordered_semidom} star. 1 < r \<Longrightarrow> 1 < r pow (1 + 1)"
by transfer (simp add: power_gt1 del: power_Suc)
lemma hyperpow_two_ge_one:
- "\<And>r::'a::{ordered_semidom} star. 1 \<le> r \<Longrightarrow> 1 \<le> r pow (1 + 1)"
+ "\<And>r::'a::{linordered_semidom} star. 1 \<le> r \<Longrightarrow> 1 \<le> r pow (1 + 1)"
by transfer (simp add: one_le_power del: power_Suc)
lemma two_hyperpow_ge_one [simp]: "(1::hypreal) \<le> 2 pow n"
--- a/src/HOL/NSA/HyperNat.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/NSA/HyperNat.thy Tue Feb 09 17:06:05 2010 +0100
@@ -386,39 +386,39 @@
by transfer (rule of_nat_mult)
lemma of_hypnat_less_iff [simp]:
- "\<And>m n. (of_hypnat m < (of_hypnat n::'a::ordered_semidom star)) = (m < n)"
+ "\<And>m n. (of_hypnat m < (of_hypnat n::'a::linordered_semidom star)) = (m < n)"
by transfer (rule of_nat_less_iff)
lemma of_hypnat_0_less_iff [simp]:
- "\<And>n. (0 < (of_hypnat n::'a::ordered_semidom star)) = (0 < n)"
+ "\<And>n. (0 < (of_hypnat n::'a::linordered_semidom star)) = (0 < n)"
by transfer (rule of_nat_0_less_iff)
lemma of_hypnat_less_0_iff [simp]:
- "\<And>m. \<not> (of_hypnat m::'a::ordered_semidom star) < 0"
+ "\<And>m. \<not> (of_hypnat m::'a::linordered_semidom star) < 0"
by transfer (rule of_nat_less_0_iff)
lemma of_hypnat_le_iff [simp]:
- "\<And>m n. (of_hypnat m \<le> (of_hypnat n::'a::ordered_semidom star)) = (m \<le> n)"
+ "\<And>m n. (of_hypnat m \<le> (of_hypnat n::'a::linordered_semidom star)) = (m \<le> n)"
by transfer (rule of_nat_le_iff)
lemma of_hypnat_0_le_iff [simp]:
- "\<And>n. 0 \<le> (of_hypnat n::'a::ordered_semidom star)"
+ "\<And>n. 0 \<le> (of_hypnat n::'a::linordered_semidom star)"
by transfer (rule of_nat_0_le_iff)
lemma of_hypnat_le_0_iff [simp]:
- "\<And>m. ((of_hypnat m::'a::ordered_semidom star) \<le> 0) = (m = 0)"
+ "\<And>m. ((of_hypnat m::'a::linordered_semidom star) \<le> 0) = (m = 0)"
by transfer (rule of_nat_le_0_iff)
lemma of_hypnat_eq_iff [simp]:
- "\<And>m n. (of_hypnat m = (of_hypnat n::'a::ordered_semidom star)) = (m = n)"
+ "\<And>m n. (of_hypnat m = (of_hypnat n::'a::linordered_semidom star)) = (m = n)"
by transfer (rule of_nat_eq_iff)
lemma of_hypnat_eq_0_iff [simp]:
- "\<And>m. ((of_hypnat m::'a::ordered_semidom star) = 0) = (m = 0)"
+ "\<And>m. ((of_hypnat m::'a::linordered_semidom star) = 0) = (m = 0)"
by transfer (rule of_nat_eq_0_iff)
lemma HNatInfinite_of_hypnat_gt_zero:
- "N \<in> HNatInfinite \<Longrightarrow> (0::'a::ordered_semidom star) < of_hypnat N"
+ "N \<in> HNatInfinite \<Longrightarrow> (0::'a::linordered_semidom star) < of_hypnat N"
by (rule ccontr, simp add: linorder_not_less)
end
--- a/src/HOL/NSA/NSComplex.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/NSA/NSComplex.thy Tue Feb 09 17:06:05 2010 +0100
@@ -1,5 +1,4 @@
(* Title: NSComplex.thy
- ID: $Id$
Author: Jacques D. Fleuriot
Copyright: 2001 University of Edinburgh
Conversion to Isar and new proofs by Lawrence C Paulson, 2003/4
@@ -161,7 +160,7 @@
lemma hcomplex_add_minus_eq_minus:
"x + y = (0::hcomplex) ==> x = -y"
-apply (drule OrderedGroup.minus_unique)
+apply (drule minus_unique)
apply (simp add: minus_equation_iff [of x y])
done
@@ -196,7 +195,7 @@
lemma hcomplex_diff_eq_eq [simp]: "((x::hcomplex) - y = z) = (x = z + y)"
(* TODO: delete *)
-by (rule OrderedGroup.diff_eq_eq)
+by (rule diff_eq_eq)
subsection{*Embedding Properties for @{term hcomplex_of_hypreal} Map*}
--- a/src/HOL/NSA/StarDef.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/NSA/StarDef.thy Tue Feb 09 17:06:05 2010 +0100
@@ -530,7 +530,7 @@
end
-instance star :: (Ring_and_Field.dvd) Ring_and_Field.dvd ..
+instance star :: (Rings.dvd) Rings.dvd ..
instantiation star :: (Divides.div) Divides.div
begin
@@ -719,7 +719,7 @@
apply (transfer, erule (1) order_antisym)
done
-instantiation star :: (lower_semilattice) lower_semilattice
+instantiation star :: (semilattice_inf) semilattice_inf
begin
definition
@@ -730,7 +730,7 @@
end
-instantiation star :: (upper_semilattice) upper_semilattice
+instantiation star :: (semilattice_sup) semilattice_sup
begin
definition
@@ -833,28 +833,23 @@
apply (transfer, rule diff_minus)
done
-instance star :: (pordered_ab_semigroup_add) pordered_ab_semigroup_add
+instance star :: (ordered_ab_semigroup_add) ordered_ab_semigroup_add
by (intro_classes, transfer, rule add_left_mono)
-instance star :: (pordered_cancel_ab_semigroup_add) pordered_cancel_ab_semigroup_add ..
+instance star :: (ordered_cancel_ab_semigroup_add) ordered_cancel_ab_semigroup_add ..
-instance star :: (pordered_ab_semigroup_add_imp_le) pordered_ab_semigroup_add_imp_le
+instance star :: (ordered_ab_semigroup_add_imp_le) ordered_ab_semigroup_add_imp_le
by (intro_classes, transfer, rule add_le_imp_le_left)
-instance star :: (pordered_comm_monoid_add) pordered_comm_monoid_add ..
-instance star :: (pordered_ab_group_add) pordered_ab_group_add ..
+instance star :: (ordered_comm_monoid_add) ordered_comm_monoid_add ..
+instance star :: (ordered_ab_group_add) ordered_ab_group_add ..
-instance star :: (pordered_ab_group_add_abs) pordered_ab_group_add_abs
+instance star :: (ordered_ab_group_add_abs) ordered_ab_group_add_abs
by intro_classes (transfer,
simp add: abs_ge_self abs_leI abs_triangle_ineq)+
-instance star :: (ordered_cancel_ab_semigroup_add) ordered_cancel_ab_semigroup_add ..
-instance star :: (lordered_ab_group_add_meet) lordered_ab_group_add_meet ..
-instance star :: (lordered_ab_group_add_meet) lordered_ab_group_add_meet ..
-instance star :: (lordered_ab_group_add) lordered_ab_group_add ..
+instance star :: (linordered_cancel_ab_semigroup_add) linordered_cancel_ab_semigroup_add ..
-instance star :: (lordered_ab_group_add_abs) lordered_ab_group_add_abs
-by (intro_classes, transfer, rule abs_lattice)
subsection {* Ring and field classes *}
@@ -909,32 +904,31 @@
instance star :: (division_by_zero) division_by_zero
by (intro_classes, transfer, rule inverse_zero)
-instance star :: (pordered_semiring) pordered_semiring
+instance star :: (ordered_semiring) ordered_semiring
apply (intro_classes)
apply (transfer, erule (1) mult_left_mono)
apply (transfer, erule (1) mult_right_mono)
done
-instance star :: (pordered_cancel_semiring) pordered_cancel_semiring ..
+instance star :: (ordered_cancel_semiring) ordered_cancel_semiring ..
-instance star :: (ordered_semiring_strict) ordered_semiring_strict
+instance star :: (linordered_semiring_strict) linordered_semiring_strict
apply (intro_classes)
apply (transfer, erule (1) mult_strict_left_mono)
apply (transfer, erule (1) mult_strict_right_mono)
done
-instance star :: (pordered_comm_semiring) pordered_comm_semiring
+instance star :: (ordered_comm_semiring) ordered_comm_semiring
by (intro_classes, transfer, rule mult_mono1_class.mult_mono1)
-instance star :: (pordered_cancel_comm_semiring) pordered_cancel_comm_semiring ..
-
-instance star :: (ordered_comm_semiring_strict) ordered_comm_semiring_strict
-by (intro_classes, transfer, rule ordered_comm_semiring_strict_class.mult_strict_left_mono_comm)
+instance star :: (ordered_cancel_comm_semiring) ordered_cancel_comm_semiring ..
-instance star :: (pordered_ring) pordered_ring ..
-instance star :: (pordered_ring_abs) pordered_ring_abs
+instance star :: (linordered_comm_semiring_strict) linordered_comm_semiring_strict
+by (intro_classes, transfer, rule mult_strict_left_mono_comm)
+
+instance star :: (ordered_ring) ordered_ring ..
+instance star :: (ordered_ring_abs) ordered_ring_abs
by intro_classes (transfer, rule abs_eq_mult)
-instance star :: (lordered_ring) lordered_ring ..
instance star :: (abs_if) abs_if
by (intro_classes, transfer, rule abs_if)
@@ -942,14 +936,14 @@
instance star :: (sgn_if) sgn_if
by (intro_classes, transfer, rule sgn_if)
-instance star :: (ordered_ring_strict) ordered_ring_strict ..
-instance star :: (pordered_comm_ring) pordered_comm_ring ..
+instance star :: (linordered_ring_strict) linordered_ring_strict ..
+instance star :: (ordered_comm_ring) ordered_comm_ring ..
-instance star :: (ordered_semidom) ordered_semidom
+instance star :: (linordered_semidom) linordered_semidom
by (intro_classes, transfer, rule zero_less_one)
-instance star :: (ordered_idom) ordered_idom ..
-instance star :: (ordered_field) ordered_field ..
+instance star :: (linordered_idom) linordered_idom ..
+instance star :: (linordered_field) linordered_field ..
subsection {* Power *}
--- a/src/HOL/Nat.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Nat.thy Tue Feb 09 17:06:05 2010 +0100
@@ -8,7 +8,7 @@
header {* Natural numbers *}
theory Nat
-imports Inductive Product_Type Ring_and_Field
+imports Inductive Product_Type Fields
uses
"~~/src/Tools/rat.ML"
"~~/src/Provers/Arith/cancel_sums.ML"
@@ -176,6 +176,8 @@
end
+hide (open) fact add_0_right
+
instantiation nat :: comm_semiring_1_cancel
begin
@@ -730,7 +732,7 @@
apply (induct n)
apply (simp_all add: order_le_less)
apply (blast elim!: less_SucE
- intro!: add_0_right [symmetric] add_Suc_right [symmetric])
+ intro!: Nat.add_0_right [symmetric] add_Suc_right [symmetric])
done
text {* strict, in 1st argument; proof is by induction on @{text "k > 0"} *}
@@ -741,7 +743,7 @@
done
text{*The naturals form an ordered @{text comm_semiring_1_cancel}*}
-instance nat :: ordered_semidom
+instance nat :: linordered_semidom
proof
fix i j k :: nat
show "0 < (1::nat)" by simp
@@ -1289,7 +1291,7 @@
end
-context ordered_semidom
+context linordered_semidom
begin
lemma zero_le_imp_of_nat: "0 \<le> of_nat m"
@@ -1316,7 +1318,7 @@
lemma of_nat_le_iff [simp]: "of_nat m \<le> of_nat n \<longleftrightarrow> m \<le> n"
by (simp add: not_less [symmetric] linorder_not_less [symmetric])
-text{*Every @{text ordered_semidom} has characteristic zero.*}
+text{*Every @{text linordered_semidom} has characteristic zero.*}
subclass semiring_char_0
proof qed (simp add: eq_iff order_eq_iff)
@@ -1345,7 +1347,7 @@
end
-context ordered_idom
+context linordered_idom
begin
lemma abs_of_nat [simp]: "\<bar>of_nat n\<bar> = of_nat n"
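With the hide (open) fact add_0_right above, the unqualified name should now resolve to the generic monoid_add fact, while the nat-specific copy stays reachable under its qualified name, as in the induction proof above. A hedged sketch:

  lemma "(n::nat) + 0 = n" by (rule add_0_right)      (* generic monoid_add fact *)
  lemma "(n::nat) + 0 = n" by (rule Nat.add_0_right)  (* hidden name, still accessible qualified *)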
--- a/src/HOL/Nat_Numeral.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Nat_Numeral.thy Tue Feb 09 17:06:05 2010 +0100
@@ -64,7 +64,7 @@
lemma power_even_eq:
"a ^ (2*n) = (a ^ n) ^ 2"
- by (subst OrderedGroup.mult_commute) (simp add: power_mult)
+ by (subst mult_commute) (simp add: power_mult)
lemma power_odd_eq:
"a ^ Suc (2*n) = a * (a ^ n) ^ 2"
@@ -113,7 +113,7 @@
end
-context ordered_ring_strict
+context linordered_ring_strict
begin
lemma sum_squares_ge_zero:
@@ -145,7 +145,7 @@
end
-context ordered_semidom
+context linordered_semidom
begin
lemma power2_le_imp_le:
@@ -162,7 +162,7 @@
end
-context ordered_idom
+context linordered_idom
begin
lemma zero_eq_power2 [simp]:
--- a/src/HOL/Nitpick.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Nitpick.thy Tue Feb 09 17:06:05 2010 +0100
@@ -225,13 +225,13 @@
"Nitpick_Nut" so that they handle the unexpanded overloaded constants
directly, but this is slightly more tricky to implement. *)
lemmas [nitpick_def] = div_int_inst.div_int div_int_inst.mod_int
- div_nat_inst.div_nat div_nat_inst.mod_nat lower_semilattice_fun_inst.inf_fun
+ div_nat_inst.div_nat div_nat_inst.mod_nat semilattice_inf_fun_inst.inf_fun
minus_fun_inst.minus_fun minus_int_inst.minus_int minus_nat_inst.minus_nat
one_int_inst.one_int one_nat_inst.one_nat ord_fun_inst.less_eq_fun
ord_int_inst.less_eq_int ord_int_inst.less_int ord_nat_inst.less_eq_nat
ord_nat_inst.less_nat plus_int_inst.plus_int plus_nat_inst.plus_nat
times_int_inst.times_int times_nat_inst.times_nat uminus_int_inst.uminus_int
- upper_semilattice_fun_inst.sup_fun zero_int_inst.zero_int
+ semilattice_sup_fun_inst.sup_fun zero_int_inst.zero_int
zero_nat_inst.zero_nat
use "Tools/Nitpick/kodkod.ML"
--- a/src/HOL/Nominal/nominal_datatype.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Nominal/nominal_datatype.ML Tue Feb 09 17:06:05 2010 +0100
@@ -152,7 +152,7 @@
fun projections rule =
Project_Rule.projections (ProofContext.init (Thm.theory_of_thm rule)) rule
- |> map (Drule.standard #> Rule_Cases.save rule);
+ |> map (Drule.export_without_context #> Rule_Cases.save rule);
val supp_prod = thm "supp_prod";
val fresh_prod = thm "fresh_prod";
@@ -312,7 +312,7 @@
val unfolded_perm_eq_thms =
if length descr = length new_type_names then []
- else map Drule.standard (List.drop (split_conj_thm
+ else map Drule.export_without_context (List.drop (split_conj_thm
(Goal.prove_global thy2 [] []
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj
(map (fn (c as (s, T), x) =>
@@ -332,7 +332,7 @@
val perm_empty_thms = maps (fn a =>
let val permT = mk_permT (Type (a, []))
- in map Drule.standard (List.take (split_conj_thm
+ in map Drule.export_without_context (List.take (split_conj_thm
(Goal.prove_global thy2 [] []
(augment_sort thy2 [pt_class_of thy2 a]
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj
@@ -364,7 +364,7 @@
val pt_inst = pt_inst_of thy2 a;
val pt2' = pt_inst RS pt2;
val pt2_ax = PureThy.get_thm thy2 (Long_Name.map_base_name (fn s => "pt_" ^ s ^ "2") a);
- in List.take (map Drule.standard (split_conj_thm
+ in List.take (map Drule.export_without_context (split_conj_thm
(Goal.prove_global thy2 [] []
(augment_sort thy2 [pt_class_of thy2 a]
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj
@@ -399,7 +399,7 @@
val pt3' = pt_inst RS pt3;
val pt3_rev' = at_inst RS (pt_inst RS pt3_rev);
val pt3_ax = PureThy.get_thm thy2 (Long_Name.map_base_name (fn s => "pt_" ^ s ^ "3") a);
- in List.take (map Drule.standard (split_conj_thm
+ in List.take (map Drule.export_without_context (split_conj_thm
(Goal.prove_global thy2 [] []
(augment_sort thy2 [pt_class_of thy2 a] (Logic.mk_implies
(HOLogic.mk_Trueprop (Const ("Nominal.prm_eq",
@@ -586,7 +586,7 @@
(fn (x, (_, ("Nominal.noption", _, _))) => NONE | (x, _) => SOME x)
(perm_indnames ~~ descr);
- fun mk_perm_closed name = map (fn th => Drule.standard (th RS mp))
+ fun mk_perm_closed name = map (fn th => Drule.export_without_context (th RS mp))
(List.take (split_conj_thm (Goal.prove_global thy4 [] []
(augment_sort thy4
(pt_class_of thy4 name :: map (cp_class_of thy4 name) (remove (op =) name dt_atoms))
@@ -812,7 +812,8 @@
val rep_const = cterm_of thy
(Const (Sign.intern_const thy ("Rep_" ^ tname), T --> T'));
val dist =
- Drule.standard (cterm_instantiate [(cterm_of thy distinct_f, rep_const)] distinct_lemma);
+ Drule.export_without_context
+ (cterm_instantiate [(cterm_of thy distinct_f, rep_const)] distinct_lemma);
val (thy', defs', eqns') = fold (make_constr_def tname T T')
(constrs ~~ constrs' ~~ constr_syntax) (Sign.add_path tname thy, defs, [])
in
@@ -877,8 +878,9 @@
let
val dist_thm = Goal.prove_global thy8 [] [] t (fn _ =>
simp_tac (global_simpset_of thy8 addsimps (dist_lemma :: rep_thms)) 1)
- in dist_thm :: Drule.standard (dist_thm RS not_sym) ::
- prove_distinct_thms p (k, ts)
+ in
+ dist_thm :: Drule.export_without_context (dist_thm RS not_sym) ::
+ prove_distinct_thms p (k, ts)
end;
val distinct_thms = map2 prove_distinct_thms
@@ -1092,7 +1094,7 @@
val finite_supp_thms = map (fn atom =>
let val atomT = Type (atom, [])
- in map Drule.standard (List.take
+ in map Drule.export_without_context (List.take
(split_conj_thm (Goal.prove_global thy8 [] []
(augment_sort thy8 (fs_class_of thy8 atom :: pt_cp_sort)
(HOLogic.mk_Trueprop
@@ -1540,7 +1542,7 @@
in
(R $ x $ y, R' $ mk_perm [] pi x $ mk_perm [] pi y)
end) (recTs ~~ rec_result_Ts ~~ rec_sets ~~ rec_sets_pi ~~ (1 upto length recTs));
- val ths = map (fn th => Drule.standard (th RS mp)) (split_conj_thm
+ val ths = map (fn th => Drule.export_without_context (th RS mp)) (split_conj_thm
(Goal.prove_global thy11 [] []
(augment_sort thy1 pt_cp_sort
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj (map HOLogic.mk_imp ps))))
@@ -1572,7 +1574,7 @@
(finite $ (Const ("Nominal.supp", T --> aset) $ f)))
(rec_fns ~~ rec_fn_Ts)
in
- map (fn th => Drule.standard (th RS mp)) (split_conj_thm
+ map (fn th => Drule.export_without_context (th RS mp)) (split_conj_thm
(Goal.prove_global thy11 []
(map (augment_sort thy11 fs_cp_sort) fins)
(augment_sort thy11 fs_cp_sort
@@ -1615,7 +1617,7 @@
val y = Free ("y", U);
val y' = Free ("y'", U)
in
- Drule.standard (Goal.prove (ProofContext.init thy11) []
+ Drule.export_without_context (Goal.prove (ProofContext.init thy11) []
(map (augment_sort thy11 fs_cp_sort)
(finite_prems @
[HOLogic.mk_Trueprop (R $ x $ y),
@@ -2060,7 +2062,7 @@
((Binding.name "rec_equiv'", flat rec_equiv_thms'), []),
((Binding.name "rec_fin_supp", flat rec_fin_supp_thms), []),
((Binding.name "rec_fresh", flat rec_fresh_thms), []),
- ((Binding.name "rec_unique", map Drule.standard rec_unique_thms), []),
+ ((Binding.name "rec_unique", map Drule.export_without_context rec_unique_thms), []),
((Binding.name "recs", rec_thms), [])] ||>
Sign.parent_path ||>
map_nominal_datatypes (fold Symtab.update dt_infos);
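Drule.standard is renamed throughout; the new operation has the same thm -> thm shape and is typically mapped over lists of derived rules, as above. A minimal ML sketch under that assumption:

  (* a hedged sketch: generalize a batch of theorems outside any proof context *)
  val export_all = map Drule.export_without_context;   (* thm list -> thm list *)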
--- a/src/HOL/Number_Theory/UniqueFactorization.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Number_Theory/UniqueFactorization.thy Tue Feb 09 17:06:05 2010 +0100
@@ -76,7 +76,7 @@
("(3PROD _:#_. _)" [0, 51, 10] 10)
translations
- "PROD i :# A. b" == "msetprod (%i. b) A"
+ "PROD i :# A. b" == "CONST msetprod (%i. b) A"
lemma msetprod_Un: "msetprod f (A+B) = msetprod f A * msetprod f B"
apply (simp add: msetprod_def power_add)
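Within translations, constants on the logical side need the CONST marker, otherwise msetprod would be read as a free variable. A hedged sketch of the same pattern for a hypothetical msetsum constant (signature mirroring the msetprod syntax above):

  syntax
    "_msetsum" :: "pttrn \<Rightarrow> 'a multiset \<Rightarrow> 'b \<Rightarrow> 'b"  ("(3SUM _:#_. _)" [0, 51, 10] 10)
  translations
    "SUM i :# A. b" == "CONST msetsum (%i. b) A"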
--- a/src/HOL/Old_Number_Theory/WilsonBij.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Old_Number_Theory/WilsonBij.thy Tue Feb 09 17:06:05 2010 +0100
@@ -74,9 +74,9 @@
lemma aux: "[a * (p - 1) = 1] (mod p) = [a = p - 1] (mod p)"
-- {* same as @{text WilsonRuss} *}
apply (unfold zcong_def)
- apply (simp add: OrderedGroup.diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
+ apply (simp add: diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
apply (rule_tac s = "p dvd -((a + 1) + (p * -a))" in trans)
- apply (simp add: mult_commute)
+ apply (simp add: algebra_simps)
apply (subst dvd_minus_iff)
apply (subst zdvd_reduce)
apply (rule_tac s = "p dvd (a + 1) + (p * -1)" in trans)
--- a/src/HOL/Old_Number_Theory/WilsonRuss.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Old_Number_Theory/WilsonRuss.thy Tue Feb 09 17:06:05 2010 +0100
@@ -82,9 +82,9 @@
lemma inv_not_p_minus_1_aux:
"[a * (p - 1) = 1] (mod p) = [a = p - 1] (mod p)"
apply (unfold zcong_def)
- apply (simp add: OrderedGroup.diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
+ apply (simp add: diff_diff_eq diff_diff_eq2 zdiff_zmult_distrib2)
apply (rule_tac s = "p dvd -((a + 1) + (p * -a))" in trans)
- apply (simp add: mult_commute)
+ apply (simp add: algebra_simps)
apply (subst dvd_minus_iff)
apply (subst zdvd_reduce)
apply (rule_tac s = "p dvd (a + 1) + (p * -1)" in trans)
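Both Wilson proofs now rely on the unqualified diff_diff_eq (an algebra_simps rule) and let algebra_simps supply the commutation step. A minimal sketch of the rule in question:

  lemma "(a::int) - b - c = a - (b + c)"
    by (simp add: algebra_simps)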
--- a/src/HOL/OrderedGroup.thy Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1446 +0,0 @@
-(* Title: HOL/OrderedGroup.thy
- Author: Gertrud Bauer, Steven Obua, Lawrence C Paulson, Markus Wenzel, Jeremy Avigad
-*)
-
-header {* Ordered Groups *}
-
-theory OrderedGroup
-imports Lattices
-uses "~~/src/Provers/Arith/abel_cancel.ML"
-begin
-
-text {*
- The theory of partially ordered groups is taken from the books:
- \begin{itemize}
- \item \emph{Lattice Theory} by Garret Birkhoff, American Mathematical Society 1979
- \item \emph{Partially Ordered Algebraic Systems}, Pergamon Press 1963
- \end{itemize}
- Most of the used notions can also be looked up in
- \begin{itemize}
- \item \url{http://www.mathworld.com} by Eric Weisstein et. al.
- \item \emph{Algebra I} by van der Waerden, Springer.
- \end{itemize}
-*}
-
-ML {*
-structure Algebra_Simps = Named_Thms(
- val name = "algebra_simps"
- val description = "algebra simplification rules"
-)
-*}
-
-setup Algebra_Simps.setup
-
-text{* The rewrites accumulated in @{text algebra_simps} deal with the
-classical algebraic structures of groups, rings and family. They simplify
-terms by multiplying everything out (in case of a ring) and bringing sums and
-products into a canonical form (by ordered rewriting). As a result it decides
-group and ring equalities but also helps with inequalities.
-
-Of course it also works for fields, but it knows nothing about multiplicative
-inverses or division. This is catered for by @{text field_simps}. *}
-
-subsection {* Semigroups and Monoids *}
-
-class semigroup_add = plus +
- assumes add_assoc [algebra_simps]: "(a + b) + c = a + (b + c)"
-
-sublocale semigroup_add < plus!: semigroup plus proof
-qed (fact add_assoc)
-
-class ab_semigroup_add = semigroup_add +
- assumes add_commute [algebra_simps]: "a + b = b + a"
-
-sublocale ab_semigroup_add < plus!: abel_semigroup plus proof
-qed (fact add_commute)
-
-context ab_semigroup_add
-begin
-
-lemmas add_left_commute [algebra_simps] = plus.left_commute
-
-theorems add_ac = add_assoc add_commute add_left_commute
-
-end
-
-theorems add_ac = add_assoc add_commute add_left_commute
-
-class semigroup_mult = times +
- assumes mult_assoc [algebra_simps]: "(a * b) * c = a * (b * c)"
-
-sublocale semigroup_mult < times!: semigroup times proof
-qed (fact mult_assoc)
-
-class ab_semigroup_mult = semigroup_mult +
- assumes mult_commute [algebra_simps]: "a * b = b * a"
-
-sublocale ab_semigroup_mult < times!: abel_semigroup times proof
-qed (fact mult_commute)
-
-context ab_semigroup_mult
-begin
-
-lemmas mult_left_commute [algebra_simps] = times.left_commute
-
-theorems mult_ac = mult_assoc mult_commute mult_left_commute
-
-end
-
-theorems mult_ac = mult_assoc mult_commute mult_left_commute
-
-class ab_semigroup_idem_mult = ab_semigroup_mult +
- assumes mult_idem: "x * x = x"
-
-sublocale ab_semigroup_idem_mult < times!: semilattice times proof
-qed (fact mult_idem)
-
-context ab_semigroup_idem_mult
-begin
-
-lemmas mult_left_idem = times.left_idem
-
-end
-
-class monoid_add = zero + semigroup_add +
- assumes add_0_left [simp]: "0 + a = a"
- and add_0_right [simp]: "a + 0 = a"
-
-lemma zero_reorient: "0 = x \<longleftrightarrow> x = 0"
-by (rule eq_commute)
-
-class comm_monoid_add = zero + ab_semigroup_add +
- assumes add_0: "0 + a = a"
-begin
-
-subclass monoid_add
- proof qed (insert add_0, simp_all add: add_commute)
-
-end
-
-class monoid_mult = one + semigroup_mult +
- assumes mult_1_left [simp]: "1 * a = a"
- assumes mult_1_right [simp]: "a * 1 = a"
-
-lemma one_reorient: "1 = x \<longleftrightarrow> x = 1"
-by (rule eq_commute)
-
-class comm_monoid_mult = one + ab_semigroup_mult +
- assumes mult_1: "1 * a = a"
-begin
-
-subclass monoid_mult
- proof qed (insert mult_1, simp_all add: mult_commute)
-
-end
-
-class cancel_semigroup_add = semigroup_add +
- assumes add_left_imp_eq: "a + b = a + c \<Longrightarrow> b = c"
- assumes add_right_imp_eq: "b + a = c + a \<Longrightarrow> b = c"
-begin
-
-lemma add_left_cancel [simp]:
- "a + b = a + c \<longleftrightarrow> b = c"
-by (blast dest: add_left_imp_eq)
-
-lemma add_right_cancel [simp]:
- "b + a = c + a \<longleftrightarrow> b = c"
-by (blast dest: add_right_imp_eq)
-
-end
-
-class cancel_ab_semigroup_add = ab_semigroup_add +
- assumes add_imp_eq: "a + b = a + c \<Longrightarrow> b = c"
-begin
-
-subclass cancel_semigroup_add
-proof
- fix a b c :: 'a
- assume "a + b = a + c"
- then show "b = c" by (rule add_imp_eq)
-next
- fix a b c :: 'a
- assume "b + a = c + a"
- then have "a + b = a + c" by (simp only: add_commute)
- then show "b = c" by (rule add_imp_eq)
-qed
-
-end
-
-class cancel_comm_monoid_add = cancel_ab_semigroup_add + comm_monoid_add
-
-
-subsection {* Groups *}
-
-class group_add = minus + uminus + monoid_add +
- assumes left_minus [simp]: "- a + a = 0"
- assumes diff_minus: "a - b = a + (- b)"
-begin
-
-lemma minus_unique:
- assumes "a + b = 0" shows "- a = b"
-proof -
- have "- a = - a + (a + b)" using assms by simp
- also have "\<dots> = b" by (simp add: add_assoc [symmetric])
- finally show ?thesis .
-qed
-
-lemmas equals_zero_I = minus_unique (* legacy name *)
-
-lemma minus_zero [simp]: "- 0 = 0"
-proof -
- have "0 + 0 = 0" by (rule add_0_right)
- thus "- 0 = 0" by (rule minus_unique)
-qed
-
-lemma minus_minus [simp]: "- (- a) = a"
-proof -
- have "- a + a = 0" by (rule left_minus)
- thus "- (- a) = a" by (rule minus_unique)
-qed
-
-lemma right_minus [simp]: "a + - a = 0"
-proof -
- have "a + - a = - (- a) + - a" by simp
- also have "\<dots> = 0" by (rule left_minus)
- finally show ?thesis .
-qed
-
-lemma minus_add_cancel: "- a + (a + b) = b"
-by (simp add: add_assoc [symmetric])
-
-lemma add_minus_cancel: "a + (- a + b) = b"
-by (simp add: add_assoc [symmetric])
-
-lemma minus_add: "- (a + b) = - b + - a"
-proof -
- have "(a + b) + (- b + - a) = 0"
- by (simp add: add_assoc add_minus_cancel)
- thus "- (a + b) = - b + - a"
- by (rule minus_unique)
-qed
-
-lemma right_minus_eq: "a - b = 0 \<longleftrightarrow> a = b"
-proof
- assume "a - b = 0"
- have "a = (a - b) + b" by (simp add:diff_minus add_assoc)
- also have "\<dots> = b" using `a - b = 0` by simp
- finally show "a = b" .
-next
- assume "a = b" thus "a - b = 0" by (simp add: diff_minus)
-qed
-
-lemma diff_self [simp]: "a - a = 0"
-by (simp add: diff_minus)
-
-lemma diff_0 [simp]: "0 - a = - a"
-by (simp add: diff_minus)
-
-lemma diff_0_right [simp]: "a - 0 = a"
-by (simp add: diff_minus)
-
-lemma diff_minus_eq_add [simp]: "a - - b = a + b"
-by (simp add: diff_minus)
-
-lemma neg_equal_iff_equal [simp]:
- "- a = - b \<longleftrightarrow> a = b"
-proof
- assume "- a = - b"
- hence "- (- a) = - (- b)" by simp
- thus "a = b" by simp
-next
- assume "a = b"
- thus "- a = - b" by simp
-qed
-
-lemma neg_equal_0_iff_equal [simp]:
- "- a = 0 \<longleftrightarrow> a = 0"
-by (subst neg_equal_iff_equal [symmetric], simp)
-
-lemma neg_0_equal_iff_equal [simp]:
- "0 = - a \<longleftrightarrow> 0 = a"
-by (subst neg_equal_iff_equal [symmetric], simp)
-
-text{*The next two equations can make the simplifier loop!*}
-
-lemma equation_minus_iff:
- "a = - b \<longleftrightarrow> b = - a"
-proof -
- have "- (- a) = - b \<longleftrightarrow> - a = b" by (rule neg_equal_iff_equal)
- thus ?thesis by (simp add: eq_commute)
-qed
-
-lemma minus_equation_iff:
- "- a = b \<longleftrightarrow> - b = a"
-proof -
- have "- a = - (- b) \<longleftrightarrow> a = -b" by (rule neg_equal_iff_equal)
- thus ?thesis by (simp add: eq_commute)
-qed
-
-lemma diff_add_cancel: "a - b + b = a"
-by (simp add: diff_minus add_assoc)
-
-lemma add_diff_cancel: "a + b - b = a"
-by (simp add: diff_minus add_assoc)
-
-declare diff_minus[symmetric, algebra_simps]
-
-lemma eq_neg_iff_add_eq_0: "a = - b \<longleftrightarrow> a + b = 0"
-proof
- assume "a = - b" then show "a + b = 0" by simp
-next
- assume "a + b = 0"
- moreover have "a + (b + - b) = (a + b) + - b"
- by (simp only: add_assoc)
- ultimately show "a = - b" by simp
-qed
-
-end
-
-class ab_group_add = minus + uminus + comm_monoid_add +
- assumes ab_left_minus: "- a + a = 0"
- assumes ab_diff_minus: "a - b = a + (- b)"
-begin
-
-subclass group_add
- proof qed (simp_all add: ab_left_minus ab_diff_minus)
-
-subclass cancel_comm_monoid_add
-proof
- fix a b c :: 'a
- assume "a + b = a + c"
- then have "- a + a + b = - a + a + c"
- unfolding add_assoc by simp
- then show "b = c" by simp
-qed
-
-lemma uminus_add_conv_diff[algebra_simps]:
- "- a + b = b - a"
-by (simp add:diff_minus add_commute)
-
-lemma minus_add_distrib [simp]:
- "- (a + b) = - a + - b"
-by (rule minus_unique) (simp add: add_ac)
-
-lemma minus_diff_eq [simp]:
- "- (a - b) = b - a"
-by (simp add: diff_minus add_commute)
-
-lemma add_diff_eq[algebra_simps]: "a + (b - c) = (a + b) - c"
-by (simp add: diff_minus add_ac)
-
-lemma diff_add_eq[algebra_simps]: "(a - b) + c = (a + c) - b"
-by (simp add: diff_minus add_ac)
-
-lemma diff_eq_eq[algebra_simps]: "a - b = c \<longleftrightarrow> a = c + b"
-by (auto simp add: diff_minus add_assoc)
-
-lemma eq_diff_eq[algebra_simps]: "a = c - b \<longleftrightarrow> a + b = c"
-by (auto simp add: diff_minus add_assoc)
-
-lemma diff_diff_eq[algebra_simps]: "(a - b) - c = a - (b + c)"
-by (simp add: diff_minus add_ac)
-
-lemma diff_diff_eq2[algebra_simps]: "a - (b - c) = (a + c) - b"
-by (simp add: diff_minus add_ac)
-
-lemma eq_iff_diff_eq_0: "a = b \<longleftrightarrow> a - b = 0"
-by (simp add: algebra_simps)
-
-lemma diff_eq_0_iff_eq [simp, noatp]: "a - b = 0 \<longleftrightarrow> a = b"
-by (simp add: algebra_simps)
-
-end
-
-subsection {* (Partially) Ordered Groups *}
-
-class pordered_ab_semigroup_add = order + ab_semigroup_add +
- assumes add_left_mono: "a \<le> b \<Longrightarrow> c + a \<le> c + b"
-begin
-
-lemma add_right_mono:
- "a \<le> b \<Longrightarrow> a + c \<le> b + c"
-by (simp add: add_commute [of _ c] add_left_mono)
-
-text {* non-strict, in both arguments *}
-lemma add_mono:
- "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> a + c \<le> b + d"
- apply (erule add_right_mono [THEN order_trans])
- apply (simp add: add_commute add_left_mono)
- done
-
-end
-
-class pordered_cancel_ab_semigroup_add =
- pordered_ab_semigroup_add + cancel_ab_semigroup_add
-begin
-
-lemma add_strict_left_mono:
- "a < b \<Longrightarrow> c + a < c + b"
-by (auto simp add: less_le add_left_mono)
-
-lemma add_strict_right_mono:
- "a < b \<Longrightarrow> a + c < b + c"
-by (simp add: add_commute [of _ c] add_strict_left_mono)
-
-text{*Strict monotonicity in both arguments*}
-lemma add_strict_mono:
- "a < b \<Longrightarrow> c < d \<Longrightarrow> a + c < b + d"
-apply (erule add_strict_right_mono [THEN less_trans])
-apply (erule add_strict_left_mono)
-done
-
-lemma add_less_le_mono:
- "a < b \<Longrightarrow> c \<le> d \<Longrightarrow> a + c < b + d"
-apply (erule add_strict_right_mono [THEN less_le_trans])
-apply (erule add_left_mono)
-done
-
-lemma add_le_less_mono:
- "a \<le> b \<Longrightarrow> c < d \<Longrightarrow> a + c < b + d"
-apply (erule add_right_mono [THEN le_less_trans])
-apply (erule add_strict_left_mono)
-done
-
-end
-
-class pordered_ab_semigroup_add_imp_le =
- pordered_cancel_ab_semigroup_add +
- assumes add_le_imp_le_left: "c + a \<le> c + b \<Longrightarrow> a \<le> b"
-begin
-
-lemma add_less_imp_less_left:
- assumes less: "c + a < c + b" shows "a < b"
-proof -
- from less have le: "c + a <= c + b" by (simp add: order_le_less)
- have "a <= b"
- apply (insert le)
- apply (drule add_le_imp_le_left)
- by (insert le, drule add_le_imp_le_left, assumption)
- moreover have "a \<noteq> b"
- proof (rule ccontr)
- assume "~(a \<noteq> b)"
- then have "a = b" by simp
- then have "c + a = c + b" by simp
- with less show "False"by simp
- qed
- ultimately show "a < b" by (simp add: order_le_less)
-qed
-
-lemma add_less_imp_less_right:
- "a + c < b + c \<Longrightarrow> a < b"
-apply (rule add_less_imp_less_left [of c])
-apply (simp add: add_commute)
-done
-
-lemma add_less_cancel_left [simp]:
- "c + a < c + b \<longleftrightarrow> a < b"
-by (blast intro: add_less_imp_less_left add_strict_left_mono)
-
-lemma add_less_cancel_right [simp]:
- "a + c < b + c \<longleftrightarrow> a < b"
-by (blast intro: add_less_imp_less_right add_strict_right_mono)
-
-lemma add_le_cancel_left [simp]:
- "c + a \<le> c + b \<longleftrightarrow> a \<le> b"
-by (auto, drule add_le_imp_le_left, simp_all add: add_left_mono)
-
-lemma add_le_cancel_right [simp]:
- "a + c \<le> b + c \<longleftrightarrow> a \<le> b"
-by (simp add: add_commute [of a c] add_commute [of b c])
-
-lemma add_le_imp_le_right:
- "a + c \<le> b + c \<Longrightarrow> a \<le> b"
-by simp
-
-lemma max_add_distrib_left:
- "max x y + z = max (x + z) (y + z)"
- unfolding max_def by auto
-
-lemma min_add_distrib_left:
- "min x y + z = min (x + z) (y + z)"
- unfolding min_def by auto
-
-end
-
-subsection {* Support for reasoning about signs *}
-
-class pordered_comm_monoid_add =
- pordered_cancel_ab_semigroup_add + comm_monoid_add
-begin
-
-lemma add_pos_nonneg:
- assumes "0 < a" and "0 \<le> b" shows "0 < a + b"
-proof -
- have "0 + 0 < a + b"
- using assms by (rule add_less_le_mono)
- then show ?thesis by simp
-qed
-
-lemma add_pos_pos:
- assumes "0 < a" and "0 < b" shows "0 < a + b"
-by (rule add_pos_nonneg) (insert assms, auto)
-
-lemma add_nonneg_pos:
- assumes "0 \<le> a" and "0 < b" shows "0 < a + b"
-proof -
- have "0 + 0 < a + b"
- using assms by (rule add_le_less_mono)
- then show ?thesis by simp
-qed
-
-lemma add_nonneg_nonneg:
- assumes "0 \<le> a" and "0 \<le> b" shows "0 \<le> a + b"
-proof -
- have "0 + 0 \<le> a + b"
- using assms by (rule add_mono)
- then show ?thesis by simp
-qed
-
-lemma add_neg_nonpos:
- assumes "a < 0" and "b \<le> 0" shows "a + b < 0"
-proof -
- have "a + b < 0 + 0"
- using assms by (rule add_less_le_mono)
- then show ?thesis by simp
-qed
-
-lemma add_neg_neg:
- assumes "a < 0" and "b < 0" shows "a + b < 0"
-by (rule add_neg_nonpos) (insert assms, auto)
-
-lemma add_nonpos_neg:
- assumes "a \<le> 0" and "b < 0" shows "a + b < 0"
-proof -
- have "a + b < 0 + 0"
- using assms by (rule add_le_less_mono)
- then show ?thesis by simp
-qed
-
-lemma add_nonpos_nonpos:
- assumes "a \<le> 0" and "b \<le> 0" shows "a + b \<le> 0"
-proof -
- have "a + b \<le> 0 + 0"
- using assms by (rule add_mono)
- then show ?thesis by simp
-qed
-
-lemmas add_sign_intros =
- add_pos_nonneg add_pos_pos add_nonneg_pos add_nonneg_nonneg
- add_neg_nonpos add_neg_neg add_nonpos_neg add_nonpos_nonpos
-
-lemma add_nonneg_eq_0_iff:
- assumes x: "0 \<le> x" and y: "0 \<le> y"
- shows "x + y = 0 \<longleftrightarrow> x = 0 \<and> y = 0"
-proof (intro iffI conjI)
- have "x = x + 0" by simp
- also have "x + 0 \<le> x + y" using y by (rule add_left_mono)
- also assume "x + y = 0"
- also have "0 \<le> x" using x .
- finally show "x = 0" .
-next
- have "y = 0 + y" by simp
- also have "0 + y \<le> x + y" using x by (rule add_right_mono)
- also assume "x + y = 0"
- also have "0 \<le> y" using y .
- finally show "y = 0" .
-next
- assume "x = 0 \<and> y = 0"
- then show "x + y = 0" by simp
-qed
-
-end
-
-class pordered_ab_group_add =
- ab_group_add + pordered_ab_semigroup_add
-begin
-
-subclass pordered_cancel_ab_semigroup_add ..
-
-subclass pordered_ab_semigroup_add_imp_le
-proof
- fix a b c :: 'a
- assume "c + a \<le> c + b"
- hence "(-c) + (c + a) \<le> (-c) + (c + b)" by (rule add_left_mono)
- hence "((-c) + c) + a \<le> ((-c) + c) + b" by (simp only: add_assoc)
- thus "a \<le> b" by simp
-qed
-
-subclass pordered_comm_monoid_add ..
-
-lemma max_diff_distrib_left:
- shows "max x y - z = max (x - z) (y - z)"
-by (simp add: diff_minus, rule max_add_distrib_left)
-
-lemma min_diff_distrib_left:
- shows "min x y - z = min (x - z) (y - z)"
-by (simp add: diff_minus, rule min_add_distrib_left)
-
-lemma le_imp_neg_le:
- assumes "a \<le> b" shows "-b \<le> -a"
-proof -
- have "-a+a \<le> -a+b" using `a \<le> b` by (rule add_left_mono)
- hence "0 \<le> -a+b" by simp
- hence "0 + (-b) \<le> (-a + b) + (-b)" by (rule add_right_mono)
- thus ?thesis by (simp add: add_assoc)
-qed
-
-lemma neg_le_iff_le [simp]: "- b \<le> - a \<longleftrightarrow> a \<le> b"
-proof
- assume "- b \<le> - a"
- hence "- (- a) \<le> - (- b)" by (rule le_imp_neg_le)
- thus "a\<le>b" by simp
-next
- assume "a\<le>b"
- thus "-b \<le> -a" by (rule le_imp_neg_le)
-qed
-
-lemma neg_le_0_iff_le [simp]: "- a \<le> 0 \<longleftrightarrow> 0 \<le> a"
-by (subst neg_le_iff_le [symmetric], simp)
-
-lemma neg_0_le_iff_le [simp]: "0 \<le> - a \<longleftrightarrow> a \<le> 0"
-by (subst neg_le_iff_le [symmetric], simp)
-
-lemma neg_less_iff_less [simp]: "- b < - a \<longleftrightarrow> a < b"
-by (force simp add: less_le)
-
-lemma neg_less_0_iff_less [simp]: "- a < 0 \<longleftrightarrow> 0 < a"
-by (subst neg_less_iff_less [symmetric], simp)
-
-lemma neg_0_less_iff_less [simp]: "0 < - a \<longleftrightarrow> a < 0"
-by (subst neg_less_iff_less [symmetric], simp)
-
-text{*The next several equations can make the simplifier loop!*}
-
-lemma less_minus_iff: "a < - b \<longleftrightarrow> b < - a"
-proof -
- have "(- (-a) < - b) = (b < - a)" by (rule neg_less_iff_less)
- thus ?thesis by simp
-qed
-
-lemma minus_less_iff: "- a < b \<longleftrightarrow> - b < a"
-proof -
- have "(- a < - (-b)) = (- b < a)" by (rule neg_less_iff_less)
- thus ?thesis by simp
-qed
-
-lemma le_minus_iff: "a \<le> - b \<longleftrightarrow> b \<le> - a"
-proof -
- have mm: "!! a (b::'a). (-(-a)) < -b \<Longrightarrow> -(-b) < -a" by (simp only: minus_less_iff)
- have "(- (- a) <= -b) = (b <= - a)"
- apply (auto simp only: le_less)
- apply (drule mm)
- apply (simp_all)
- apply (drule mm[simplified], assumption)
- done
- then show ?thesis by simp
-qed
-
-lemma minus_le_iff: "- a \<le> b \<longleftrightarrow> - b \<le> a"
-by (auto simp add: le_less minus_less_iff)
-
-lemma less_iff_diff_less_0: "a < b \<longleftrightarrow> a - b < 0"
-proof -
- have "(a < b) = (a + (- b) < b + (-b))"
- by (simp only: add_less_cancel_right)
- also have "... = (a - b < 0)" by (simp add: diff_minus)
- finally show ?thesis .
-qed
-
-lemma diff_less_eq[algebra_simps]: "a - b < c \<longleftrightarrow> a < c + b"
-apply (subst less_iff_diff_less_0 [of a])
-apply (rule less_iff_diff_less_0 [of _ c, THEN ssubst])
-apply (simp add: diff_minus add_ac)
-done
-
-lemma less_diff_eq[algebra_simps]: "a < c - b \<longleftrightarrow> a + b < c"
-apply (subst less_iff_diff_less_0 [of "plus a b"])
-apply (subst less_iff_diff_less_0 [of a])
-apply (simp add: diff_minus add_ac)
-done
-
-lemma diff_le_eq[algebra_simps]: "a - b \<le> c \<longleftrightarrow> a \<le> c + b"
-by (auto simp add: le_less diff_less_eq diff_add_cancel add_diff_cancel)
-
-lemma le_diff_eq[algebra_simps]: "a \<le> c - b \<longleftrightarrow> a + b \<le> c"
-by (auto simp add: le_less less_diff_eq diff_add_cancel add_diff_cancel)
-
-lemma le_iff_diff_le_0: "a \<le> b \<longleftrightarrow> a - b \<le> 0"
-by (simp add: algebra_simps)
-
-text{*Legacy - use @{text algebra_simps} *}
-lemmas group_simps[noatp] = algebra_simps
-
-end
-
-text{*Legacy - use @{text algebra_simps} *}
-lemmas group_simps[noatp] = algebra_simps
-
-class ordered_ab_semigroup_add =
- linorder + pordered_ab_semigroup_add
-
-class ordered_cancel_ab_semigroup_add =
- linorder + pordered_cancel_ab_semigroup_add
-begin
-
-subclass ordered_ab_semigroup_add ..
-
-subclass pordered_ab_semigroup_add_imp_le
-proof
- fix a b c :: 'a
- assume le: "c + a <= c + b"
- show "a <= b"
- proof (rule ccontr)
- assume w: "~ a \<le> b"
- hence "b <= a" by (simp add: linorder_not_le)
- hence le2: "c + b <= c + a" by (rule add_left_mono)
- have "a = b"
- apply (insert le)
- apply (insert le2)
- apply (drule antisym, simp_all)
- done
- with w show False
- by (simp add: linorder_not_le [symmetric])
- qed
-qed
-
-end
-
-class ordered_ab_group_add =
- linorder + pordered_ab_group_add
-begin
-
-subclass ordered_cancel_ab_semigroup_add ..
-
-lemma neg_less_eq_nonneg:
- "- a \<le> a \<longleftrightarrow> 0 \<le> a"
-proof
- assume A: "- a \<le> a" show "0 \<le> a"
- proof (rule classical)
- assume "\<not> 0 \<le> a"
- then have "a < 0" by auto
- with A have "- a < 0" by (rule le_less_trans)
- then show ?thesis by auto
- qed
-next
- assume A: "0 \<le> a" show "- a \<le> a"
- proof (rule order_trans)
- show "- a \<le> 0" using A by (simp add: minus_le_iff)
- next
- show "0 \<le> a" using A .
- qed
-qed
-
-lemma less_eq_neg_nonpos:
- "a \<le> - a \<longleftrightarrow> a \<le> 0"
-proof
- assume A: "a \<le> - a" show "a \<le> 0"
- proof (rule classical)
- assume "\<not> a \<le> 0"
- then have "0 < a" by auto
- then have "0 < - a" using A by (rule less_le_trans)
- then show ?thesis by auto
- qed
-next
- assume A: "a \<le> 0" show "a \<le> - a"
- proof (rule order_trans)
- show "0 \<le> - a" using A by (simp add: minus_le_iff)
- next
- show "a \<le> 0" using A .
- qed
-qed
-
-lemma equal_neg_zero:
- "a = - a \<longleftrightarrow> a = 0"
-proof
- assume "a = 0" then show "a = - a" by simp
-next
- assume A: "a = - a" show "a = 0"
- proof (cases "0 \<le> a")
- case True with A have "0 \<le> - a" by auto
- with le_minus_iff have "a \<le> 0" by simp
- with True show ?thesis by (auto intro: order_trans)
- next
- case False then have B: "a \<le> 0" by auto
- with A have "- a \<le> 0" by auto
- with B show ?thesis by (auto intro: order_trans)
- qed
-qed
-
-lemma neg_equal_zero:
- "- a = a \<longleftrightarrow> a = 0"
- unfolding equal_neg_zero [symmetric] by auto
-
-end
-
--- {* FIXME localize the following *}
-
-lemma add_increasing:
- fixes c :: "'a::{pordered_ab_semigroup_add_imp_le, comm_monoid_add}"
- shows "[|0\<le>a; b\<le>c|] ==> b \<le> a + c"
-by (insert add_mono [of 0 a b c], simp)
-
-lemma add_increasing2:
- fixes c :: "'a::{pordered_ab_semigroup_add_imp_le, comm_monoid_add}"
- shows "[|0\<le>c; b\<le>a|] ==> b \<le> a + c"
-by (simp add:add_increasing add_commute[of a])
-
-lemma add_strict_increasing:
- fixes c :: "'a::{pordered_ab_semigroup_add_imp_le, comm_monoid_add}"
- shows "[|0<a; b\<le>c|] ==> b < a + c"
-by (insert add_less_le_mono [of 0 a b c], simp)
-
-lemma add_strict_increasing2:
- fixes c :: "'a::{pordered_ab_semigroup_add_imp_le, comm_monoid_add}"
- shows "[|0\<le>a; b<c|] ==> b < a + c"
-by (insert add_le_less_mono [of 0 a b c], simp)
-
-
-class pordered_ab_group_add_abs = pordered_ab_group_add + abs +
- assumes abs_ge_zero [simp]: "\<bar>a\<bar> \<ge> 0"
- and abs_ge_self: "a \<le> \<bar>a\<bar>"
- and abs_leI: "a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b"
- and abs_minus_cancel [simp]: "\<bar>-a\<bar> = \<bar>a\<bar>"
- and abs_triangle_ineq: "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
-begin
-
-lemma abs_minus_le_zero: "- \<bar>a\<bar> \<le> 0"
- unfolding neg_le_0_iff_le by simp
-
-lemma abs_of_nonneg [simp]:
- assumes nonneg: "0 \<le> a" shows "\<bar>a\<bar> = a"
-proof (rule antisym)
- from nonneg le_imp_neg_le have "- a \<le> 0" by simp
- from this nonneg have "- a \<le> a" by (rule order_trans)
- then show "\<bar>a\<bar> \<le> a" by (auto intro: abs_leI)
-qed (rule abs_ge_self)
-
-lemma abs_idempotent [simp]: "\<bar>\<bar>a\<bar>\<bar> = \<bar>a\<bar>"
-by (rule antisym)
- (auto intro!: abs_ge_self abs_leI order_trans [of "uminus (abs a)" zero "abs a"])
-
-lemma abs_eq_0 [simp]: "\<bar>a\<bar> = 0 \<longleftrightarrow> a = 0"
-proof -
- have "\<bar>a\<bar> = 0 \<Longrightarrow> a = 0"
- proof (rule antisym)
- assume zero: "\<bar>a\<bar> = 0"
- with abs_ge_self show "a \<le> 0" by auto
- from zero have "\<bar>-a\<bar> = 0" by simp
- with abs_ge_self [of "uminus a"] have "- a \<le> 0" by auto
- with neg_le_0_iff_le show "0 \<le> a" by auto
- qed
- then show ?thesis by auto
-qed
-
-lemma abs_zero [simp]: "\<bar>0\<bar> = 0"
-by simp
-
-lemma abs_0_eq [simp, noatp]: "0 = \<bar>a\<bar> \<longleftrightarrow> a = 0"
-proof -
- have "0 = \<bar>a\<bar> \<longleftrightarrow> \<bar>a\<bar> = 0" by (simp only: eq_ac)
- thus ?thesis by simp
-qed
-
-lemma abs_le_zero_iff [simp]: "\<bar>a\<bar> \<le> 0 \<longleftrightarrow> a = 0"
-proof
- assume "\<bar>a\<bar> \<le> 0"
- then have "\<bar>a\<bar> = 0" by (rule antisym) simp
- thus "a = 0" by simp
-next
- assume "a = 0"
- thus "\<bar>a\<bar> \<le> 0" by simp
-qed
-
-lemma zero_less_abs_iff [simp]: "0 < \<bar>a\<bar> \<longleftrightarrow> a \<noteq> 0"
-by (simp add: less_le)
-
-lemma abs_not_less_zero [simp]: "\<not> \<bar>a\<bar> < 0"
-proof -
- have a: "\<And>x y. x \<le> y \<Longrightarrow> \<not> y < x" by auto
- show ?thesis by (simp add: a)
-qed
-
-lemma abs_ge_minus_self: "- a \<le> \<bar>a\<bar>"
-proof -
- have "- a \<le> \<bar>-a\<bar>" by (rule abs_ge_self)
- then show ?thesis by simp
-qed
-
-lemma abs_minus_commute:
- "\<bar>a - b\<bar> = \<bar>b - a\<bar>"
-proof -
- have "\<bar>a - b\<bar> = \<bar>- (a - b)\<bar>" by (simp only: abs_minus_cancel)
- also have "... = \<bar>b - a\<bar>" by simp
- finally show ?thesis .
-qed
-
-lemma abs_of_pos: "0 < a \<Longrightarrow> \<bar>a\<bar> = a"
-by (rule abs_of_nonneg, rule less_imp_le)
-
-lemma abs_of_nonpos [simp]:
- assumes "a \<le> 0" shows "\<bar>a\<bar> = - a"
-proof -
- let ?b = "- a"
- have "- ?b \<le> 0 \<Longrightarrow> \<bar>- ?b\<bar> = - (- ?b)"
- unfolding abs_minus_cancel [of "?b"]
- unfolding neg_le_0_iff_le [of "?b"]
- unfolding minus_minus by (erule abs_of_nonneg)
- then show ?thesis using assms by auto
-qed
-
-lemma abs_of_neg: "a < 0 \<Longrightarrow> \<bar>a\<bar> = - a"
-by (rule abs_of_nonpos, rule less_imp_le)
-
-lemma abs_le_D1: "\<bar>a\<bar> \<le> b \<Longrightarrow> a \<le> b"
-by (insert abs_ge_self, blast intro: order_trans)
-
-lemma abs_le_D2: "\<bar>a\<bar> \<le> b \<Longrightarrow> - a \<le> b"
-by (insert abs_le_D1 [of "uminus a"], simp)
-
-lemma abs_le_iff: "\<bar>a\<bar> \<le> b \<longleftrightarrow> a \<le> b \<and> - a \<le> b"
-by (blast intro: abs_leI dest: abs_le_D1 abs_le_D2)
-
-lemma abs_triangle_ineq2: "\<bar>a\<bar> - \<bar>b\<bar> \<le> \<bar>a - b\<bar>"
- apply (simp add: algebra_simps)
- apply (subgoal_tac "abs a = abs (plus b (minus a b))")
- apply (erule ssubst)
- apply (rule abs_triangle_ineq)
- apply (rule arg_cong[of _ _ abs])
- apply (simp add: algebra_simps)
-done
-
-lemma abs_triangle_ineq3: "\<bar>\<bar>a\<bar> - \<bar>b\<bar>\<bar> \<le> \<bar>a - b\<bar>"
- apply (subst abs_le_iff)
- apply auto
- apply (rule abs_triangle_ineq2)
- apply (subst abs_minus_commute)
- apply (rule abs_triangle_ineq2)
-done
-
-lemma abs_triangle_ineq4: "\<bar>a - b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
-proof -
- have "abs(a - b) = abs(a + - b)" by (subst diff_minus, rule refl)
- also have "... <= abs a + abs (- b)" by (rule abs_triangle_ineq)
- finally show ?thesis by simp
-qed
-
-lemma abs_diff_triangle_ineq: "\<bar>a + b - (c + d)\<bar> \<le> \<bar>a - c\<bar> + \<bar>b - d\<bar>"
-proof -
- have "\<bar>a + b - (c+d)\<bar> = \<bar>(a-c) + (b-d)\<bar>" by (simp add: diff_minus add_ac)
- also have "... \<le> \<bar>a-c\<bar> + \<bar>b-d\<bar>" by (rule abs_triangle_ineq)
- finally show ?thesis .
-qed
-
-lemma abs_add_abs [simp]:
- "\<bar>\<bar>a\<bar> + \<bar>b\<bar>\<bar> = \<bar>a\<bar> + \<bar>b\<bar>" (is "?L = ?R")
-proof (rule antisym)
- show "?L \<ge> ?R" by(rule abs_ge_self)
-next
- have "?L \<le> \<bar>\<bar>a\<bar>\<bar> + \<bar>\<bar>b\<bar>\<bar>" by(rule abs_triangle_ineq)
- also have "\<dots> = ?R" by simp
- finally show "?L \<le> ?R" .
-qed
-
-end
-
-
-subsection {* Lattice Ordered (Abelian) Groups *}
-
-class lordered_ab_group_add_meet = pordered_ab_group_add + lower_semilattice
-begin
-
-lemma add_inf_distrib_left:
- "a + inf b c = inf (a + b) (a + c)"
-apply (rule antisym)
-apply (simp_all add: le_infI)
-apply (rule add_le_imp_le_left [of "uminus a"])
-apply (simp only: add_assoc [symmetric], simp)
-apply rule
-apply (rule add_le_imp_le_left[of "a"], simp only: add_assoc[symmetric], simp)+
-done
-
-lemma add_inf_distrib_right:
- "inf a b + c = inf (a + c) (b + c)"
-proof -
- have "c + inf a b = inf (c+a) (c+b)" by (simp add: add_inf_distrib_left)
- thus ?thesis by (simp add: add_commute)
-qed
-
-end
-
-class lordered_ab_group_add_join = pordered_ab_group_add + upper_semilattice
-begin
-
-lemma add_sup_distrib_left:
- "a + sup b c = sup (a + b) (a + c)"
-apply (rule antisym)
-apply (rule add_le_imp_le_left [of "uminus a"])
-apply (simp only: add_assoc[symmetric], simp)
-apply rule
-apply (rule add_le_imp_le_left [of "a"], simp only: add_assoc[symmetric], simp)+
-apply (rule le_supI)
-apply (simp_all)
-done
-
-lemma add_sup_distrib_right:
- "sup a b + c = sup (a+c) (b+c)"
-proof -
- have "c + sup a b = sup (c+a) (c+b)" by (simp add: add_sup_distrib_left)
- thus ?thesis by (simp add: add_commute)
-qed
-
-end
-
-class lordered_ab_group_add = pordered_ab_group_add + lattice
-begin
-
-subclass lordered_ab_group_add_meet ..
-subclass lordered_ab_group_add_join ..
-
-lemmas add_sup_inf_distribs = add_inf_distrib_right add_inf_distrib_left add_sup_distrib_right add_sup_distrib_left
-
-lemma inf_eq_neg_sup: "inf a b = - sup (-a) (-b)"
-proof (rule inf_unique)
- fix a b :: 'a
- show "- sup (-a) (-b) \<le> a"
- by (rule add_le_imp_le_right [of _ "sup (uminus a) (uminus b)"])
- (simp, simp add: add_sup_distrib_left)
-next
- fix a b :: 'a
- show "- sup (-a) (-b) \<le> b"
- by (rule add_le_imp_le_right [of _ "sup (uminus a) (uminus b)"])
- (simp, simp add: add_sup_distrib_left)
-next
- fix a b c :: 'a
- assume "a \<le> b" "a \<le> c"
- then show "a \<le> - sup (-b) (-c)" by (subst neg_le_iff_le [symmetric])
- (simp add: le_supI)
-qed
-
-lemma sup_eq_neg_inf: "sup a b = - inf (-a) (-b)"
-proof (rule sup_unique)
- fix a b :: 'a
- show "a \<le> - inf (-a) (-b)"
- by (rule add_le_imp_le_right [of _ "inf (uminus a) (uminus b)"])
- (simp, simp add: add_inf_distrib_left)
-next
- fix a b :: 'a
- show "b \<le> - inf (-a) (-b)"
- by (rule add_le_imp_le_right [of _ "inf (uminus a) (uminus b)"])
- (simp, simp add: add_inf_distrib_left)
-next
- fix a b c :: 'a
- assume "a \<le> c" "b \<le> c"
- then show "- inf (-a) (-b) \<le> c" by (subst neg_le_iff_le [symmetric])
- (simp add: le_infI)
-qed
-
-lemma neg_inf_eq_sup: "- inf a b = sup (-a) (-b)"
-by (simp add: inf_eq_neg_sup)
-
-lemma neg_sup_eq_inf: "- sup a b = inf (-a) (-b)"
-by (simp add: sup_eq_neg_inf)
-
-lemma add_eq_inf_sup: "a + b = sup a b + inf a b"
-proof -
- have "0 = - inf 0 (a-b) + inf (a-b) 0" by (simp add: inf_commute)
- hence "0 = sup 0 (b-a) + inf (a-b) 0" by (simp add: inf_eq_neg_sup)
- hence "0 = (-a + sup a b) + (inf a b + (-b))"
- by (simp add: add_sup_distrib_left add_inf_distrib_right)
- (simp add: algebra_simps)
- thus ?thesis by (simp add: algebra_simps)
-qed
-
-subsection {* Positive Part, Negative Part, Absolute Value *}
-
-definition
- nprt :: "'a \<Rightarrow> 'a" where
- "nprt x = inf x 0"
-
-definition
- pprt :: "'a \<Rightarrow> 'a" where
- "pprt x = sup x 0"
-
-lemma pprt_neg: "pprt (- x) = - nprt x"
-proof -
- have "sup (- x) 0 = sup (- x) (- 0)" unfolding minus_zero ..
- also have "\<dots> = - inf x 0" unfolding neg_inf_eq_sup ..
- finally have "sup (- x) 0 = - inf x 0" .
- then show ?thesis unfolding pprt_def nprt_def .
-qed
-
-lemma nprt_neg: "nprt (- x) = - pprt x"
-proof -
- from pprt_neg have "pprt (- (- x)) = - nprt (- x)" .
- then have "pprt x = - nprt (- x)" by simp
- then show ?thesis by simp
-qed
-
-lemma prts: "a = pprt a + nprt a"
-by (simp add: pprt_def nprt_def add_eq_inf_sup[symmetric])
-
-lemma zero_le_pprt[simp]: "0 \<le> pprt a"
-by (simp add: pprt_def)
-
-lemma nprt_le_zero[simp]: "nprt a \<le> 0"
-by (simp add: nprt_def)
-
-lemma le_eq_neg: "a \<le> - b \<longleftrightarrow> a + b \<le> 0" (is "?l = ?r")
-proof -
- have a: "?l \<longrightarrow> ?r"
- apply (auto)
- apply (rule add_le_imp_le_right[of _ "uminus b" _])
- apply (simp add: add_assoc)
- done
- have b: "?r \<longrightarrow> ?l"
- apply (auto)
- apply (rule add_le_imp_le_right[of _ "b" _])
- apply (simp)
- done
- from a b show ?thesis by blast
-qed
-
-lemma pprt_0[simp]: "pprt 0 = 0" by (simp add: pprt_def)
-lemma nprt_0[simp]: "nprt 0 = 0" by (simp add: nprt_def)
-
-lemma pprt_eq_id [simp, noatp]: "0 \<le> x \<Longrightarrow> pprt x = x"
- by (simp add: pprt_def sup_aci sup_absorb1)
-
-lemma nprt_eq_id [simp, noatp]: "x \<le> 0 \<Longrightarrow> nprt x = x"
- by (simp add: nprt_def inf_aci inf_absorb1)
-
-lemma pprt_eq_0 [simp, noatp]: "x \<le> 0 \<Longrightarrow> pprt x = 0"
- by (simp add: pprt_def sup_aci sup_absorb2)
-
-lemma nprt_eq_0 [simp, noatp]: "0 \<le> x \<Longrightarrow> nprt x = 0"
- by (simp add: nprt_def inf_aci inf_absorb2)
-
-lemma sup_0_imp_0: "sup a (- a) = 0 \<Longrightarrow> a = 0"
-proof -
- {
- fix a::'a
- assume hyp: "sup a (-a) = 0"
- hence "sup a (-a) + a = a" by (simp)
- hence "sup (a+a) 0 = a" by (simp add: add_sup_distrib_right)
- hence "sup (a+a) 0 <= a" by (simp)
- hence "0 <= a" by (blast intro: order_trans inf_sup_ord)
- }
- note p = this
- assume hyp:"sup a (-a) = 0"
- hence hyp2:"sup (-a) (-(-a)) = 0" by (simp add: sup_commute)
- from p[OF hyp] p[OF hyp2] show "a = 0" by simp
-qed
-
-lemma inf_0_imp_0: "inf a (-a) = 0 \<Longrightarrow> a = 0"
-apply (simp add: inf_eq_neg_sup)
-apply (simp add: sup_commute)
-apply (erule sup_0_imp_0)
-done
-
-lemma inf_0_eq_0 [simp, noatp]: "inf a (- a) = 0 \<longleftrightarrow> a = 0"
-by (rule, erule inf_0_imp_0) simp
-
-lemma sup_0_eq_0 [simp, noatp]: "sup a (- a) = 0 \<longleftrightarrow> a = 0"
-by (rule, erule sup_0_imp_0) simp
-
-lemma zero_le_double_add_iff_zero_le_single_add [simp]:
- "0 \<le> a + a \<longleftrightarrow> 0 \<le> a"
-proof
- assume "0 <= a + a"
- hence a:"inf (a+a) 0 = 0" by (simp add: inf_commute inf_absorb1)
- have "(inf a 0)+(inf a 0) = inf (inf (a+a) 0) a" (is "?l=_")
- by (simp add: add_sup_inf_distribs inf_aci)
- hence "?l = 0 + inf a 0" by (simp add: a, simp add: inf_commute)
- hence "inf a 0 = 0" by (simp only: add_right_cancel)
- then show "0 <= a" unfolding le_iff_inf by (simp add: inf_commute)
-next
- assume a: "0 <= a"
- show "0 <= a + a" by (simp add: add_mono[OF a a, simplified])
-qed
-
-lemma double_zero: "a + a = 0 \<longleftrightarrow> a = 0"
-proof
- assume assm: "a + a = 0"
- then have "a + a + - a = - a" by simp
- then have "a + (a + - a) = - a" by (simp only: add_assoc)
- then have a: "- a = a" by simp
- show "a = 0" apply (rule antisym)
- apply (unfold neg_le_iff_le [symmetric, of a])
- unfolding a apply simp
- unfolding zero_le_double_add_iff_zero_le_single_add [symmetric, of a]
- unfolding assm unfolding le_less apply simp_all done
-next
- assume "a = 0" then show "a + a = 0" by simp
-qed
-
-lemma zero_less_double_add_iff_zero_less_single_add:
- "0 < a + a \<longleftrightarrow> 0 < a"
-proof (cases "a = 0")
- case True then show ?thesis by auto
-next
- case False then show ?thesis (*FIXME tune proof*)
- unfolding less_le apply simp apply rule
- apply clarify
- apply rule
- apply assumption
- apply (rule notI)
- unfolding double_zero [symmetric, of a] apply simp
- done
-qed
-
-lemma double_add_le_zero_iff_single_add_le_zero [simp]:
- "a + a \<le> 0 \<longleftrightarrow> a \<le> 0"
-proof -
- have "a + a \<le> 0 \<longleftrightarrow> 0 \<le> - (a + a)" by (subst le_minus_iff, simp)
- moreover have "\<dots> \<longleftrightarrow> a \<le> 0" by (simp add: zero_le_double_add_iff_zero_le_single_add)
- ultimately show ?thesis by blast
-qed
-
-lemma double_add_less_zero_iff_single_less_zero [simp]:
- "a + a < 0 \<longleftrightarrow> a < 0"
-proof -
- have "a + a < 0 \<longleftrightarrow> 0 < - (a + a)" by (subst less_minus_iff, simp)
- moreover have "\<dots> \<longleftrightarrow> a < 0" by (simp add: zero_less_double_add_iff_zero_less_single_add)
- ultimately show ?thesis by blast
-qed
-
-declare neg_inf_eq_sup [simp] neg_sup_eq_inf [simp]
-
-lemma le_minus_self_iff: "a \<le> - a \<longleftrightarrow> a \<le> 0"
-proof -
- from add_le_cancel_left [of "uminus a" "plus a a" zero]
- have "(a <= -a) = (a+a <= 0)"
- by (simp add: add_assoc[symmetric])
- thus ?thesis by simp
-qed
-
-lemma minus_le_self_iff: "- a \<le> a \<longleftrightarrow> 0 \<le> a"
-proof -
- from add_le_cancel_left [of "uminus a" zero "plus a a"]
- have "(-a <= a) = (0 <= a+a)"
- by (simp add: add_assoc[symmetric])
- thus ?thesis by simp
-qed
-
-lemma zero_le_iff_zero_nprt: "0 \<le> a \<longleftrightarrow> nprt a = 0"
-unfolding le_iff_inf by (simp add: nprt_def inf_commute)
-
-lemma le_zero_iff_zero_pprt: "a \<le> 0 \<longleftrightarrow> pprt a = 0"
-unfolding le_iff_sup by (simp add: pprt_def sup_commute)
-
-lemma le_zero_iff_pprt_id: "0 \<le> a \<longleftrightarrow> pprt a = a"
-unfolding le_iff_sup by (simp add: pprt_def sup_commute)
-
-lemma zero_le_iff_nprt_id: "a \<le> 0 \<longleftrightarrow> nprt a = a"
-unfolding le_iff_inf by (simp add: nprt_def inf_commute)
-
-lemma pprt_mono [simp, noatp]: "a \<le> b \<Longrightarrow> pprt a \<le> pprt b"
-unfolding le_iff_sup by (simp add: pprt_def sup_aci sup_assoc [symmetric, of a])
-
-lemma nprt_mono [simp, noatp]: "a \<le> b \<Longrightarrow> nprt a \<le> nprt b"
-unfolding le_iff_inf by (simp add: nprt_def inf_aci inf_assoc [symmetric, of a])
-
-end
-
-lemmas add_sup_inf_distribs = add_inf_distrib_right add_inf_distrib_left add_sup_distrib_right add_sup_distrib_left
-
-
-class lordered_ab_group_add_abs = lordered_ab_group_add + abs +
- assumes abs_lattice: "\<bar>a\<bar> = sup a (- a)"
-begin
-
-lemma abs_prts: "\<bar>a\<bar> = pprt a - nprt a"
-proof -
- have "0 \<le> \<bar>a\<bar>"
- proof -
- have a: "a \<le> \<bar>a\<bar>" and b: "- a \<le> \<bar>a\<bar>" by (auto simp add: abs_lattice)
- show ?thesis by (rule add_mono [OF a b, simplified])
- qed
- then have "0 \<le> sup a (- a)" unfolding abs_lattice .
- then have "sup (sup a (- a)) 0 = sup a (- a)" by (rule sup_absorb1)
- then show ?thesis
- by (simp add: add_sup_inf_distribs sup_aci
- pprt_def nprt_def diff_minus abs_lattice)
-qed
-
-subclass pordered_ab_group_add_abs
-proof
- have abs_ge_zero [simp]: "\<And>a. 0 \<le> \<bar>a\<bar>"
- proof -
- fix a b
- have a: "a \<le> \<bar>a\<bar>" and b: "- a \<le> \<bar>a\<bar>" by (auto simp add: abs_lattice)
- show "0 \<le> \<bar>a\<bar>" by (rule add_mono [OF a b, simplified])
- qed
- have abs_leI: "\<And>a b. a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b"
- by (simp add: abs_lattice le_supI)
- fix a b
- show "0 \<le> \<bar>a\<bar>" by simp
- show "a \<le> \<bar>a\<bar>"
- by (auto simp add: abs_lattice)
- show "\<bar>-a\<bar> = \<bar>a\<bar>"
- by (simp add: abs_lattice sup_commute)
- show "a \<le> b \<Longrightarrow> - a \<le> b \<Longrightarrow> \<bar>a\<bar> \<le> b" by (fact abs_leI)
- show "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
- proof -
- have g:"abs a + abs b = sup (a+b) (sup (-a-b) (sup (-a+b) (a + (-b))))" (is "_=sup ?m ?n")
- by (simp add: abs_lattice add_sup_inf_distribs sup_aci diff_minus)
- have a:"a+b <= sup ?m ?n" by (simp)
- have b:"-a-b <= ?n" by (simp)
- have c:"?n <= sup ?m ?n" by (simp)
- from b c have d: "-a-b <= sup ?m ?n" by(rule order_trans)
- have e:"-a-b = -(a+b)" by (simp add: diff_minus)
- from a d e have "abs(a+b) <= sup ?m ?n"
- by (drule_tac abs_leI, auto)
- with g[symmetric] show ?thesis by simp
- qed
-qed
-
-end
-
-lemma sup_eq_if:
- fixes a :: "'a\<Colon>{lordered_ab_group_add, linorder}"
- shows "sup a (- a) = (if a < 0 then - a else a)"
-proof -
- note add_le_cancel_right [of a a "- a", symmetric, simplified]
- moreover note add_le_cancel_right [of "-a" a a, symmetric, simplified]
- then show ?thesis by (auto simp: sup_max min_max.sup_absorb1 min_max.sup_absorb2)
-qed
-
-lemma abs_if_lattice:
- fixes a :: "'a\<Colon>{lordered_ab_group_add_abs, linorder}"
- shows "\<bar>a\<bar> = (if a < 0 then - a else a)"
-by auto
-
-
-text {* Needed for abelian cancellation simprocs: *}
-
-lemma add_cancel_21: "((x::'a::ab_group_add) + (y + z) = y + u) = (x + z = u)"
-apply (subst add_left_commute)
-apply (subst add_left_cancel)
-apply simp
-done
-
-lemma add_cancel_end: "(x + (y + z) = y) = (x = - (z::'a::ab_group_add))"
-apply (subst add_cancel_21[of _ _ _ 0, simplified])
-apply (simp add: add_right_cancel[symmetric, of "x" "-z" "z", simplified])
-done
-
-lemma less_eqI: "(x::'a::pordered_ab_group_add) - y = x' - y' \<Longrightarrow> (x < y) = (x' < y')"
-by (simp add: less_iff_diff_less_0[of x y] less_iff_diff_less_0[of x' y'])
-
-lemma le_eqI: "(x::'a::pordered_ab_group_add) - y = x' - y' \<Longrightarrow> (y <= x) = (y' <= x')"
-apply (simp add: le_iff_diff_le_0[of y x] le_iff_diff_le_0[of y' x'])
-apply (simp add: neg_le_iff_le[symmetric, of "y-x" 0] neg_le_iff_le[symmetric, of "y'-x'" 0])
-done
-
-lemma eq_eqI: "(x::'a::ab_group_add) - y = x' - y' \<Longrightarrow> (x = y) = (x' = y')"
-by (simp only: eq_iff_diff_eq_0[of x y] eq_iff_diff_eq_0[of x' y'])
-
-lemma diff_def: "(x::'a::ab_group_add) - y == x + (-y)"
-by (simp add: diff_minus)
-
-lemma le_add_right_mono:
- assumes
- "a <= b + (c::'a::pordered_ab_group_add)"
- "c <= d"
- shows "a <= b + d"
- apply (rule_tac order_trans[where y = "b+c"])
- apply (simp_all add: prems)
- done
-
-lemma estimate_by_abs:
- "a + b <= (c::'a::lordered_ab_group_add_abs) \<Longrightarrow> a <= c + abs b"
-proof -
- assume "a+b <= c"
- hence 2: "a <= c+(-b)" by (simp add: algebra_simps)
- have 3: "(-b) <= abs b" by (rule abs_ge_minus_self)
- show ?thesis by (rule le_add_right_mono[OF 2 3])
-qed
-
-subsection {* Tools setup *}
-
-lemma add_mono_thms_ordered_semiring [noatp]:
- fixes i j k :: "'a\<Colon>pordered_ab_semigroup_add"
- shows "i \<le> j \<and> k \<le> l \<Longrightarrow> i + k \<le> j + l"
- and "i = j \<and> k \<le> l \<Longrightarrow> i + k \<le> j + l"
- and "i \<le> j \<and> k = l \<Longrightarrow> i + k \<le> j + l"
- and "i = j \<and> k = l \<Longrightarrow> i + k = j + l"
-by (rule add_mono, clarify+)+
-
-lemma add_mono_thms_ordered_field [noatp]:
- fixes i j k :: "'a\<Colon>pordered_cancel_ab_semigroup_add"
- shows "i < j \<and> k = l \<Longrightarrow> i + k < j + l"
- and "i = j \<and> k < l \<Longrightarrow> i + k < j + l"
- and "i < j \<and> k \<le> l \<Longrightarrow> i + k < j + l"
- and "i \<le> j \<and> k < l \<Longrightarrow> i + k < j + l"
- and "i < j \<and> k < l \<Longrightarrow> i + k < j + l"
-by (auto intro: add_strict_right_mono add_strict_left_mono
- add_less_le_mono add_le_less_mono add_strict_mono)
-
-text{*Simplification of @{term "x-y < 0"}, etc.*}
-lemmas diff_less_0_iff_less [simp, noatp] = less_iff_diff_less_0 [symmetric]
-lemmas diff_le_0_iff_le [simp, noatp] = le_iff_diff_le_0 [symmetric]
-
-ML {*
-structure ab_group_add_cancel = Abel_Cancel
-(
-
-(* term order for abelian groups *)
-
-fun agrp_ord (Const (a, _)) = find_index (fn a' => a = a')
- [@{const_name Algebras.zero}, @{const_name Algebras.plus},
- @{const_name Algebras.uminus}, @{const_name Algebras.minus}]
- | agrp_ord _ = ~1;
-
-fun termless_agrp (a, b) = (TermOrd.term_lpo agrp_ord (a, b) = LESS);
-
-local
- val ac1 = mk_meta_eq @{thm add_assoc};
- val ac2 = mk_meta_eq @{thm add_commute};
- val ac3 = mk_meta_eq @{thm add_left_commute};
- fun solve_add_ac thy _ (_ $ (Const (@{const_name Algebras.plus},_) $ _ $ _) $ _) =
- SOME ac1
- | solve_add_ac thy _ (_ $ x $ (Const (@{const_name Algebras.plus},_) $ y $ z)) =
- if termless_agrp (y, x) then SOME ac3 else NONE
- | solve_add_ac thy _ (_ $ x $ y) =
- if termless_agrp (y, x) then SOME ac2 else NONE
- | solve_add_ac thy _ _ = NONE
-in
- val add_ac_proc = Simplifier.simproc @{theory}
- "add_ac_proc" ["x + y::'a::ab_semigroup_add"] solve_add_ac;
-end;
-
-val eq_reflection = @{thm eq_reflection};
-
-val T = @{typ "'a::ab_group_add"};
-
-val cancel_ss = HOL_basic_ss settermless termless_agrp
- addsimprocs [add_ac_proc] addsimps
- [@{thm add_0_left}, @{thm add_0_right}, @{thm diff_def},
- @{thm minus_add_distrib}, @{thm minus_minus}, @{thm minus_zero},
- @{thm right_minus}, @{thm left_minus}, @{thm add_minus_cancel},
- @{thm minus_add_cancel}];
-
-val sum_pats = [@{cterm "x + y::'a::ab_group_add"}, @{cterm "x - y::'a::ab_group_add"}];
-
-val eqI_rules = [@{thm less_eqI}, @{thm le_eqI}, @{thm eq_eqI}];
-
-val dest_eqI =
- fst o HOLogic.dest_bin "op =" HOLogic.boolT o HOLogic.dest_Trueprop o concl_of;
-
-);
-*}
-
-ML {*
- Addsimprocs [ab_group_add_cancel.sum_conv, ab_group_add_cancel.rel_conv];
-*}
-
-code_modulename SML
- OrderedGroup Arith
-
-code_modulename OCaml
- OrderedGroup Arith
-
-code_modulename Haskell
- OrderedGroup Arith
-
-end
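For reference, the deleted lattice-ordered group material above culminates in the decomposition a = sup a 0 + inf a 0 (add_eq_inf_sup, prts, abs_prts). On a concrete linear type such as int, where sup and inf coincide with max and min, the identity can be checked directly; the following lines are only a sketch of that observation and assume nothing beyond the standard min_def/max_def rules.

lemma "(a::int) = max a 0 + min a 0"
  by (cases "a \<le> 0") (simp_all add: min_def max_def)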
--- a/src/HOL/Orderings.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Orderings.thy Tue Feb 09 17:06:05 2010 +0100
@@ -1052,7 +1052,7 @@
subsection {* Dense orders *}
-class dense_linear_order = linorder +
+class dense_linorder = linorder +
assumes gt_ex: "\<exists>y. x < y"
and lt_ex: "\<exists>y. y < x"
and dense: "x < y \<Longrightarrow> (\<exists>z. x < z \<and> z < y)"
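As a usage sketch of the renamed class: the dense axiom produces strictly intermediate points directly. The lemma below assumes that rat remains an instance of dense_linorder, just as it was of dense_linear_order before this rename.

lemma "\<exists>z::rat. 0 < z \<and> z < 1"
  by (rule dense) simp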
--- a/src/HOL/PReal.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/PReal.thy Tue Feb 09 17:06:05 2010 +0100
@@ -12,7 +12,7 @@
imports Rational
begin
-text{*Could be generalized and moved to @{text Ring_and_Field}*}
+text{*Could be generalized and moved to @{text Groups}*}
lemma add_eq_exists: "\<exists>x. a+x = (b::rat)"
by (rule_tac x="b-a" in exI, simp)
@@ -23,7 +23,7 @@
(\<forall>y \<in> A. ((\<forall>z. 0<z & z < y --> z \<in> A) & (\<exists>u \<in> A. y < u))))"
lemma interval_empty_iff:
- "{y. (x::'a::dense_linear_order) < y \<and> y < z} = {} \<longleftrightarrow> \<not> x < z"
+ "{y. (x::'a::dense_linorder) < y \<and> y < z} = {} \<longleftrightarrow> \<not> x < z"
by (auto dest: dense)
@@ -1155,7 +1155,7 @@
preal_add_le_cancel_right preal_add_le_cancel_left
preal_add_left_cancel_iff preal_add_right_cancel_iff
-instance preal :: ordered_cancel_ab_semigroup_add
+instance preal :: linordered_cancel_ab_semigroup_add
proof
fix a b c :: preal
show "a + b = a + c \<Longrightarrow> b = c" by (rule preal_add_left_cancel)
--- a/src/HOL/Parity.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Parity.thy Tue Feb 09 17:06:05 2010 +0100
@@ -218,7 +218,7 @@
done
lemma zero_le_even_power: "even n ==>
- 0 <= (x::'a::{ordered_ring_strict,monoid_mult}) ^ n"
+ 0 <= (x::'a::{linordered_ring_strict,monoid_mult}) ^ n"
apply (simp add: even_nat_equiv_def2)
apply (erule exE)
apply (erule ssubst)
@@ -227,12 +227,12 @@
done
lemma zero_le_odd_power: "odd n ==>
- (0 <= (x::'a::{ordered_idom}) ^ n) = (0 <= x)"
+ (0 <= (x::'a::{linordered_idom}) ^ n) = (0 <= x)"
apply (auto simp: odd_nat_equiv_def2 power_Suc power_add zero_le_mult_iff)
apply (metis field_power_not_zero no_zero_divirors_neq0 order_antisym_conv zero_le_square)
done
-lemma zero_le_power_eq[presburger]: "(0 <= (x::'a::{ordered_idom}) ^ n) =
+lemma zero_le_power_eq[presburger]: "(0 <= (x::'a::{linordered_idom}) ^ n) =
(even n | (odd n & 0 <= x))"
apply auto
apply (subst zero_le_odd_power [symmetric])
@@ -240,19 +240,19 @@
apply (erule zero_le_even_power)
done
-lemma zero_less_power_eq[presburger]: "(0 < (x::'a::{ordered_idom}) ^ n) =
+lemma zero_less_power_eq[presburger]: "(0 < (x::'a::{linordered_idom}) ^ n) =
(n = 0 | (even n & x ~= 0) | (odd n & 0 < x))"
unfolding order_less_le zero_le_power_eq by auto
-lemma power_less_zero_eq[presburger]: "((x::'a::{ordered_idom}) ^ n < 0) =
+lemma power_less_zero_eq[presburger]: "((x::'a::{linordered_idom}) ^ n < 0) =
(odd n & x < 0)"
apply (subst linorder_not_le [symmetric])+
apply (subst zero_le_power_eq)
apply auto
done
-lemma power_le_zero_eq[presburger]: "((x::'a::{ordered_idom}) ^ n <= 0) =
+lemma power_le_zero_eq[presburger]: "((x::'a::{linordered_idom}) ^ n <= 0) =
(n ~= 0 & ((odd n & x <= 0) | (even n & x = 0)))"
apply (subst linorder_not_less [symmetric])+
apply (subst zero_less_power_eq)
@@ -260,7 +260,7 @@
done
lemma power_even_abs: "even n ==>
- (abs (x::'a::{ordered_idom}))^n = x^n"
+ (abs (x::'a::{linordered_idom}))^n = x^n"
apply (subst power_abs [symmetric])
apply (simp add: zero_le_even_power)
done
@@ -280,7 +280,7 @@
apply simp
done
-lemma power_mono_even: fixes x y :: "'a :: {ordered_idom}"
+lemma power_mono_even: fixes x y :: "'a :: {linordered_idom}"
assumes "even n" and "\<bar>x\<bar> \<le> \<bar>y\<bar>"
shows "x^n \<le> y^n"
proof -
@@ -292,7 +292,7 @@
lemma odd_pos: "odd (n::nat) \<Longrightarrow> 0 < n" by presburger
-lemma power_mono_odd: fixes x y :: "'a :: {ordered_idom}"
+lemma power_mono_odd: fixes x y :: "'a :: {linordered_idom}"
assumes "odd n" and "x \<le> y"
shows "x^n \<le> y^n"
proof (cases "y < 0")
@@ -372,11 +372,11 @@
subsection {* An Equivalence for @{term [source] "0 \<le> a^n"} *}
lemma even_power_le_0_imp_0:
- "a ^ (2*k) \<le> (0::'a::{ordered_idom}) ==> a=0"
+ "a ^ (2*k) \<le> (0::'a::{linordered_idom}) ==> a=0"
by (induct k) (auto simp add: zero_le_mult_iff mult_le_0_iff power_Suc)
lemma zero_le_power_iff[presburger]:
- "(0 \<le> a^n) = (0 \<le> (a::'a::{ordered_idom}) | even n)"
+ "(0 \<le> a^n) = (0 \<le> (a::'a::{linordered_idom}) | even n)"
proof cases
assume even: "even n"
then obtain k where "n = 2*k"
--- a/src/HOL/Power.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Power.thy Tue Feb 09 17:06:05 2010 +0100
@@ -130,7 +130,7 @@
end
-context ordered_semidom
+context linordered_semidom
begin
lemma zero_less_power [simp]:
@@ -323,7 +323,7 @@
end
-context ordered_idom
+context linordered_idom
begin
lemma power_abs:
--- a/src/HOL/Presburger.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Presburger.thy Tue Feb 09 17:06:05 2010 +0100
@@ -30,8 +30,8 @@
"\<exists>(z ::'a::{linorder}).\<forall>x<z.(x \<le> t) = True"
"\<exists>(z ::'a::{linorder}).\<forall>x<z.(x > t) = False"
"\<exists>(z ::'a::{linorder}).\<forall>x<z.(x \<ge> t) = False"
- "\<exists>z.\<forall>(x::'a::{linorder,plus,Ring_and_Field.dvd})<z. (d dvd x + s) = (d dvd x + s)"
- "\<exists>z.\<forall>(x::'a::{linorder,plus,Ring_and_Field.dvd})<z. (\<not> d dvd x + s) = (\<not> d dvd x + s)"
+ "\<exists>z.\<forall>(x::'a::{linorder,plus,Rings.dvd})<z. (d dvd x + s) = (d dvd x + s)"
+ "\<exists>z.\<forall>(x::'a::{linorder,plus,Rings.dvd})<z. (\<not> d dvd x + s) = (\<not> d dvd x + s)"
"\<exists>z.\<forall>x<z. F = F"
by ((erule exE, erule exE,rule_tac x="min z za" in exI,simp)+, (rule_tac x="t" in exI,fastsimp)+) simp_all
@@ -46,8 +46,8 @@
"\<exists>(z ::'a::{linorder}).\<forall>x>z.(x \<le> t) = False"
"\<exists>(z ::'a::{linorder}).\<forall>x>z.(x > t) = True"
"\<exists>(z ::'a::{linorder}).\<forall>x>z.(x \<ge> t) = True"
- "\<exists>z.\<forall>(x::'a::{linorder,plus,Ring_and_Field.dvd})>z. (d dvd x + s) = (d dvd x + s)"
- "\<exists>z.\<forall>(x::'a::{linorder,plus,Ring_and_Field.dvd})>z. (\<not> d dvd x + s) = (\<not> d dvd x + s)"
+ "\<exists>z.\<forall>(x::'a::{linorder,plus,Rings.dvd})>z. (d dvd x + s) = (d dvd x + s)"
+ "\<exists>z.\<forall>(x::'a::{linorder,plus,Rings.dvd})>z. (\<not> d dvd x + s) = (\<not> d dvd x + s)"
"\<exists>z.\<forall>x>z. F = F"
by ((erule exE, erule exE,rule_tac x="max z za" in exI,simp)+,(rule_tac x="t" in exI,fastsimp)+) simp_all
@@ -56,8 +56,8 @@
\<Longrightarrow> \<forall>x k. (P x \<and> Q x) = (P (x - k*D) \<and> Q (x - k*D))"
"\<lbrakk>\<forall>x k. P x = P (x - k*D); \<forall>x k. Q x = Q (x - k*D)\<rbrakk>
\<Longrightarrow> \<forall>x k. (P x \<or> Q x) = (P (x - k*D) \<or> Q (x - k*D))"
- "(d::'a::{comm_ring,Ring_and_Field.dvd}) dvd D \<Longrightarrow> \<forall>x k. (d dvd x + t) = (d dvd (x - k*D) + t)"
- "(d::'a::{comm_ring,Ring_and_Field.dvd}) dvd D \<Longrightarrow> \<forall>x k. (\<not>d dvd x + t) = (\<not>d dvd (x - k*D) + t)"
+ "(d::'a::{comm_ring,Rings.dvd}) dvd D \<Longrightarrow> \<forall>x k. (d dvd x + t) = (d dvd (x - k*D) + t)"
+ "(d::'a::{comm_ring,Rings.dvd}) dvd D \<Longrightarrow> \<forall>x k. (\<not>d dvd x + t) = (\<not>d dvd (x - k*D) + t)"
"\<forall>x k. F = F"
apply (auto elim!: dvdE simp add: algebra_simps)
unfolding mult_assoc [symmetric] left_distrib [symmetric] left_diff_distrib [symmetric]
@@ -243,7 +243,7 @@
{fix x
have "P x \<longrightarrow> P (x - i * d)" using step.hyps by blast
also have "\<dots> \<longrightarrow> P(x - (i + 1) * d)" using minus[THEN spec, of "x - i * d"]
- by (simp add:int_distrib OrderedGroup.diff_diff_eq[symmetric])
+ by (simp add: algebra_simps)
ultimately have "P x \<longrightarrow> P(x - (i + 1) * d)" by blast}
thus ?case ..
qed
@@ -360,7 +360,7 @@
apply(fastsimp)
done
-theorem unity_coeff_ex: "(\<exists>(x::'a::{semiring_0,Ring_and_Field.dvd}). P (l * x)) \<equiv> (\<exists>x. l dvd (x + 0) \<and> P x)"
+theorem unity_coeff_ex: "(\<exists>(x::'a::{semiring_0,Rings.dvd}). P (l * x)) \<equiv> (\<exists>x. l dvd (x + 0) \<and> P x)"
apply (rule eq_reflection [symmetric])
apply (rule iffI)
defer
--- a/src/HOL/Probability/Borel.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Probability/Borel.thy Tue Feb 09 17:06:05 2010 +0100
@@ -73,7 +73,7 @@
with w have "real(Suc(natceiling(inverse(g w - f w)))) > inverse(g w - f w)"
by (metis lessI order_le_less_trans real_natceiling_ge real_of_nat_less_iff)
hence "inverse(real(Suc(natceiling(inverse(g w - f w)))))
< inverse(inverse(g w - f w))"
- by (metis less_iff_diff_less_0 less_imp_inverse_less linorder_neqE_ordered_idom nz positive_imp_inverse_positive real_le_antisym real_less_def w)
+ by (metis less_iff_diff_less_0 less_imp_inverse_less linorder_neqE_linordered_idom nz positive_imp_inverse_positive real_le_antisym real_less_def w)
hence "inverse(real(Suc(natceiling(inverse(g w - f w))))) < g w - f w"
by (metis inverse_inverse_eq order_less_le_trans real_le_refl)
thus "\<exists>n. f w \<le> g w - inverse(real(Suc n))" using w
@@ -355,7 +355,7 @@
borel_measurable_add_borel_measurable f g)
have "(\<lambda>x. -((f x + -g x) ^ 2 * inverse 4)) =
(\<lambda>x. 0 + ((f x + -g x) ^ 2 * inverse -4))"
- by (simp add: Ring_and_Field.minus_divide_right)
+ by (simp add: minus_divide_right)
also have "... \<in> borel_measurable M"
by (fast intro: affine_borel_measurable borel_measurable_square
borel_measurable_add_borel_measurable
--- a/src/HOL/Quickcheck.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Quickcheck.thy Tue Feb 09 17:06:05 2010 +0100
@@ -164,7 +164,7 @@
where
"union R1 R2 = (\<lambda>s. let
(P1, s') = R1 s; (P2, s'') = R2 s'
- in (upper_semilattice_class.sup P1 P2, s''))"
+ in (semilattice_sup_class.sup P1 P2, s''))"
definition if_randompred :: "bool \<Rightarrow> unit randompred"
where
--- a/src/HOL/RComplete.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/RComplete.thy Tue Feb 09 17:06:05 2010 +0100
@@ -15,7 +15,7 @@
by simp
lemma abs_diff_less_iff:
- "(\<bar>x - a\<bar> < (r::'a::ordered_idom)) = (a - r < x \<and> x < a + r)"
+ "(\<bar>x - a\<bar> < (r::'a::linordered_idom)) = (a - r < x \<and> x < a + r)"
by auto
subsection {* Completeness of Positive Reals *}
--- a/src/HOL/Rational.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Rational.thy Tue Feb 09 17:06:05 2010 +0100
@@ -613,7 +613,7 @@
end
-instance rat :: ordered_field
+instance rat :: linordered_field
proof
fix q r s :: rat
show "q \<le> r ==> s + q \<le> s + r"
@@ -760,7 +760,7 @@
class field_char_0 = field + ring_char_0
-subclass (in ordered_field) field_char_0 ..
+subclass (in linordered_field) field_char_0 ..
context field_char_0
begin
@@ -832,7 +832,7 @@
done
lemma of_rat_less:
- "(of_rat r :: 'a::ordered_field) < of_rat s \<longleftrightarrow> r < s"
+ "(of_rat r :: 'a::linordered_field) < of_rat s \<longleftrightarrow> r < s"
proof (induct r, induct s)
fix a b c d :: int
assume not_zero: "b > 0" "d > 0"
@@ -841,14 +841,14 @@
"(of_int a :: 'a) / of_int b < of_int c / of_int d
\<longleftrightarrow> (of_int a :: 'a) * of_int d < of_int c * of_int b"
using not_zero by (simp add: pos_less_divide_eq pos_divide_less_eq)
- show "(of_rat (Fract a b) :: 'a::ordered_field) < of_rat (Fract c d)
+ show "(of_rat (Fract a b) :: 'a::linordered_field) < of_rat (Fract c d)
\<longleftrightarrow> Fract a b < Fract c d"
using not_zero `b * d > 0`
by (simp add: of_rat_rat of_int_divide_less_eq of_int_mult [symmetric] del: of_int_mult)
qed
lemma of_rat_less_eq:
- "(of_rat r :: 'a::ordered_field) \<le> of_rat s \<longleftrightarrow> r \<le> s"
+ "(of_rat r :: 'a::linordered_field) \<le> of_rat s \<longleftrightarrow> r \<le> s"
unfolding le_less by (auto simp add: of_rat_less)
lemmas of_rat_eq_0_iff [simp] = of_rat_eq_iff [of _ 0, simplified]
@@ -1083,11 +1083,11 @@
finally show ?thesis using assms by simp
qed
-lemma (in ordered_idom) sgn_greater [simp]:
+lemma (in linordered_idom) sgn_greater [simp]:
"0 < sgn a \<longleftrightarrow> 0 < a"
unfolding sgn_if by auto
-lemma (in ordered_idom) sgn_less [simp]:
+lemma (in linordered_idom) sgn_less [simp]:
"sgn a < 0 \<longleftrightarrow> a < 0"
unfolding sgn_if by auto
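A quick sanity check of the relocated sgn lemmas, sketched at type rat (which this changeset turns into a linordered_field and hence a linordered_idom); the explicit simp additions are only there to make the dependence on sgn_greater and sgn_less visible.

lemma "0 < sgn (1::rat)" and "sgn (- 1::rat) < 0"
  by (simp_all add: sgn_greater sgn_less)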
--- a/src/HOL/Real.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Real.thy Tue Feb 09 17:06:05 2010 +0100
@@ -3,7 +3,7 @@
begin
lemma field_le_epsilon:
- fixes x y :: "'a:: {number_ring,division_by_zero,ordered_field}"
+ fixes x y :: "'a:: {number_ring,division_by_zero,linordered_field}"
assumes e: "(!!e. 0 < e ==> x \<le> y + e)"
shows "x \<le> y"
proof (rule ccontr)
--- a/src/HOL/RealDef.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/RealDef.thy Tue Feb 09 17:06:05 2010 +0100
@@ -416,7 +416,7 @@
subsection{*The Reals Form an Ordered Field*}
-instance real :: ordered_field
+instance real :: linordered_field
proof
fix x y z :: real
show "x \<le> y ==> z + x \<le> z + y" by (rule real_add_left_mono)
@@ -426,8 +426,6 @@
by (simp only: real_sgn_def)
qed
-instance real :: lordered_ab_group_add ..
-
text{*The function @{term real_of_preal} requires many proofs, but it seems
to be essential for proving completeness of the reals from that of the
positive reals.*}
@@ -523,7 +521,7 @@
lemma real_mult_less_iff1 [simp]: "(0::real) < z ==> (x*z < y*z) = (x < y)"
by (force elim: order_less_asym
- simp add: Ring_and_Field.mult_less_cancel_right)
+ simp add: mult_less_cancel_right)
lemma real_mult_le_cancel_iff1 [simp]: "(0::real) < z ==> (x*z \<le> y*z) = (x\<le>y)"
apply (simp add: mult_le_cancel_right)
@@ -1017,7 +1015,7 @@
done
-text{*Similar results are proved in @{text Ring_and_Field}*}
+text{*Similar results are proved in @{text Fields}*}
lemma real_less_half_sum: "x < y ==> x < (x+y) / (2::real)"
by auto
@@ -1032,7 +1030,7 @@
(* FIXME: redundant, but used by Integration/RealRandVar.thy in AFP *)
lemma abs_le_interval_iff: "(abs x \<le> r) = (-r\<le>x & x\<le>(r::real))"
-by (force simp add: OrderedGroup.abs_le_iff)
+by (force simp add: abs_le_iff)
lemma abs_add_one_gt_zero [simp]: "(0::real) < 1 + abs(x)"
by (simp add: abs_if)
@@ -1046,13 +1044,6 @@
lemma abs_sum_triangle_ineq: "abs ((x::real) + y + (-l + -m)) \<le> abs(x + -l) + abs(y + -m)"
by simp
-instance real :: lordered_ring
-proof
- fix a::real
- show "abs a = sup a (-a)"
- by (auto simp add: real_abs_def sup_real_def)
-qed
-
subsection {* Implementation of rational real numbers *}
--- a/src/HOL/Ring_and_Field.thy Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2391 +0,0 @@
-(* Title: HOL/Ring_and_Field.thy
- Author: Gertrud Bauer
- Author: Steven Obua
- Author: Tobias Nipkow
- Author: Lawrence C Paulson
- Author: Markus Wenzel
- Author: Jeremy Avigad
-*)
-
-header {* (Ordered) Rings and Fields *}
-
-theory Ring_and_Field
-imports OrderedGroup
-begin
-
-text {*
- The theory of partially ordered rings is taken from the books:
- \begin{itemize}
- \item \emph{Lattice Theory} by Garrett Birkhoff, American Mathematical Society 1979
- \item \emph{Partially Ordered Algebraic Systems}, Pergamon Press 1963
- \end{itemize}
- Most of the used notions can also be looked up in
- \begin{itemize}
- \item \url{http://www.mathworld.com} by Eric Weisstein et al.
- \item \emph{Algebra I} by van der Waerden, Springer.
- \end{itemize}
-*}
-
-class semiring = ab_semigroup_add + semigroup_mult +
- assumes left_distrib[algebra_simps]: "(a + b) * c = a * c + b * c"
- assumes right_distrib[algebra_simps]: "a * (b + c) = a * b + a * c"
-begin
-
-text{*For the @{text combine_numerals} simproc*}
-lemma combine_common_factor:
- "a * e + (b * e + c) = (a + b) * e + c"
-by (simp add: left_distrib add_ac)
-
-end
-
-class mult_zero = times + zero +
- assumes mult_zero_left [simp]: "0 * a = 0"
- assumes mult_zero_right [simp]: "a * 0 = 0"
-
-class semiring_0 = semiring + comm_monoid_add + mult_zero
-
-class semiring_0_cancel = semiring + cancel_comm_monoid_add
-begin
-
-subclass semiring_0
-proof
- fix a :: 'a
- have "0 * a + 0 * a = 0 * a + 0" by (simp add: left_distrib [symmetric])
- thus "0 * a = 0" by (simp only: add_left_cancel)
-next
- fix a :: 'a
- have "a * 0 + a * 0 = a * 0 + 0" by (simp add: right_distrib [symmetric])
- thus "a * 0 = 0" by (simp only: add_left_cancel)
-qed
-
-end
-
-class comm_semiring = ab_semigroup_add + ab_semigroup_mult +
- assumes distrib: "(a + b) * c = a * c + b * c"
-begin
-
-subclass semiring
-proof
- fix a b c :: 'a
- show "(a + b) * c = a * c + b * c" by (simp add: distrib)
- have "a * (b + c) = (b + c) * a" by (simp add: mult_ac)
- also have "... = b * a + c * a" by (simp only: distrib)
- also have "... = a * b + a * c" by (simp add: mult_ac)
- finally show "a * (b + c) = a * b + a * c" by blast
-qed
-
-end
-
-class comm_semiring_0 = comm_semiring + comm_monoid_add + mult_zero
-begin
-
-subclass semiring_0 ..
-
-end
-
-class comm_semiring_0_cancel = comm_semiring + cancel_comm_monoid_add
-begin
-
-subclass semiring_0_cancel ..
-
-subclass comm_semiring_0 ..
-
-end
-
-class zero_neq_one = zero + one +
- assumes zero_neq_one [simp]: "0 \<noteq> 1"
-begin
-
-lemma one_neq_zero [simp]: "1 \<noteq> 0"
-by (rule not_sym) (rule zero_neq_one)
-
-end
-
-class semiring_1 = zero_neq_one + semiring_0 + monoid_mult
-
-text {* Abstract divisibility *}
-
-class dvd = times
-begin
-
-definition dvd :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "dvd" 50) where
- [code del]: "b dvd a \<longleftrightarrow> (\<exists>k. a = b * k)"
-
-lemma dvdI [intro?]: "a = b * k \<Longrightarrow> b dvd a"
- unfolding dvd_def ..
-
-lemma dvdE [elim?]: "b dvd a \<Longrightarrow> (\<And>k. a = b * k \<Longrightarrow> P) \<Longrightarrow> P"
- unfolding dvd_def by blast
-
-end
-
-class comm_semiring_1 = zero_neq_one + comm_semiring_0 + comm_monoid_mult + dvd
- (*previously almost_semiring*)
-begin
-
-subclass semiring_1 ..
-
-lemma dvd_refl[simp]: "a dvd a"
-proof
- show "a = a * 1" by simp
-qed
-
-lemma dvd_trans:
- assumes "a dvd b" and "b dvd c"
- shows "a dvd c"
-proof -
- from assms obtain v where "b = a * v" by (auto elim!: dvdE)
- moreover from assms obtain w where "c = b * w" by (auto elim!: dvdE)
- ultimately have "c = a * (v * w)" by (simp add: mult_assoc)
- then show ?thesis ..
-qed
-
-lemma dvd_0_left_iff [noatp, simp]: "0 dvd a \<longleftrightarrow> a = 0"
-by (auto intro: dvd_refl elim!: dvdE)
-
-lemma dvd_0_right [iff]: "a dvd 0"
-proof
- show "0 = a * 0" by simp
-qed
-
-lemma one_dvd [simp]: "1 dvd a"
-by (auto intro!: dvdI)
-
-lemma dvd_mult[simp]: "a dvd c \<Longrightarrow> a dvd (b * c)"
-by (auto intro!: mult_left_commute dvdI elim!: dvdE)
-
-lemma dvd_mult2[simp]: "a dvd b \<Longrightarrow> a dvd (b * c)"
- apply (subst mult_commute)
- apply (erule dvd_mult)
- done
-
-lemma dvd_triv_right [simp]: "a dvd b * a"
-by (rule dvd_mult) (rule dvd_refl)
-
-lemma dvd_triv_left [simp]: "a dvd a * b"
-by (rule dvd_mult2) (rule dvd_refl)
-
-lemma mult_dvd_mono:
- assumes "a dvd b"
- and "c dvd d"
- shows "a * c dvd b * d"
-proof -
- from `a dvd b` obtain b' where "b = a * b'" ..
- moreover from `c dvd d` obtain d' where "d = c * d'" ..
- ultimately have "b * d = (a * c) * (b' * d')" by (simp add: mult_ac)
- then show ?thesis ..
-qed
-
-lemma dvd_mult_left: "a * b dvd c \<Longrightarrow> a dvd c"
-by (simp add: dvd_def mult_assoc, blast)
-
-lemma dvd_mult_right: "a * b dvd c \<Longrightarrow> b dvd c"
- unfolding mult_ac [of a] by (rule dvd_mult_left)
-
-lemma dvd_0_left: "0 dvd a \<Longrightarrow> a = 0"
-by simp
-
-lemma dvd_add[simp]:
- assumes "a dvd b" and "a dvd c" shows "a dvd (b + c)"
-proof -
- from `a dvd b` obtain b' where "b = a * b'" ..
- moreover from `a dvd c` obtain c' where "c = a * c'" ..
- ultimately have "b + c = a * (b' + c')" by (simp add: right_distrib)
- then show ?thesis ..
-qed
-
-end
-
-
-class no_zero_divisors = zero + times +
- assumes no_zero_divisors: "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> a * b \<noteq> 0"
-
-class semiring_1_cancel = semiring + cancel_comm_monoid_add
- + zero_neq_one + monoid_mult
-begin
-
-subclass semiring_0_cancel ..
-
-subclass semiring_1 ..
-
-end
-
-class comm_semiring_1_cancel = comm_semiring + cancel_comm_monoid_add
- + zero_neq_one + comm_monoid_mult
-begin
-
-subclass semiring_1_cancel ..
-subclass comm_semiring_0_cancel ..
-subclass comm_semiring_1 ..
-
-end
-
-class ring = semiring + ab_group_add
-begin
-
-subclass semiring_0_cancel ..
-
-text {* Distribution rules *}
-
-lemma minus_mult_left: "- (a * b) = - a * b"
-by (rule minus_unique) (simp add: left_distrib [symmetric])
-
-lemma minus_mult_right: "- (a * b) = a * - b"
-by (rule minus_unique) (simp add: right_distrib [symmetric])
-
-text{*Extract signs from products*}
-lemmas mult_minus_left [simp, noatp] = minus_mult_left [symmetric]
-lemmas mult_minus_right [simp,noatp] = minus_mult_right [symmetric]
-
-lemma minus_mult_minus [simp]: "- a * - b = a * b"
-by simp
-
-lemma minus_mult_commute: "- a * b = a * - b"
-by simp
-
-lemma right_diff_distrib[algebra_simps]: "a * (b - c) = a * b - a * c"
-by (simp add: right_distrib diff_minus)
-
-lemma left_diff_distrib[algebra_simps]: "(a - b) * c = a * c - b * c"
-by (simp add: left_distrib diff_minus)
-
-lemmas ring_distribs[noatp] =
- right_distrib left_distrib left_diff_distrib right_diff_distrib
-
-text{*Legacy - use @{text algebra_simps} *}
-lemmas ring_simps[noatp] = algebra_simps
-
-lemma eq_add_iff1:
- "a * e + c = b * e + d \<longleftrightarrow> (a - b) * e + c = d"
-by (simp add: algebra_simps)
-
-lemma eq_add_iff2:
- "a * e + c = b * e + d \<longleftrightarrow> c = (b - a) * e + d"
-by (simp add: algebra_simps)
-
-end
-
-lemmas ring_distribs[noatp] =
- right_distrib left_distrib left_diff_distrib right_diff_distrib
-
-class comm_ring = comm_semiring + ab_group_add
-begin
-
-subclass ring ..
-subclass comm_semiring_0_cancel ..
-
-end
-
-class ring_1 = ring + zero_neq_one + monoid_mult
-begin
-
-subclass semiring_1_cancel ..
-
-end
-
-class comm_ring_1 = comm_ring + zero_neq_one + comm_monoid_mult
- (*previously ring*)
-begin
-
-subclass ring_1 ..
-subclass comm_semiring_1_cancel ..
-
-lemma dvd_minus_iff [simp]: "x dvd - y \<longleftrightarrow> x dvd y"
-proof
- assume "x dvd - y"
- then have "x dvd - 1 * - y" by (rule dvd_mult)
- then show "x dvd y" by simp
-next
- assume "x dvd y"
- then have "x dvd - 1 * y" by (rule dvd_mult)
- then show "x dvd - y" by simp
-qed
-
-lemma minus_dvd_iff [simp]: "- x dvd y \<longleftrightarrow> x dvd y"
-proof
- assume "- x dvd y"
- then obtain k where "y = - x * k" ..
- then have "y = x * - k" by simp
- then show "x dvd y" ..
-next
- assume "x dvd y"
- then obtain k where "y = x * k" ..
- then have "y = - x * - k" by simp
- then show "- x dvd y" ..
-qed
-
-lemma dvd_diff[simp]: "x dvd y \<Longrightarrow> x dvd z \<Longrightarrow> x dvd (y - z)"
-by (simp add: diff_minus dvd_minus_iff)
-
-end
-
-class ring_no_zero_divisors = ring + no_zero_divisors
-begin
-
-lemma mult_eq_0_iff [simp]:
- shows "a * b = 0 \<longleftrightarrow> (a = 0 \<or> b = 0)"
-proof (cases "a = 0 \<or> b = 0")
- case False then have "a \<noteq> 0" and "b \<noteq> 0" by auto
- then show ?thesis using no_zero_divisors by simp
-next
- case True then show ?thesis by auto
-qed
-
-text{*Cancellation of equalities with a common factor*}
-lemma mult_cancel_right [simp, noatp]:
- "a * c = b * c \<longleftrightarrow> c = 0 \<or> a = b"
-proof -
- have "(a * c = b * c) = ((a - b) * c = 0)"
- by (simp add: algebra_simps right_minus_eq)
- thus ?thesis by (simp add: disj_commute right_minus_eq)
-qed
-
-lemma mult_cancel_left [simp, noatp]:
- "c * a = c * b \<longleftrightarrow> c = 0 \<or> a = b"
-proof -
- have "(c * a = c * b) = (c * (a - b) = 0)"
- by (simp add: algebra_simps right_minus_eq)
- thus ?thesis by (simp add: right_minus_eq)
-qed
-
-end
-
-class ring_1_no_zero_divisors = ring_1 + ring_no_zero_divisors
-begin
-
-lemma mult_cancel_right1 [simp]:
- "c = b * c \<longleftrightarrow> c = 0 \<or> b = 1"
-by (insert mult_cancel_right [of 1 c b], force)
-
-lemma mult_cancel_right2 [simp]:
- "a * c = c \<longleftrightarrow> c = 0 \<or> a = 1"
-by (insert mult_cancel_right [of a c 1], simp)
-
-lemma mult_cancel_left1 [simp]:
- "c = c * b \<longleftrightarrow> c = 0 \<or> b = 1"
-by (insert mult_cancel_left [of c 1 b], force)
-
-lemma mult_cancel_left2 [simp]:
- "c * a = c \<longleftrightarrow> c = 0 \<or> a = 1"
-by (insert mult_cancel_left [of c a 1], simp)
-
-end
-
-class idom = comm_ring_1 + no_zero_divisors
-begin
-
-subclass ring_1_no_zero_divisors ..
-
-lemma square_eq_iff: "a * a = b * b \<longleftrightarrow> (a = b \<or> a = - b)"
-proof
- assume "a * a = b * b"
- then have "(a - b) * (a + b) = 0"
- by (simp add: algebra_simps)
- then show "a = b \<or> a = - b"
- by (simp add: right_minus_eq eq_neg_iff_add_eq_0)
-next
- assume "a = b \<or> a = - b"
- then show "a * a = b * b" by auto
-qed
-
-lemma dvd_mult_cancel_right [simp]:
- "a * c dvd b * c \<longleftrightarrow> c = 0 \<or> a dvd b"
-proof -
- have "a * c dvd b * c \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
- unfolding dvd_def by (simp add: mult_ac)
- also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
- unfolding dvd_def by simp
- finally show ?thesis .
-qed
-
-lemma dvd_mult_cancel_left [simp]:
- "c * a dvd c * b \<longleftrightarrow> c = 0 \<or> a dvd b"
-proof -
- have "c * a dvd c * b \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
- unfolding dvd_def by (simp add: mult_ac)
- also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
- unfolding dvd_def by simp
- finally show ?thesis .
-qed
-
-end
-
-class division_ring = ring_1 + inverse +
- assumes left_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1"
- assumes right_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> a * inverse a = 1"
-begin
-
-subclass ring_1_no_zero_divisors
-proof
- fix a b :: 'a
- assume a: "a \<noteq> 0" and b: "b \<noteq> 0"
- show "a * b \<noteq> 0"
- proof
- assume ab: "a * b = 0"
- hence "0 = inverse a * (a * b) * inverse b" by simp
- also have "\<dots> = (inverse a * a) * (b * inverse b)"
- by (simp only: mult_assoc)
- also have "\<dots> = 1" using a b by simp
- finally show False by simp
- qed
-qed
-
-lemma nonzero_imp_inverse_nonzero:
- "a \<noteq> 0 \<Longrightarrow> inverse a \<noteq> 0"
-proof
- assume ianz: "inverse a = 0"
- assume "a \<noteq> 0"
- hence "1 = a * inverse a" by simp
- also have "... = 0" by (simp add: ianz)
- finally have "1 = 0" .
- thus False by (simp add: eq_commute)
-qed
-
-lemma inverse_zero_imp_zero:
- "inverse a = 0 \<Longrightarrow> a = 0"
-apply (rule classical)
-apply (drule nonzero_imp_inverse_nonzero)
-apply auto
-done
-
-lemma inverse_unique:
- assumes ab: "a * b = 1"
- shows "inverse a = b"
-proof -
- have "a \<noteq> 0" using ab by (cases "a = 0") simp_all
- moreover have "inverse a * (a * b) = inverse a" by (simp add: ab)
- ultimately show ?thesis by (simp add: mult_assoc [symmetric])
-qed
-
-lemma nonzero_inverse_minus_eq:
- "a \<noteq> 0 \<Longrightarrow> inverse (- a) = - inverse a"
-by (rule inverse_unique) simp
-
-lemma nonzero_inverse_inverse_eq:
- "a \<noteq> 0 \<Longrightarrow> inverse (inverse a) = a"
-by (rule inverse_unique) simp
-
-lemma nonzero_inverse_eq_imp_eq:
- assumes "inverse a = inverse b" and "a \<noteq> 0" and "b \<noteq> 0"
- shows "a = b"
-proof -
- from `inverse a = inverse b`
- have "inverse (inverse a) = inverse (inverse b)" by (rule arg_cong)
- with `a \<noteq> 0` and `b \<noteq> 0` show "a = b"
- by (simp add: nonzero_inverse_inverse_eq)
-qed
-
-lemma inverse_1 [simp]: "inverse 1 = 1"
-by (rule inverse_unique) simp
-
-lemma nonzero_inverse_mult_distrib:
- assumes "a \<noteq> 0" and "b \<noteq> 0"
- shows "inverse (a * b) = inverse b * inverse a"
-proof -
- have "a * (b * inverse b) * inverse a = 1" using assms by simp
- hence "a * b * (inverse b * inverse a) = 1" by (simp only: mult_assoc)
- thus ?thesis by (rule inverse_unique)
-qed
-
-lemma division_ring_inverse_add:
- "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a + inverse b = inverse a * (a + b) * inverse b"
-by (simp add: algebra_simps)
-
-lemma division_ring_inverse_diff:
- "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a - inverse b = inverse a * (b - a) * inverse b"
-by (simp add: algebra_simps)
-
-end
-
-class field = comm_ring_1 + inverse +
- assumes field_inverse: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1"
- assumes divide_inverse: "a / b = a * inverse b"
-begin
-
-subclass division_ring
-proof
- fix a :: 'a
- assume "a \<noteq> 0"
- thus "inverse a * a = 1" by (rule field_inverse)
- thus "a * inverse a = 1" by (simp only: mult_commute)
-qed
-
-subclass idom ..
-
-lemma right_inverse_eq: "b \<noteq> 0 \<Longrightarrow> a / b = 1 \<longleftrightarrow> a = b"
-proof
- assume neq: "b \<noteq> 0"
- {
- hence "a = (a / b) * b" by (simp add: divide_inverse mult_ac)
- also assume "a / b = 1"
- finally show "a = b" by simp
- next
- assume "a = b"
- with neq show "a / b = 1" by (simp add: divide_inverse)
- }
-qed
-
-lemma nonzero_inverse_eq_divide: "a \<noteq> 0 \<Longrightarrow> inverse a = 1 / a"
-by (simp add: divide_inverse)
-
-lemma divide_self [simp]: "a \<noteq> 0 \<Longrightarrow> a / a = 1"
-by (simp add: divide_inverse)
-
-lemma divide_zero_left [simp]: "0 / a = 0"
-by (simp add: divide_inverse)
-
-lemma inverse_eq_divide: "inverse a = 1 / a"
-by (simp add: divide_inverse)
-
-lemma add_divide_distrib: "(a+b) / c = a/c + b/c"
-by (simp add: divide_inverse algebra_simps)
-
-text{*There is no slick version using division by zero.*}
-lemma inverse_add:
- "[| a \<noteq> 0; b \<noteq> 0 |]
- ==> inverse a + inverse b = (a + b) * inverse a * inverse b"
-by (simp add: division_ring_inverse_add mult_ac)
-
-lemma nonzero_mult_divide_mult_cancel_left [simp, noatp]:
-assumes [simp]: "b\<noteq>0" and [simp]: "c\<noteq>0" shows "(c*a)/(c*b) = a/b"
-proof -
- have "(c*a)/(c*b) = c * a * (inverse b * inverse c)"
- by (simp add: divide_inverse nonzero_inverse_mult_distrib)
- also have "... = a * inverse b * (inverse c * c)"
- by (simp only: mult_ac)
- also have "... = a * inverse b" by simp
- finally show ?thesis by (simp add: divide_inverse)
-qed
-
-lemma nonzero_mult_divide_mult_cancel_right [simp, noatp]:
- "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (a * c) / (b * c) = a / b"
-by (simp add: mult_commute [of _ c])
-
-lemma divide_1 [simp]: "a / 1 = a"
-by (simp add: divide_inverse)
-
-lemma times_divide_eq_right: "a * (b / c) = (a * b) / c"
-by (simp add: divide_inverse mult_assoc)
-
-lemma times_divide_eq_left: "(b / c) * a = (b * a) / c"
-by (simp add: divide_inverse mult_ac)
-
-text {* These are later declared as simp rules. *}
-lemmas times_divide_eq [noatp] = times_divide_eq_right times_divide_eq_left
-
-lemma add_frac_eq:
- assumes "y \<noteq> 0" and "z \<noteq> 0"
- shows "x / y + w / z = (x * z + w * y) / (y * z)"
-proof -
- have "x / y + w / z = (x * z) / (y * z) + (y * w) / (y * z)"
- using assms by simp
- also have "\<dots> = (x * z + y * w) / (y * z)"
- by (simp only: add_divide_distrib)
- finally show ?thesis
- by (simp only: mult_commute)
-qed
-
-text{*Special Cancellation Simprules for Division*}
-
-lemma nonzero_mult_divide_cancel_right [simp, noatp]:
- "b \<noteq> 0 \<Longrightarrow> a * b / b = a"
-using nonzero_mult_divide_mult_cancel_right [of 1 b a] by simp
-
-lemma nonzero_mult_divide_cancel_left [simp, noatp]:
- "a \<noteq> 0 \<Longrightarrow> a * b / a = b"
-using nonzero_mult_divide_mult_cancel_left [of 1 a b] by simp
-
-lemma nonzero_divide_mult_cancel_right [simp, noatp]:
- "\<lbrakk>a \<noteq> 0; b \<noteq> 0\<rbrakk> \<Longrightarrow> b / (a * b) = 1 / a"
-using nonzero_mult_divide_mult_cancel_right [of a b 1] by simp
-
-lemma nonzero_divide_mult_cancel_left [simp, noatp]:
- "\<lbrakk>a \<noteq> 0; b \<noteq> 0\<rbrakk> \<Longrightarrow> a / (a * b) = 1 / b"
-using nonzero_mult_divide_mult_cancel_left [of b a 1] by simp
-
-lemma nonzero_mult_divide_mult_cancel_left2 [simp, noatp]:
- "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (c * a) / (b * c) = a / b"
-using nonzero_mult_divide_mult_cancel_left [of b c a] by (simp add: mult_ac)
-
-lemma nonzero_mult_divide_mult_cancel_right2 [simp, noatp]:
- "\<lbrakk>b \<noteq> 0; c \<noteq> 0\<rbrakk> \<Longrightarrow> (a * c) / (c * b) = a / b"
-using nonzero_mult_divide_mult_cancel_right [of b c a] by (simp add: mult_ac)
-
-lemma minus_divide_left: "- (a / b) = (-a) / b"
-by (simp add: divide_inverse)
-
-lemma nonzero_minus_divide_right: "b \<noteq> 0 ==> - (a / b) = a / (- b)"
-by (simp add: divide_inverse nonzero_inverse_minus_eq)
-
-lemma nonzero_minus_divide_divide: "b \<noteq> 0 ==> (-a) / (-b) = a / b"
-by (simp add: divide_inverse nonzero_inverse_minus_eq)
-
-lemma divide_minus_left [simp, noatp]: "(-a) / b = - (a / b)"
-by (simp add: divide_inverse)
-
-lemma diff_divide_distrib: "(a - b) / c = a / c - b / c"
-by (simp add: diff_minus add_divide_distrib)
-
-lemma add_divide_eq_iff:
- "z \<noteq> 0 \<Longrightarrow> x + y / z = (z * x + y) / z"
-by (simp add: add_divide_distrib)
-
-lemma divide_add_eq_iff:
- "z \<noteq> 0 \<Longrightarrow> x / z + y = (x + z * y) / z"
-by (simp add: add_divide_distrib)
-
-lemma diff_divide_eq_iff:
- "z \<noteq> 0 \<Longrightarrow> x - y / z = (z * x - y) / z"
-by (simp add: diff_divide_distrib)
-
-lemma divide_diff_eq_iff:
- "z \<noteq> 0 \<Longrightarrow> x / z - y = (x - z * y) / z"
-by (simp add: diff_divide_distrib)
-
-lemma nonzero_eq_divide_eq: "c \<noteq> 0 \<Longrightarrow> a = b / c \<longleftrightarrow> a * c = b"
-proof -
- assume [simp]: "c \<noteq> 0"
- have "a = b / c \<longleftrightarrow> a * c = (b / c) * c" by simp
- also have "... \<longleftrightarrow> a * c = b" by (simp add: divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma nonzero_divide_eq_eq: "c \<noteq> 0 \<Longrightarrow> b / c = a \<longleftrightarrow> b = a * c"
-proof -
- assume [simp]: "c \<noteq> 0"
- have "b / c = a \<longleftrightarrow> (b / c) * c = a * c" by simp
- also have "... \<longleftrightarrow> b = a * c" by (simp add: divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma divide_eq_imp: "c \<noteq> 0 \<Longrightarrow> b = a * c \<Longrightarrow> b / c = a"
-by simp
-
-lemma eq_divide_imp: "c \<noteq> 0 \<Longrightarrow> a * c = b \<Longrightarrow> a = b / c"
-by (erule subst, simp)
-
-lemmas field_eq_simps[noatp] = algebra_simps
- (* pull / out*)
- add_divide_eq_iff divide_add_eq_iff
- diff_divide_eq_iff divide_diff_eq_iff
- (* multiply eqn *)
- nonzero_eq_divide_eq nonzero_divide_eq_eq
-(* is added later:
- times_divide_eq_left times_divide_eq_right
-*)
-
-text{*An example:*}
-lemma "\<lbrakk>a\<noteq>b; c\<noteq>d; e\<noteq>f\<rbrakk> \<Longrightarrow> ((a-b)*(c-d)*(e-f))/((c-d)*(e-f)*(a-b)) = 1"
-apply(subgoal_tac "(c-d)*(e-f)*(a-b) \<noteq> 0")
- apply(simp add:field_eq_simps)
-apply(simp)
-done
-
-lemma diff_frac_eq:
- "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> x / y - w / z = (x * z - w * y) / (y * z)"
-by (simp add: field_eq_simps times_divide_eq)
-
-lemma frac_eq_eq:
- "y \<noteq> 0 \<Longrightarrow> z \<noteq> 0 \<Longrightarrow> (x / y = w / z) = (x * z = w * y)"
-by (simp add: field_eq_simps times_divide_eq)
-
-end
-
-class division_by_zero = zero + inverse +
- assumes inverse_zero [simp]: "inverse 0 = 0"
-
-lemma divide_zero [simp]:
- "a / 0 = (0::'a::{field,division_by_zero})"
-by (simp add: divide_inverse)
-
-lemma divide_self_if [simp]:
- "a / (a::'a::{field,division_by_zero}) = (if a=0 then 0 else 1)"
-by simp
-
-class mult_mono = times + zero + ord +
- assumes mult_left_mono: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> c * a \<le> c * b"
- assumes mult_right_mono: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> a * c \<le> b * c"
-
-class pordered_semiring = mult_mono + semiring_0 + pordered_ab_semigroup_add
-begin
-
-lemma mult_mono:
- "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> 0 \<le> b \<Longrightarrow> 0 \<le> c
- \<Longrightarrow> a * c \<le> b * d"
-apply (erule mult_right_mono [THEN order_trans], assumption)
-apply (erule mult_left_mono, assumption)
-done
-
-lemma mult_mono':
- "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 \<le> c
- \<Longrightarrow> a * c \<le> b * d"
-apply (rule mult_mono)
-apply (fast intro: order_trans)+
-done
-
-end
-
-class pordered_cancel_semiring = mult_mono + pordered_ab_semigroup_add
- + semiring + cancel_comm_monoid_add
-begin
-
-subclass semiring_0_cancel ..
-subclass pordered_semiring ..
-
-lemma mult_nonneg_nonneg: "0 \<le> a \<Longrightarrow> 0 \<le> b \<Longrightarrow> 0 \<le> a * b"
-using mult_left_mono [of zero b a] by simp
-
-lemma mult_nonneg_nonpos: "0 \<le> a \<Longrightarrow> b \<le> 0 \<Longrightarrow> a * b \<le> 0"
-using mult_left_mono [of b zero a] by simp
-
-lemma mult_nonpos_nonneg: "a \<le> 0 \<Longrightarrow> 0 \<le> b \<Longrightarrow> a * b \<le> 0"
-using mult_right_mono [of a zero b] by simp
-
-text {* Legacy - use @{text mult_nonpos_nonneg} *}
-lemma mult_nonneg_nonpos2: "0 \<le> a \<Longrightarrow> b \<le> 0 \<Longrightarrow> b * a \<le> 0"
-by (drule mult_right_mono [of b zero], auto)
-
-lemma split_mult_neg_le: "(0 \<le> a & b \<le> 0) | (a \<le> 0 & 0 \<le> b) \<Longrightarrow> a * b \<le> 0"
-by (auto simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)
-
-end
-
-class ordered_semiring = semiring + comm_monoid_add + ordered_cancel_ab_semigroup_add + mult_mono
-begin
-
-subclass pordered_cancel_semiring ..
-
-subclass pordered_comm_monoid_add ..
-
-lemma mult_left_less_imp_less:
- "c * a < c * b \<Longrightarrow> 0 \<le> c \<Longrightarrow> a < b"
-by (force simp add: mult_left_mono not_le [symmetric])
-
-lemma mult_right_less_imp_less:
- "a * c < b * c \<Longrightarrow> 0 \<le> c \<Longrightarrow> a < b"
-by (force simp add: mult_right_mono not_le [symmetric])
-
-end
-
-class ordered_semiring_1 = ordered_semiring + semiring_1
-
-class ordered_semiring_strict = semiring + comm_monoid_add + ordered_cancel_ab_semigroup_add +
- assumes mult_strict_left_mono: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> c * a < c * b"
- assumes mult_strict_right_mono: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> a * c < b * c"
-begin
-
-subclass semiring_0_cancel ..
-
-subclass ordered_semiring
-proof
- fix a b c :: 'a
- assume A: "a \<le> b" "0 \<le> c"
- from A show "c * a \<le> c * b"
- unfolding le_less
- using mult_strict_left_mono by (cases "c = 0") auto
- from A show "a * c \<le> b * c"
- unfolding le_less
- using mult_strict_right_mono by (cases "c = 0") auto
-qed
-
-lemma mult_left_le_imp_le:
- "c * a \<le> c * b \<Longrightarrow> 0 < c \<Longrightarrow> a \<le> b"
-by (force simp add: mult_strict_left_mono not_less [symmetric])
-
-lemma mult_right_le_imp_le:
- "a * c \<le> b * c \<Longrightarrow> 0 < c \<Longrightarrow> a \<le> b"
-by (force simp add: mult_strict_right_mono not_less [symmetric])
-
-lemma mult_pos_pos: "0 < a \<Longrightarrow> 0 < b \<Longrightarrow> 0 < a * b"
-using mult_strict_left_mono [of zero b a] by simp
-
-lemma mult_pos_neg: "0 < a \<Longrightarrow> b < 0 \<Longrightarrow> a * b < 0"
-using mult_strict_left_mono [of b zero a] by simp
-
-lemma mult_neg_pos: "a < 0 \<Longrightarrow> 0 < b \<Longrightarrow> a * b < 0"
-using mult_strict_right_mono [of a zero b] by simp
-
-text {* Legacy - use @{text mult_neg_pos} *}
-lemma mult_pos_neg2: "0 < a \<Longrightarrow> b < 0 \<Longrightarrow> b * a < 0"
-by (drule mult_strict_right_mono [of b zero], auto)
-
-lemma zero_less_mult_pos:
- "0 < a * b \<Longrightarrow> 0 < a \<Longrightarrow> 0 < b"
-apply (cases "b\<le>0")
- apply (auto simp add: le_less not_less)
-apply (drule_tac mult_pos_neg [of a b])
- apply (auto dest: less_not_sym)
-done
-
-lemma zero_less_mult_pos2:
- "0 < b * a \<Longrightarrow> 0 < a \<Longrightarrow> 0 < b"
-apply (cases "b\<le>0")
- apply (auto simp add: le_less not_less)
-apply (drule_tac mult_pos_neg2 [of a b])
- apply (auto dest: less_not_sym)
-done
-
-text{*Strict monotonicity in both arguments*}
-lemma mult_strict_mono:
- assumes "a < b" and "c < d" and "0 < b" and "0 \<le> c"
- shows "a * c < b * d"
- using assms apply (cases "c=0")
- apply (simp add: mult_pos_pos)
- apply (erule mult_strict_right_mono [THEN less_trans])
- apply (force simp add: le_less)
- apply (erule mult_strict_left_mono, assumption)
- done
-
-text{*This weaker variant has more natural premises*}
-lemma mult_strict_mono':
- assumes "a < b" and "c < d" and "0 \<le> a" and "0 \<le> c"
- shows "a * c < b * d"
-by (rule mult_strict_mono) (insert assms, auto)
-
-lemma mult_less_le_imp_less:
- assumes "a < b" and "c \<le> d" and "0 \<le> a" and "0 < c"
- shows "a * c < b * d"
- using assms apply (subgoal_tac "a * c < b * c")
- apply (erule less_le_trans)
- apply (erule mult_left_mono)
- apply simp
- apply (erule mult_strict_right_mono)
- apply assumption
- done
-
-lemma mult_le_less_imp_less:
- assumes "a \<le> b" and "c < d" and "0 < a" and "0 \<le> c"
- shows "a * c < b * d"
- using assms apply (subgoal_tac "a * c \<le> b * c")
- apply (erule le_less_trans)
- apply (erule mult_strict_left_mono)
- apply simp
- apply (erule mult_right_mono)
- apply simp
- done
-
-lemma mult_less_imp_less_left:
- assumes less: "c * a < c * b" and nonneg: "0 \<le> c"
- shows "a < b"
-proof (rule ccontr)
- assume "\<not> a < b"
- hence "b \<le> a" by (simp add: linorder_not_less)
- hence "c * b \<le> c * a" using nonneg by (rule mult_left_mono)
- with this and less show False by (simp add: not_less [symmetric])
-qed
-
-lemma mult_less_imp_less_right:
- assumes less: "a * c < b * c" and nonneg: "0 \<le> c"
- shows "a < b"
-proof (rule ccontr)
- assume "\<not> a < b"
- hence "b \<le> a" by (simp add: linorder_not_less)
- hence "b * c \<le> a * c" using nonneg by (rule mult_right_mono)
- with this and less show False by (simp add: not_less [symmetric])
-qed
-
-end
-
-class ordered_semiring_1_strict = ordered_semiring_strict + semiring_1
-
-class mult_mono1 = times + zero + ord +
- assumes mult_mono1: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> c * a \<le> c * b"
-
-class pordered_comm_semiring = comm_semiring_0
- + pordered_ab_semigroup_add + mult_mono1
-begin
-
-subclass pordered_semiring
-proof
- fix a b c :: 'a
- assume "a \<le> b" "0 \<le> c"
- thus "c * a \<le> c * b" by (rule mult_mono1)
- thus "a * c \<le> b * c" by (simp only: mult_commute)
-qed
-
-end
-
-class pordered_cancel_comm_semiring = comm_semiring_0_cancel
- + pordered_ab_semigroup_add + mult_mono1
-begin
-
-subclass pordered_comm_semiring ..
-subclass pordered_cancel_semiring ..
-
-end
-
-class ordered_comm_semiring_strict = comm_semiring_0 + ordered_cancel_ab_semigroup_add +
- assumes mult_strict_left_mono_comm: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> c * a < c * b"
-begin
-
-subclass ordered_semiring_strict
-proof
- fix a b c :: 'a
- assume "a < b" "0 < c"
- thus "c * a < c * b" by (rule mult_strict_left_mono_comm)
- thus "a * c < b * c" by (simp only: mult_commute)
-qed
-
-subclass pordered_cancel_comm_semiring
-proof
- fix a b c :: 'a
- assume "a \<le> b" "0 \<le> c"
- thus "c * a \<le> c * b"
- unfolding le_less
- using mult_strict_left_mono by (cases "c = 0") auto
-qed
-
-end
-
-class pordered_ring = ring + pordered_cancel_semiring
-begin
-
-subclass pordered_ab_group_add ..
-
-text{*Legacy - use @{text algebra_simps} *}
-lemmas ring_simps[noatp] = algebra_simps
-
-lemma less_add_iff1:
- "a * e + c < b * e + d \<longleftrightarrow> (a - b) * e + c < d"
-by (simp add: algebra_simps)
-
-lemma less_add_iff2:
- "a * e + c < b * e + d \<longleftrightarrow> c < (b - a) * e + d"
-by (simp add: algebra_simps)
-
-lemma le_add_iff1:
- "a * e + c \<le> b * e + d \<longleftrightarrow> (a - b) * e + c \<le> d"
-by (simp add: algebra_simps)
-
-lemma le_add_iff2:
- "a * e + c \<le> b * e + d \<longleftrightarrow> c \<le> (b - a) * e + d"
-by (simp add: algebra_simps)
-
-lemma mult_left_mono_neg:
- "b \<le> a \<Longrightarrow> c \<le> 0 \<Longrightarrow> c * a \<le> c * b"
- apply (drule mult_left_mono [of _ _ "uminus c"])
- apply (simp_all add: minus_mult_left [symmetric])
- done
-
-lemma mult_right_mono_neg:
- "b \<le> a \<Longrightarrow> c \<le> 0 \<Longrightarrow> a * c \<le> b * c"
- apply (drule mult_right_mono [of _ _ "uminus c"])
- apply (simp_all add: minus_mult_right [symmetric])
- done
-
-lemma mult_nonpos_nonpos: "a \<le> 0 \<Longrightarrow> b \<le> 0 \<Longrightarrow> 0 \<le> a * b"
-using mult_right_mono_neg [of a zero b] by simp
-
-lemma split_mult_pos_le:
- "(0 \<le> a \<and> 0 \<le> b) \<or> (a \<le> 0 \<and> b \<le> 0) \<Longrightarrow> 0 \<le> a * b"
-by (auto simp add: mult_nonneg_nonneg mult_nonpos_nonpos)
-
-end
-
-class abs_if = minus + uminus + ord + zero + abs +
- assumes abs_if: "\<bar>a\<bar> = (if a < 0 then - a else a)"
-
-class sgn_if = minus + uminus + zero + one + ord + sgn +
- assumes sgn_if: "sgn x = (if x = 0 then 0 else if 0 < x then 1 else - 1)"
-
-lemma (in sgn_if) sgn0[simp]: "sgn 0 = 0"
-by(simp add:sgn_if)
-
-class ordered_ring = ring + ordered_semiring
- + ordered_ab_group_add + abs_if
-begin
-
-subclass pordered_ring ..
-
-subclass pordered_ab_group_add_abs
-proof
- fix a b
- show "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
-by (auto simp add: abs_if not_less neg_less_eq_nonneg less_eq_neg_nonpos)
- (auto simp del: minus_add_distrib simp add: minus_add_distrib [symmetric]
- neg_less_eq_nonneg less_eq_neg_nonpos, auto intro: add_nonneg_nonneg,
- auto intro!: less_imp_le add_neg_neg)
-qed (auto simp add: abs_if less_eq_neg_nonpos neg_equal_zero)
-
-end
-
-(* The "strict" suffix can be seen as describing the combination of ordered_ring and no_zero_divisors.
- Basically, ordered_ring + no_zero_divisors = ordered_ring_strict.
- *)
-class ordered_ring_strict = ring + ordered_semiring_strict
- + ordered_ab_group_add + abs_if
-begin
-
-subclass ordered_ring ..
-
-lemma mult_strict_left_mono_neg: "b < a \<Longrightarrow> c < 0 \<Longrightarrow> c * a < c * b"
-using mult_strict_left_mono [of b a "- c"] by simp
-
-lemma mult_strict_right_mono_neg: "b < a \<Longrightarrow> c < 0 \<Longrightarrow> a * c < b * c"
-using mult_strict_right_mono [of b a "- c"] by simp
-
-lemma mult_neg_neg: "a < 0 \<Longrightarrow> b < 0 \<Longrightarrow> 0 < a * b"
-using mult_strict_right_mono_neg [of a zero b] by simp
-
-subclass ring_no_zero_divisors
-proof
- fix a b
- assume "a \<noteq> 0" then have A: "a < 0 \<or> 0 < a" by (simp add: neq_iff)
- assume "b \<noteq> 0" then have B: "b < 0 \<or> 0 < b" by (simp add: neq_iff)
- have "a * b < 0 \<or> 0 < a * b"
- proof (cases "a < 0")
- case True note A' = this
- show ?thesis proof (cases "b < 0")
- case True with A'
- show ?thesis by (auto dest: mult_neg_neg)
- next
- case False with B have "0 < b" by auto
- with A' show ?thesis by (auto dest: mult_strict_right_mono)
- qed
- next
- case False with A have A': "0 < a" by auto
- show ?thesis proof (cases "b < 0")
- case True with A'
- show ?thesis by (auto dest: mult_strict_right_mono_neg)
- next
- case False with B have "0 < b" by auto
- with A' show ?thesis by (auto dest: mult_pos_pos)
- qed
- qed
- then show "a * b \<noteq> 0" by (simp add: neq_iff)
-qed
-
-lemma zero_less_mult_iff:
- "0 < a * b \<longleftrightarrow> 0 < a \<and> 0 < b \<or> a < 0 \<and> b < 0"
- apply (auto simp add: mult_pos_pos mult_neg_neg)
- apply (simp_all add: not_less le_less)
- apply (erule disjE) apply assumption defer
- apply (erule disjE) defer apply (drule sym) apply simp
- apply (erule disjE) defer apply (drule sym) apply simp
- apply (erule disjE) apply assumption apply (drule sym) apply simp
- apply (drule sym) apply simp
- apply (blast dest: zero_less_mult_pos)
- apply (blast dest: zero_less_mult_pos2)
- done
-
-lemma zero_le_mult_iff:
- "0 \<le> a * b \<longleftrightarrow> 0 \<le> a \<and> 0 \<le> b \<or> a \<le> 0 \<and> b \<le> 0"
-by (auto simp add: eq_commute [of 0] le_less not_less zero_less_mult_iff)
-
-lemma mult_less_0_iff:
- "a * b < 0 \<longleftrightarrow> 0 < a \<and> b < 0 \<or> a < 0 \<and> 0 < b"
- apply (insert zero_less_mult_iff [of "-a" b])
- apply (force simp add: minus_mult_left[symmetric])
- done
-
-lemma mult_le_0_iff:
- "a * b \<le> 0 \<longleftrightarrow> 0 \<le> a \<and> b \<le> 0 \<or> a \<le> 0 \<and> 0 \<le> b"
- apply (insert zero_le_mult_iff [of "-a" b])
- apply (force simp add: minus_mult_left[symmetric])
- done
-
-lemma zero_le_square [simp]: "0 \<le> a * a"
-by (simp add: zero_le_mult_iff linear)
-
-lemma not_square_less_zero [simp]: "\<not> (a * a < 0)"
-by (simp add: not_less)
-
-text{*Cancellation laws for @{term "c*a < c*b"} and @{term "a*c < b*c"},
- also with the relations @{text "\<le>"} and equality.*}
-
-text{*These ``disjunction'' versions produce two cases when the comparison is
- an assumption, but effectively four when the comparison is a goal.*}
-
-lemma mult_less_cancel_right_disj:
- "a * c < b * c \<longleftrightarrow> 0 < c \<and> a < b \<or> c < 0 \<and> b < a"
- apply (cases "c = 0")
- apply (auto simp add: neq_iff mult_strict_right_mono
- mult_strict_right_mono_neg)
- apply (auto simp add: not_less
- not_le [symmetric, of "a*c"]
- not_le [symmetric, of a])
- apply (erule_tac [!] notE)
- apply (auto simp add: less_imp_le mult_right_mono
- mult_right_mono_neg)
- done
-
-lemma mult_less_cancel_left_disj:
- "c * a < c * b \<longleftrightarrow> 0 < c \<and> a < b \<or> c < 0 \<and> b < a"
- apply (cases "c = 0")
- apply (auto simp add: neq_iff mult_strict_left_mono
- mult_strict_left_mono_neg)
- apply (auto simp add: not_less
- not_le [symmetric, of "c*a"]
- not_le [symmetric, of a])
- apply (erule_tac [!] notE)
- apply (auto simp add: less_imp_le mult_left_mono
- mult_left_mono_neg)
- done
-
-text{*The ``conjunction of implication'' lemmas produce two cases when the
-comparison is a goal, but give four when the comparison is an assumption.*}
-
-lemma mult_less_cancel_right:
- "a * c < b * c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < b) \<and> (c \<le> 0 \<longrightarrow> b < a)"
- using mult_less_cancel_right_disj [of a c b] by auto
-
-lemma mult_less_cancel_left:
- "c * a < c * b \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < b) \<and> (c \<le> 0 \<longrightarrow> b < a)"
- using mult_less_cancel_left_disj [of c a b] by auto
-
-lemma mult_le_cancel_right:
- "a * c \<le> b * c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> a)"
-by (simp add: not_less [symmetric] mult_less_cancel_right_disj)
-
-lemma mult_le_cancel_left:
- "c * a \<le> c * b \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> a)"
-by (simp add: not_less [symmetric] mult_less_cancel_left_disj)
-
-lemma mult_le_cancel_left_pos:
- "0 < c \<Longrightarrow> c * a \<le> c * b \<longleftrightarrow> a \<le> b"
-by (auto simp: mult_le_cancel_left)
-
-lemma mult_le_cancel_left_neg:
- "c < 0 \<Longrightarrow> c * a \<le> c * b \<longleftrightarrow> b \<le> a"
-by (auto simp: mult_le_cancel_left)
-
-lemma mult_less_cancel_left_pos:
- "0 < c \<Longrightarrow> c * a < c * b \<longleftrightarrow> a < b"
-by (auto simp: mult_less_cancel_left)
-
-lemma mult_less_cancel_left_neg:
- "c < 0 \<Longrightarrow> c * a < c * b \<longleftrightarrow> b < a"
-by (auto simp: mult_less_cancel_left)
-
-end
-
-text{*Legacy - use @{text algebra_simps} *}
-lemmas ring_simps[noatp] = algebra_simps
-
-lemmas mult_sign_intros =
- mult_nonneg_nonneg mult_nonneg_nonpos
- mult_nonpos_nonneg mult_nonpos_nonpos
- mult_pos_pos mult_pos_neg
- mult_neg_pos mult_neg_neg
-
-class pordered_comm_ring = comm_ring + pordered_comm_semiring
-begin
-
-subclass pordered_ring ..
-subclass pordered_cancel_comm_semiring ..
-
-end
-
-class ordered_semidom = comm_semiring_1_cancel + ordered_comm_semiring_strict +
- (*previously ordered_semiring*)
- assumes zero_less_one [simp]: "0 < 1"
-begin
-
-lemma pos_add_strict:
- shows "0 < a \<Longrightarrow> b < c \<Longrightarrow> b < a + c"
- using add_strict_mono [of zero a b c] by simp
-
-lemma zero_le_one [simp]: "0 \<le> 1"
-by (rule zero_less_one [THEN less_imp_le])
-
-lemma not_one_le_zero [simp]: "\<not> 1 \<le> 0"
-by (simp add: not_le)
-
-lemma not_one_less_zero [simp]: "\<not> 1 < 0"
-by (simp add: not_less)
-
-lemma less_1_mult:
- assumes "1 < m" and "1 < n"
- shows "1 < m * n"
- using assms mult_strict_mono [of 1 m 1 n]
- by (simp add: less_trans [OF zero_less_one])
-
-end
-
-class ordered_idom = comm_ring_1 +
- ordered_comm_semiring_strict + ordered_ab_group_add +
- abs_if + sgn_if
- (*previously ordered_ring*)
-begin
-
-subclass ordered_ring_strict ..
-subclass pordered_comm_ring ..
-subclass idom ..
-
-subclass ordered_semidom
-proof
- have "0 \<le> 1 * 1" by (rule zero_le_square)
- thus "0 < 1" by (simp add: le_less)
-qed
-
-lemma linorder_neqE_ordered_idom:
- assumes "x \<noteq> y" obtains "x < y" | "y < x"
- using assms by (rule neqE)
-
-text {* These cancellation simprules also produce two cases when the comparison is a goal. *}
-
-lemma mult_le_cancel_right1:
- "c \<le> b * c \<longleftrightarrow> (0 < c \<longrightarrow> 1 \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> 1)"
-by (insert mult_le_cancel_right [of 1 c b], simp)
-
-lemma mult_le_cancel_right2:
- "a * c \<le> c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> 1) \<and> (c < 0 \<longrightarrow> 1 \<le> a)"
-by (insert mult_le_cancel_right [of a c 1], simp)
-
-lemma mult_le_cancel_left1:
- "c \<le> c * b \<longleftrightarrow> (0 < c \<longrightarrow> 1 \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> 1)"
-by (insert mult_le_cancel_left [of c 1 b], simp)
-
-lemma mult_le_cancel_left2:
- "c * a \<le> c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> 1) \<and> (c < 0 \<longrightarrow> 1 \<le> a)"
-by (insert mult_le_cancel_left [of c a 1], simp)
-
-lemma mult_less_cancel_right1:
- "c < b * c \<longleftrightarrow> (0 \<le> c \<longrightarrow> 1 < b) \<and> (c \<le> 0 \<longrightarrow> b < 1)"
-by (insert mult_less_cancel_right [of 1 c b], simp)
-
-lemma mult_less_cancel_right2:
- "a * c < c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < 1) \<and> (c \<le> 0 \<longrightarrow> 1 < a)"
-by (insert mult_less_cancel_right [of a c 1], simp)
-
-lemma mult_less_cancel_left1:
- "c < c * b \<longleftrightarrow> (0 \<le> c \<longrightarrow> 1 < b) \<and> (c \<le> 0 \<longrightarrow> b < 1)"
-by (insert mult_less_cancel_left [of c 1 b], simp)
-
-lemma mult_less_cancel_left2:
- "c * a < c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < 1) \<and> (c \<le> 0 \<longrightarrow> 1 < a)"
-by (insert mult_less_cancel_left [of c a 1], simp)
-
-lemma sgn_sgn [simp]:
- "sgn (sgn a) = sgn a"
-unfolding sgn_if by simp
-
-lemma sgn_0_0:
- "sgn a = 0 \<longleftrightarrow> a = 0"
-unfolding sgn_if by simp
-
-lemma sgn_1_pos:
- "sgn a = 1 \<longleftrightarrow> a > 0"
-unfolding sgn_if by (simp add: neg_equal_zero)
-
-lemma sgn_1_neg:
- "sgn a = - 1 \<longleftrightarrow> a < 0"
-unfolding sgn_if by (auto simp add: equal_neg_zero)
-
-lemma sgn_pos [simp]:
- "0 < a \<Longrightarrow> sgn a = 1"
-unfolding sgn_1_pos .
-
-lemma sgn_neg [simp]:
- "a < 0 \<Longrightarrow> sgn a = - 1"
-unfolding sgn_1_neg .
-
-lemma sgn_times:
- "sgn (a * b) = sgn a * sgn b"
-by (auto simp add: sgn_if zero_less_mult_iff)
-
-lemma abs_sgn: "abs k = k * sgn k"
-unfolding sgn_if abs_if by auto
-
-lemma sgn_greater [simp]:
- "0 < sgn a \<longleftrightarrow> 0 < a"
- unfolding sgn_if by auto
-
-lemma sgn_less [simp]:
- "sgn a < 0 \<longleftrightarrow> a < 0"
- unfolding sgn_if by auto
-
-lemma abs_dvd_iff [simp]: "(abs m) dvd k \<longleftrightarrow> m dvd k"
- by (simp add: abs_if)
-
-lemma dvd_abs_iff [simp]: "m dvd (abs k) \<longleftrightarrow> m dvd k"
- by (simp add: abs_if)
-
-lemma dvd_if_abs_eq:
- "abs l = abs (k) \<Longrightarrow> l dvd k"
-by(subst abs_dvd_iff[symmetric]) simp
-
-end
-
-class ordered_field = field + ordered_idom
-
-text {* Simprules for comparisons where common factors can be cancelled. *}
-
-lemmas mult_compare_simps[noatp] =
- mult_le_cancel_right mult_le_cancel_left
- mult_le_cancel_right1 mult_le_cancel_right2
- mult_le_cancel_left1 mult_le_cancel_left2
- mult_less_cancel_right mult_less_cancel_left
- mult_less_cancel_right1 mult_less_cancel_right2
- mult_less_cancel_left1 mult_less_cancel_left2
- mult_cancel_right mult_cancel_left
- mult_cancel_right1 mult_cancel_right2
- mult_cancel_left1 mult_cancel_left2
-
--- {* FIXME continue localization here *}
-
-lemma inverse_nonzero_iff_nonzero [simp]:
- "(inverse a = 0) = (a = (0::'a::{division_ring,division_by_zero}))"
-by (force dest: inverse_zero_imp_zero)
-
-lemma inverse_minus_eq [simp]:
- "inverse(-a) = -inverse(a::'a::{division_ring,division_by_zero})"
-proof cases
- assume "a=0" thus ?thesis by (simp add: inverse_zero)
-next
- assume "a\<noteq>0"
- thus ?thesis by (simp add: nonzero_inverse_minus_eq)
-qed
-
-lemma inverse_eq_imp_eq:
- "inverse a = inverse b ==> a = (b::'a::{division_ring,division_by_zero})"
-apply (cases "a=0 | b=0")
- apply (force dest!: inverse_zero_imp_zero
- simp add: eq_commute [of "0::'a"])
-apply (force dest!: nonzero_inverse_eq_imp_eq)
-done
-
-lemma inverse_eq_iff_eq [simp]:
- "(inverse a = inverse b) = (a = (b::'a::{division_ring,division_by_zero}))"
-by (force dest!: inverse_eq_imp_eq)
-
-lemma inverse_inverse_eq [simp]:
- "inverse(inverse (a::'a::{division_ring,division_by_zero})) = a"
- proof cases
- assume "a=0" thus ?thesis by simp
- next
- assume "a\<noteq>0"
- thus ?thesis by (simp add: nonzero_inverse_inverse_eq)
- qed
-
-text{*This version builds in division by zero while also re-orienting
- the right-hand side.*}
-lemma inverse_mult_distrib [simp]:
- "inverse(a*b) = inverse(a) * inverse(b::'a::{field,division_by_zero})"
- proof cases
- assume "a \<noteq> 0 & b \<noteq> 0"
- thus ?thesis by (simp add: nonzero_inverse_mult_distrib mult_commute)
- next
- assume "~ (a \<noteq> 0 & b \<noteq> 0)"
- thus ?thesis by force
- qed
-
-lemma inverse_divide [simp]:
- "inverse (a/b) = b / (a::'a::{field,division_by_zero})"
-by (simp add: divide_inverse mult_commute)
-
-
-subsection {* Calculations with fractions *}
-
-text{* There is a whole bunch of simp-rules just for class @{text
-field} but none for class @{text field} and @{text nonzero_divides}
-because the latter are covered by a simproc. *}
-
-lemma mult_divide_mult_cancel_left:
- "c\<noteq>0 ==> (c*a) / (c*b) = a / (b::'a::{field,division_by_zero})"
-apply (cases "b = 0")
-apply (simp_all add: nonzero_mult_divide_mult_cancel_left)
-done
-
-lemma mult_divide_mult_cancel_right:
- "c\<noteq>0 ==> (a*c) / (b*c) = a / (b::'a::{field,division_by_zero})"
-apply (cases "b = 0")
-apply (simp_all add: nonzero_mult_divide_mult_cancel_right)
-done
-
-lemma divide_divide_eq_right [simp,noatp]:
- "a / (b/c) = (a*c) / (b::'a::{field,division_by_zero})"
-by (simp add: divide_inverse mult_ac)
-
-lemma divide_divide_eq_left [simp,noatp]:
- "(a / b) / (c::'a::{field,division_by_zero}) = a / (b*c)"
-by (simp add: divide_inverse mult_assoc)
-
-
-subsubsection{*Special Cancellation Simprules for Division*}
-
-lemma mult_divide_mult_cancel_left_if[simp,noatp]:
-fixes c :: "'a :: {field,division_by_zero}"
-shows "(c*a) / (c*b) = (if c=0 then 0 else a/b)"
-by (simp add: mult_divide_mult_cancel_left)
-
-
-subsection {* Division and Unary Minus *}
-
-lemma minus_divide_right: "- (a/b) = a / -(b::'a::{field,division_by_zero})"
-by (simp add: divide_inverse)
-
-lemma divide_minus_right [simp, noatp]:
- "a / -(b::'a::{field,division_by_zero}) = -(a / b)"
-by (simp add: divide_inverse)
-
-lemma minus_divide_divide:
- "(-a)/(-b) = a / (b::'a::{field,division_by_zero})"
-apply (cases "b=0", simp)
-apply (simp add: nonzero_minus_divide_divide)
-done
-
-lemma eq_divide_eq:
- "((a::'a::{field,division_by_zero}) = b/c) = (if c\<noteq>0 then a*c = b else a=0)"
-by (simp add: nonzero_eq_divide_eq)
-
-lemma divide_eq_eq:
- "(b/c = (a::'a::{field,division_by_zero})) = (if c\<noteq>0 then b = a*c else a=0)"
-by (force simp add: nonzero_divide_eq_eq)
-
-
-subsection {* Ordered Fields *}
-
-lemma positive_imp_inverse_positive:
-assumes a_gt_0: "0 < a" shows "0 < inverse (a::'a::ordered_field)"
-proof -
- have "0 < a * inverse a"
- by (simp add: a_gt_0 [THEN order_less_imp_not_eq2] zero_less_one)
- thus "0 < inverse a"
- by (simp add: a_gt_0 [THEN order_less_not_sym] zero_less_mult_iff)
-qed
-
-lemma negative_imp_inverse_negative:
- "a < 0 ==> inverse a < (0::'a::ordered_field)"
-by (insert positive_imp_inverse_positive [of "-a"],
- simp add: nonzero_inverse_minus_eq order_less_imp_not_eq)
-
-lemma inverse_le_imp_le:
-assumes invle: "inverse a \<le> inverse b" and apos: "0 < a"
-shows "b \<le> (a::'a::ordered_field)"
-proof (rule classical)
- assume "~ b \<le> a"
- hence "a < b" by (simp add: linorder_not_le)
- hence bpos: "0 < b" by (blast intro: apos order_less_trans)
- hence "a * inverse a \<le> a * inverse b"
- by (simp add: apos invle order_less_imp_le mult_left_mono)
- hence "(a * inverse a) * b \<le> (a * inverse b) * b"
- by (simp add: bpos order_less_imp_le mult_right_mono)
- thus "b \<le> a" by (simp add: mult_assoc apos bpos order_less_imp_not_eq2)
-qed
-
-lemma inverse_positive_imp_positive:
-assumes inv_gt_0: "0 < inverse a" and nz: "a \<noteq> 0"
-shows "0 < (a::'a::ordered_field)"
-proof -
- have "0 < inverse (inverse a)"
- using inv_gt_0 by (rule positive_imp_inverse_positive)
- thus "0 < a"
- using nz by (simp add: nonzero_inverse_inverse_eq)
-qed
-
-lemma inverse_positive_iff_positive [simp]:
- "(0 < inverse a) = (0 < (a::'a::{ordered_field,division_by_zero}))"
-apply (cases "a = 0", simp)
-apply (blast intro: inverse_positive_imp_positive positive_imp_inverse_positive)
-done
-
-lemma inverse_negative_imp_negative:
-assumes inv_less_0: "inverse a < 0" and nz: "a \<noteq> 0"
-shows "a < (0::'a::ordered_field)"
-proof -
- have "inverse (inverse a) < 0"
- using inv_less_0 by (rule negative_imp_inverse_negative)
- thus "a < 0" using nz by (simp add: nonzero_inverse_inverse_eq)
-qed
-
-lemma inverse_negative_iff_negative [simp]:
- "(inverse a < 0) = (a < (0::'a::{ordered_field,division_by_zero}))"
-apply (cases "a = 0", simp)
-apply (blast intro: inverse_negative_imp_negative negative_imp_inverse_negative)
-done
-
-lemma inverse_nonnegative_iff_nonnegative [simp]:
- "(0 \<le> inverse a) = (0 \<le> (a::'a::{ordered_field,division_by_zero}))"
-by (simp add: linorder_not_less [symmetric])
-
-lemma inverse_nonpositive_iff_nonpositive [simp]:
- "(inverse a \<le> 0) = (a \<le> (0::'a::{ordered_field,division_by_zero}))"
-by (simp add: linorder_not_less [symmetric])
-
-lemma ordered_field_no_lb: "\<forall> x. \<exists>y. y < (x::'a::ordered_field)"
-proof
- fix x::'a
- have m1: "- (1::'a) < 0" by simp
- from add_strict_right_mono[OF m1, where c=x]
- have "(- 1) + x < x" by simp
- thus "\<exists>y. y < x" by blast
-qed
-
-lemma ordered_field_no_ub: "\<forall> x. \<exists>y. y > (x::'a::ordered_field)"
-proof
- fix x::'a
- have m1: " (1::'a) > 0" by simp
- from add_strict_right_mono[OF m1, where c=x]
- have "1 + x > x" by simp
- thus "\<exists>y. y > x" by blast
-qed
-
-subsection{*Anti-Monotonicity of @{term inverse}*}
-
-lemma less_imp_inverse_less:
-assumes less: "a < b" and apos: "0 < a"
-shows "inverse b < inverse (a::'a::ordered_field)"
-proof (rule ccontr)
- assume "~ inverse b < inverse a"
- hence "inverse a \<le> inverse b" by (simp add: linorder_not_less)
- hence "~ (a < b)"
- by (simp add: linorder_not_less inverse_le_imp_le [OF _ apos])
- thus False by (rule notE [OF _ less])
-qed
-
-lemma inverse_less_imp_less:
- "[|inverse a < inverse b; 0 < a|] ==> b < (a::'a::ordered_field)"
-apply (simp add: order_less_le [of "inverse a"] order_less_le [of "b"])
-apply (force dest!: inverse_le_imp_le nonzero_inverse_eq_imp_eq)
-done
-
-text{*Both premises are essential. Consider -1 and 1.*}
-lemma inverse_less_iff_less [simp,noatp]:
- "[|0 < a; 0 < b|] ==> (inverse a < inverse b) = (b < (a::'a::ordered_field))"
-by (blast intro: less_imp_inverse_less dest: inverse_less_imp_less)
-
-lemma le_imp_inverse_le:
- "[|a \<le> b; 0 < a|] ==> inverse b \<le> inverse (a::'a::ordered_field)"
-by (force simp add: order_le_less less_imp_inverse_less)
-
-lemma inverse_le_iff_le [simp,noatp]:
- "[|0 < a; 0 < b|] ==> (inverse a \<le> inverse b) = (b \<le> (a::'a::ordered_field))"
-by (blast intro: le_imp_inverse_le dest: inverse_le_imp_le)
-
-
-text{*These results refer to both operands being negative. The opposite-sign
-case is trivial, since inverse preserves signs.*}
-lemma inverse_le_imp_le_neg:
- "[|inverse a \<le> inverse b; b < 0|] ==> b \<le> (a::'a::ordered_field)"
-apply (rule classical)
-apply (subgoal_tac "a < 0")
- prefer 2 apply (force simp add: linorder_not_le intro: order_less_trans)
-apply (insert inverse_le_imp_le [of "-b" "-a"])
-apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
-done
-
-lemma less_imp_inverse_less_neg:
- "[|a < b; b < 0|] ==> inverse b < inverse (a::'a::ordered_field)"
-apply (subgoal_tac "a < 0")
- prefer 2 apply (blast intro: order_less_trans)
-apply (insert less_imp_inverse_less [of "-b" "-a"])
-apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
-done
-
-lemma inverse_less_imp_less_neg:
- "[|inverse a < inverse b; b < 0|] ==> b < (a::'a::ordered_field)"
-apply (rule classical)
-apply (subgoal_tac "a < 0")
- prefer 2
- apply (force simp add: linorder_not_less intro: order_le_less_trans)
-apply (insert inverse_less_imp_less [of "-b" "-a"])
-apply (simp add: order_less_imp_not_eq nonzero_inverse_minus_eq)
-done
-
-lemma inverse_less_iff_less_neg [simp,noatp]:
- "[|a < 0; b < 0|] ==> (inverse a < inverse b) = (b < (a::'a::ordered_field))"
-apply (insert inverse_less_iff_less [of "-b" "-a"])
-apply (simp del: inverse_less_iff_less
- add: order_less_imp_not_eq nonzero_inverse_minus_eq)
-done
-
-lemma le_imp_inverse_le_neg:
- "[|a \<le> b; b < 0|] ==> inverse b \<le> inverse (a::'a::ordered_field)"
-by (force simp add: order_le_less less_imp_inverse_less_neg)
-
-lemma inverse_le_iff_le_neg [simp,noatp]:
- "[|a < 0; b < 0|] ==> (inverse a \<le> inverse b) = (b \<le> (a::'a::ordered_field))"
-by (blast intro: le_imp_inverse_le_neg dest: inverse_le_imp_le_neg)
-
-
-subsection{*Inverses and the Number One*}
-
-lemma one_less_inverse_iff:
- "(1 < inverse x) = (0 < x & x < (1::'a::{ordered_field,division_by_zero}))"
-proof cases
- assume "0 < x"
- with inverse_less_iff_less [OF zero_less_one, of x]
- show ?thesis by simp
-next
- assume notless: "~ (0 < x)"
- have "~ (1 < inverse x)"
- proof
- assume "1 < inverse x"
- also with notless have "... \<le> 0" by (simp add: linorder_not_less)
- also have "... < 1" by (rule zero_less_one)
- finally show False by auto
- qed
- with notless show ?thesis by simp
-qed
-
-lemma inverse_eq_1_iff [simp]:
- "(inverse x = 1) = (x = (1::'a::{field,division_by_zero}))"
-by (insert inverse_eq_iff_eq [of x 1], simp)
-
-lemma one_le_inverse_iff:
- "(1 \<le> inverse x) = (0 < x & x \<le> (1::'a::{ordered_field,division_by_zero}))"
-by (force simp add: order_le_less one_less_inverse_iff zero_less_one
- eq_commute [of 1])
-
-lemma inverse_less_1_iff:
- "(inverse x < 1) = (x \<le> 0 | 1 < (x::'a::{ordered_field,division_by_zero}))"
-by (simp add: linorder_not_le [symmetric] one_le_inverse_iff)
-
-lemma inverse_le_1_iff:
- "(inverse x \<le> 1) = (x \<le> 0 | 1 \<le> (x::'a::{ordered_field,division_by_zero}))"
-by (simp add: linorder_not_less [symmetric] one_less_inverse_iff)
-
-
-subsection{*Simplification of Inequalities Involving Literal Divisors*}
-
-lemma pos_le_divide_eq: "0 < (c::'a::ordered_field) ==> (a \<le> b/c) = (a*c \<le> b)"
-proof -
- assume less: "0<c"
- hence "(a \<le> b/c) = (a*c \<le> (b/c)*c)"
- by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
- also have "... = (a*c \<le> b)"
- by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma neg_le_divide_eq: "c < (0::'a::ordered_field) ==> (a \<le> b/c) = (b \<le> a*c)"
-proof -
- assume less: "c<0"
- hence "(a \<le> b/c) = ((b/c)*c \<le> a*c)"
- by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
- also have "... = (b \<le> a*c)"
- by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma le_divide_eq:
- "(a \<le> b/c) =
- (if 0 < c then a*c \<le> b
- else if c < 0 then b \<le> a*c
- else a \<le> (0::'a::{ordered_field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (force simp add: pos_le_divide_eq neg_le_divide_eq linorder_neq_iff)
-done
-
-lemma pos_divide_le_eq: "0 < (c::'a::ordered_field) ==> (b/c \<le> a) = (b \<le> a*c)"
-proof -
- assume less: "0<c"
- hence "(b/c \<le> a) = ((b/c)*c \<le> a*c)"
- by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
- also have "... = (b \<le> a*c)"
- by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma neg_divide_le_eq: "c < (0::'a::ordered_field) ==> (b/c \<le> a) = (a*c \<le> b)"
-proof -
- assume less: "c<0"
- hence "(b/c \<le> a) = (a*c \<le> (b/c)*c)"
- by (simp add: mult_le_cancel_right order_less_not_sym [OF less])
- also have "... = (a*c \<le> b)"
- by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma divide_le_eq:
- "(b/c \<le> a) =
- (if 0 < c then b \<le> a*c
- else if c < 0 then a*c \<le> b
- else 0 \<le> (a::'a::{ordered_field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (force simp add: pos_divide_le_eq neg_divide_le_eq linorder_neq_iff)
-done
-
-lemma pos_less_divide_eq:
- "0 < (c::'a::ordered_field) ==> (a < b/c) = (a*c < b)"
-proof -
- assume less: "0<c"
- hence "(a < b/c) = (a*c < (b/c)*c)"
- by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
- also have "... = (a*c < b)"
- by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma neg_less_divide_eq:
- "c < (0::'a::ordered_field) ==> (a < b/c) = (b < a*c)"
-proof -
- assume less: "c<0"
- hence "(a < b/c) = ((b/c)*c < a*c)"
- by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
- also have "... = (b < a*c)"
- by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma less_divide_eq:
- "(a < b/c) =
- (if 0 < c then a*c < b
- else if c < 0 then b < a*c
- else a < (0::'a::{ordered_field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (force simp add: pos_less_divide_eq neg_less_divide_eq linorder_neq_iff)
-done
-
-lemma pos_divide_less_eq:
- "0 < (c::'a::ordered_field) ==> (b/c < a) = (b < a*c)"
-proof -
- assume less: "0<c"
- hence "(b/c < a) = ((b/c)*c < a*c)"
- by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
- also have "... = (b < a*c)"
- by (simp add: order_less_imp_not_eq2 [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma neg_divide_less_eq:
- "c < (0::'a::ordered_field) ==> (b/c < a) = (a*c < b)"
-proof -
- assume less: "c<0"
- hence "(b/c < a) = (a*c < (b/c)*c)"
- by (simp add: mult_less_cancel_right_disj order_less_not_sym [OF less])
- also have "... = (a*c < b)"
- by (simp add: order_less_imp_not_eq [OF less] divide_inverse mult_assoc)
- finally show ?thesis .
-qed
-
-lemma divide_less_eq:
- "(b/c < a) =
- (if 0 < c then b < a*c
- else if c < 0 then a*c < b
- else 0 < (a::'a::{ordered_field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (force simp add: pos_divide_less_eq neg_divide_less_eq linorder_neq_iff)
-done
-
-
-subsection{*Field simplification*}
-
-text{* Lemmas @{text field_simps} multiply out denominators in (in)equations
-whenever these can be proved to be non-zero (for equations) or positive/negative
-(for inequations). They can be too aggressive and are therefore kept separate from
-the more benign @{text algebra_simps}. *}
-
-lemmas field_simps[noatp] = field_eq_simps
- (* multiply ineqn *)
- pos_divide_less_eq neg_divide_less_eq
- pos_less_divide_eq neg_less_divide_eq
- pos_divide_le_eq neg_divide_le_eq
- pos_le_divide_eq neg_le_divide_eq
-
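(Editorial illustration, not part of this changeset: a minimal sketch of what these
rules do; whether the proof goes through exactly as stated depends on the simp setup
of this revision.)

    lemma
      fixes x y :: "'a::ordered_field"
      shows "0 < y \<Longrightarrow> x / y < 1 \<Longrightarrow> x < y"
      by (simp add: field_simps)  (* pos_divide_less_eq multiplies out the denominator y *)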
-text{* The lemma collection @{text sign_simps} is a first attempt to automate the
-positivity/negativity proofs needed for @{text field_simps}. It has not been added to
-@{text field_simps} because it can lead to case explosions. *}
-
-lemmas sign_simps[noatp] = group_simps
- zero_less_mult_iff mult_less_0_iff
-
-(* Only works once linear arithmetic is installed:
-text{*An example:*}
-lemma fixes a b c d e f :: "'a::ordered_field"
-shows "\<lbrakk>a>b; c<d; e<f; 0 < u \<rbrakk> \<Longrightarrow>
- ((a-b)*(c-d)*(e-f))/((c-d)*(e-f)*(a-b)) <
- ((e-f)*(a-b)*(c-d))/((e-f)*(a-b)*(c-d)) + u"
-apply(subgoal_tac "(c-d)*(e-f)*(a-b) > 0")
- prefer 2 apply(simp add:sign_simps)
-apply(subgoal_tac "(c-d)*(e-f)*(a-b)*u > 0")
- prefer 2 apply(simp add:sign_simps)
-apply(simp add:field_simps)
-done
-*)
-
-
-subsection{*Division and Signs*}
-
-lemma zero_less_divide_iff:
- "((0::'a::{ordered_field,division_by_zero}) < a/b) = (0 < a & 0 < b | a < 0 & b < 0)"
-by (simp add: divide_inverse zero_less_mult_iff)
-
-lemma divide_less_0_iff:
- "(a/b < (0::'a::{ordered_field,division_by_zero})) =
- (0 < a & b < 0 | a < 0 & 0 < b)"
-by (simp add: divide_inverse mult_less_0_iff)
-
-lemma zero_le_divide_iff:
- "((0::'a::{ordered_field,division_by_zero}) \<le> a/b) =
- (0 \<le> a & 0 \<le> b | a \<le> 0 & b \<le> 0)"
-by (simp add: divide_inverse zero_le_mult_iff)
-
-lemma divide_le_0_iff:
- "(a/b \<le> (0::'a::{ordered_field,division_by_zero})) =
- (0 \<le> a & b \<le> 0 | a \<le> 0 & 0 \<le> b)"
-by (simp add: divide_inverse mult_le_0_iff)
-
-lemma divide_eq_0_iff [simp,noatp]:
- "(a/b = 0) = (a=0 | b=(0::'a::{field,division_by_zero}))"
-by (simp add: divide_inverse)
-
-lemma divide_pos_pos:
- "0 < (x::'a::ordered_field) ==> 0 < y ==> 0 < x / y"
-by(simp add:field_simps)
-
-
-lemma divide_nonneg_pos:
- "0 <= (x::'a::ordered_field) ==> 0 < y ==> 0 <= x / y"
-by(simp add:field_simps)
-
-lemma divide_neg_pos:
- "(x::'a::ordered_field) < 0 ==> 0 < y ==> x / y < 0"
-by(simp add:field_simps)
-
-lemma divide_nonpos_pos:
- "(x::'a::ordered_field) <= 0 ==> 0 < y ==> x / y <= 0"
-by(simp add:field_simps)
-
-lemma divide_pos_neg:
- "0 < (x::'a::ordered_field) ==> y < 0 ==> x / y < 0"
-by(simp add:field_simps)
-
-lemma divide_nonneg_neg:
- "0 <= (x::'a::ordered_field) ==> y < 0 ==> x / y <= 0"
-by(simp add:field_simps)
-
-lemma divide_neg_neg:
- "(x::'a::ordered_field) < 0 ==> y < 0 ==> 0 < x / y"
-by(simp add:field_simps)
-
-lemma divide_nonpos_neg:
- "(x::'a::ordered_field) <= 0 ==> y < 0 ==> 0 <= x / y"
-by(simp add:field_simps)
-
-
-subsection{*Cancellation Laws for Division*}
-
-lemma divide_cancel_right [simp,noatp]:
- "(a/c = b/c) = (c = 0 | a = (b::'a::{field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (simp add: divide_inverse)
-done
-
-lemma divide_cancel_left [simp,noatp]:
- "(c/a = c/b) = (c = 0 | a = (b::'a::{field,division_by_zero}))"
-apply (cases "c=0", simp)
-apply (simp add: divide_inverse)
-done
-
-
-subsection {* Division and the Number One *}
-
-text{*Simplify expressions equated with 1*}
-lemma divide_eq_1_iff [simp,noatp]:
- "(a/b = 1) = (b \<noteq> 0 & a = (b::'a::{field,division_by_zero}))"
-apply (cases "b=0", simp)
-apply (simp add: right_inverse_eq)
-done
-
-lemma one_eq_divide_iff [simp,noatp]:
- "(1 = a/b) = (b \<noteq> 0 & a = (b::'a::{field,division_by_zero}))"
-by (simp add: eq_commute [of 1])
-
-lemma zero_eq_1_divide_iff [simp,noatp]:
- "((0::'a::{ordered_field,division_by_zero}) = 1/a) = (a = 0)"
-apply (cases "a=0", simp)
-apply (auto simp add: nonzero_eq_divide_eq)
-done
-
-lemma one_divide_eq_0_iff [simp,noatp]:
- "(1/a = (0::'a::{ordered_field,division_by_zero})) = (a = 0)"
-apply (cases "a=0", simp)
-apply (insert zero_neq_one [THEN not_sym])
-apply (auto simp add: nonzero_divide_eq_eq)
-done
-
-text{*Simplify expressions such as @{text "0 < 1/x"} to @{text "0 < x"}*}
-lemmas zero_less_divide_1_iff = zero_less_divide_iff [of 1, simplified]
-lemmas divide_less_0_1_iff = divide_less_0_iff [of 1, simplified]
-lemmas zero_le_divide_1_iff = zero_le_divide_iff [of 1, simplified]
-lemmas divide_le_0_1_iff = divide_le_0_iff [of 1, simplified]
-
-declare zero_less_divide_1_iff [simp,noatp]
-declare divide_less_0_1_iff [simp,noatp]
-declare zero_le_divide_1_iff [simp,noatp]
-declare divide_le_0_1_iff [simp,noatp]
-
-
-subsection {* Ordering Rules for Division *}
-
-lemma divide_strict_right_mono:
- "[|a < b; 0 < c|] ==> a / c < b / (c::'a::ordered_field)"
-by (simp add: order_less_imp_not_eq2 divide_inverse mult_strict_right_mono
- positive_imp_inverse_positive)
-
-lemma divide_right_mono:
- "[|a \<le> b; 0 \<le> c|] ==> a/c \<le> b/(c::'a::{ordered_field,division_by_zero})"
-by (force simp add: divide_strict_right_mono order_le_less)
-
-lemma divide_right_mono_neg: "(a::'a::{division_by_zero,ordered_field}) <= b
- ==> c <= 0 ==> b / c <= a / c"
-apply (drule divide_right_mono [of _ _ "- c"])
-apply auto
-done
-
-lemma divide_strict_right_mono_neg:
- "[|b < a; c < 0|] ==> a / c < b / (c::'a::ordered_field)"
-apply (drule divide_strict_right_mono [of _ _ "-c"], simp)
-apply (simp add: order_less_imp_not_eq nonzero_minus_divide_right [symmetric])
-done
-
-text{*The last premise ensures that @{term a} and @{term b}
- have the same sign*}
-lemma divide_strict_left_mono:
- "[|b < a; 0 < c; 0 < a*b|] ==> c / a < c / (b::'a::ordered_field)"
-by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_strict_right_mono)
-
-lemma divide_left_mono:
- "[|b \<le> a; 0 \<le> c; 0 < a*b|] ==> c / a \<le> c / (b::'a::ordered_field)"
-by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_right_mono)
-
-lemma divide_left_mono_neg: "(a::'a::{division_by_zero,ordered_field}) <= b
- ==> c <= 0 ==> 0 < a * b ==> c / a <= c / b"
- apply (drule divide_left_mono [of _ _ "- c"])
- apply (auto simp add: mult_commute)
-done
-
-lemma divide_strict_left_mono_neg:
- "[|a < b; c < 0; 0 < a*b|] ==> c / a < c / (b::'a::ordered_field)"
-by(auto simp: field_simps times_divide_eq zero_less_mult_iff mult_strict_right_mono_neg)
-
-
-text{*Simplify quotients that are compared with the value 1.*}
-
-lemma le_divide_eq_1 [noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(1 \<le> b / a) = ((0 < a & a \<le> b) | (a < 0 & b \<le> a))"
-by (auto simp add: le_divide_eq)
-
-lemma divide_le_eq_1 [noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(b / a \<le> 1) = ((0 < a & b \<le> a) | (a < 0 & a \<le> b) | a=0)"
-by (auto simp add: divide_le_eq)
-
-lemma less_divide_eq_1 [noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(1 < b / a) = ((0 < a & a < b) | (a < 0 & b < a))"
-by (auto simp add: less_divide_eq)
-
-lemma divide_less_eq_1 [noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(b / a < 1) = ((0 < a & b < a) | (a < 0 & a < b) | a=0)"
-by (auto simp add: divide_less_eq)
-
-
-subsection{*Conditional Simplification Rules: No Case Splits*}
-
-lemma le_divide_eq_1_pos [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "0 < a \<Longrightarrow> (1 \<le> b/a) = (a \<le> b)"
-by (auto simp add: le_divide_eq)
-
-lemma le_divide_eq_1_neg [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "a < 0 \<Longrightarrow> (1 \<le> b/a) = (b \<le> a)"
-by (auto simp add: le_divide_eq)
-
-lemma divide_le_eq_1_pos [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "0 < a \<Longrightarrow> (b/a \<le> 1) = (b \<le> a)"
-by (auto simp add: divide_le_eq)
-
-lemma divide_le_eq_1_neg [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "a < 0 \<Longrightarrow> (b/a \<le> 1) = (a \<le> b)"
-by (auto simp add: divide_le_eq)
-
-lemma less_divide_eq_1_pos [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "0 < a \<Longrightarrow> (1 < b/a) = (a < b)"
-by (auto simp add: less_divide_eq)
-
-lemma less_divide_eq_1_neg [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "a < 0 \<Longrightarrow> (1 < b/a) = (b < a)"
-by (auto simp add: less_divide_eq)
-
-lemma divide_less_eq_1_pos [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "0 < a \<Longrightarrow> (b/a < 1) = (b < a)"
-by (auto simp add: divide_less_eq)
-
-lemma divide_less_eq_1_neg [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "a < 0 \<Longrightarrow> b/a < 1 <-> a < b"
-by (auto simp add: divide_less_eq)
-
-lemma eq_divide_eq_1 [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(1 = b/a) = ((a \<noteq> 0 & a = b))"
-by (auto simp add: eq_divide_eq)
-
-lemma divide_eq_eq_1 [simp,noatp]:
- fixes a :: "'a :: {ordered_field,division_by_zero}"
- shows "(b/a = 1) = ((a \<noteq> 0 & a = b))"
-by (auto simp add: divide_eq_eq)
-
-
-subsection {* Reasoning about inequalities with division *}
-
-lemma mult_right_le_one_le: "0 <= (x::'a::ordered_idom) ==> 0 <= y ==> y <= 1
- ==> x * y <= x"
-by (auto simp add: mult_compare_simps)
-
-lemma mult_left_le_one_le: "0 <= (x::'a::ordered_idom) ==> 0 <= y ==> y <= 1
- ==> y * x <= x"
-by (auto simp add: mult_compare_simps)
-
-lemma mult_imp_div_pos_le: "0 < (y::'a::ordered_field) ==> x <= z * y ==>
- x / y <= z"
-by (subst pos_divide_le_eq, assumption+)
-
-lemma mult_imp_le_div_pos: "0 < (y::'a::ordered_field) ==> z * y <= x ==>
- z <= x / y"
-by(simp add:field_simps)
-
-lemma mult_imp_div_pos_less: "0 < (y::'a::ordered_field) ==> x < z * y ==>
- x / y < z"
-by(simp add:field_simps)
-
-lemma mult_imp_less_div_pos: "0 < (y::'a::ordered_field) ==> z * y < x ==>
- z < x / y"
-by(simp add:field_simps)
-
-lemma frac_le: "(0::'a::ordered_field) <= x ==>
- x <= y ==> 0 < w ==> w <= z ==> x / z <= y / w"
- apply (rule mult_imp_div_pos_le)
- apply simp
- apply (subst times_divide_eq_left)
- apply (rule mult_imp_le_div_pos, assumption)
- apply (rule mult_mono)
- apply simp_all
-done
-
-lemma frac_less: "(0::'a::ordered_field) <= x ==>
- x < y ==> 0 < w ==> w <= z ==> x / z < y / w"
- apply (rule mult_imp_div_pos_less)
- apply simp
- apply (subst times_divide_eq_left)
- apply (rule mult_imp_less_div_pos, assumption)
- apply (erule mult_less_le_imp_less)
- apply simp_all
-done
-
-lemma frac_less2: "(0::'a::ordered_field) < x ==>
- x <= y ==> 0 < w ==> w < z ==> x / z < y / w"
- apply (rule mult_imp_div_pos_less)
- apply simp_all
- apply (subst times_divide_eq_left)
- apply (rule mult_imp_less_div_pos, assumption)
- apply (erule mult_le_less_imp_less)
- apply simp_all
-done
-
-text{*It's not obvious whether these should be simprules or not.
- Their effect is to gather terms into one big fraction, like
- a*b*c / x*y*z. The rationale for that is unclear, but many proofs
- seem to need them.*}
-
-declare times_divide_eq [simp]
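(Editorial illustration, not part of this changeset: a sketch of the gathering effect
described above, assuming @{text times_divide_eq_left} is stated at sort @{text field}.)

    lemma
      fixes a b x :: "'a::field"
      shows "a / x * b = a * b / x"
      by simp  (* times_divide_eq_left pulls the product into a single fraction *)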
-
-
-subsection {* Ordered Fields are Dense *}
-
-context ordered_semidom
-begin
-
-lemma less_add_one: "a < a + 1"
-proof -
- have "a + 0 < a + 1"
- by (blast intro: zero_less_one add_strict_left_mono)
- thus ?thesis by simp
-qed
-
-lemma zero_less_two: "0 < 1 + 1"
-by (blast intro: less_trans zero_less_one less_add_one)
-
-end
-
-lemma less_half_sum: "a < b ==> a < (a+b) / (1+1::'a::ordered_field)"
-by (simp add: field_simps zero_less_two)
-
-lemma gt_half_sum: "a < b ==> (a+b)/(1+1::'a::ordered_field) < b"
-by (simp add: field_simps zero_less_two)
-
-instance ordered_field < dense_linear_order
-proof
- fix x y :: 'a
- have "x < x + 1" by simp
- then show "\<exists>y. x < y" ..
- have "x - 1 < x" by simp
- then show "\<exists>y. y < x" ..
- show "x < y \<Longrightarrow> \<exists>z>x. z < y" by (blast intro!: less_half_sum gt_half_sum)
-qed
-
-
-subsection {* Absolute Value *}
-
-context ordered_idom
-begin
-
-lemma mult_sgn_abs: "sgn x * abs x = x"
- unfolding abs_if sgn_if by auto
-
-end
-
-lemma abs_one [simp]: "abs 1 = (1::'a::ordered_idom)"
-by (simp add: abs_if zero_less_one [THEN order_less_not_sym])
-
-class pordered_ring_abs = pordered_ring + pordered_ab_group_add_abs +
- assumes abs_eq_mult:
- "(0 \<le> a \<or> a \<le> 0) \<and> (0 \<le> b \<or> b \<le> 0) \<Longrightarrow> \<bar>a * b\<bar> = \<bar>a\<bar> * \<bar>b\<bar>"
-
-
-class lordered_ring = pordered_ring + lordered_ab_group_add_abs
-begin
-
-subclass lordered_ab_group_add_meet ..
-subclass lordered_ab_group_add_join ..
-
-end
-
-lemma abs_le_mult: "abs (a * b) \<le> (abs a) * (abs (b::'a::lordered_ring))"
-proof -
- let ?x = "pprt a * pprt b - pprt a * nprt b - nprt a * pprt b + nprt a * nprt b"
- let ?y = "pprt a * pprt b + pprt a * nprt b + nprt a * pprt b + nprt a * nprt b"
- have a: "(abs a) * (abs b) = ?x"
- by (simp only: abs_prts[of a] abs_prts[of b] algebra_simps)
- {
- fix u v :: 'a
- have bh: "\<lbrakk>u = a; v = b\<rbrakk> \<Longrightarrow>
- u * v = pprt a * pprt b + pprt a * nprt b +
- nprt a * pprt b + nprt a * nprt b"
- apply (subst prts[of u], subst prts[of v])
- apply (simp add: algebra_simps)
- done
- }
- note b = this[OF refl[of a] refl[of b]]
- note addm = add_mono[of "0::'a" _ "0::'a", simplified]
- note addm2 = add_mono[of _ "0::'a" _ "0::'a", simplified]
- have xy: "- ?x <= ?y"
- apply (simp)
- apply (rule_tac y="0::'a" in order_trans)
- apply (rule addm2)
- apply (simp_all add: mult_nonneg_nonneg mult_nonpos_nonpos)
- apply (rule addm)
- apply (simp_all add: mult_nonneg_nonneg mult_nonpos_nonpos)
- done
- have yx: "?y <= ?x"
- apply (simp add:diff_def)
- apply (rule_tac y=0 in order_trans)
- apply (rule addm2, (simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)+)
- apply (rule addm, (simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)+)
- done
- have i1: "a*b <= abs a * abs b" by (simp only: a b yx)
- have i2: "- (abs a * abs b) <= a*b" by (simp only: a b xy)
- show ?thesis
- apply (rule abs_leI)
- apply (simp add: i1)
- apply (simp add: i2[simplified minus_le_iff])
- done
-qed
-
-instance lordered_ring \<subseteq> pordered_ring_abs
-proof
- fix a b :: "'a\<Colon> lordered_ring"
- assume "(0 \<le> a \<or> a \<le> 0) \<and> (0 \<le> b \<or> b \<le> 0)"
- show "abs (a*b) = abs a * abs b"
-proof -
- have s: "(0 <= a*b) | (a*b <= 0)"
- apply (auto)
- apply (rule_tac split_mult_pos_le)
- apply (rule_tac contrapos_np[of "a*b <= 0"])
- apply (simp)
- apply (rule_tac split_mult_neg_le)
- apply (insert prems)
- apply (blast)
- done
- have mulprts: "a * b = (pprt a + nprt a) * (pprt b + nprt b)"
- by (simp add: prts[symmetric])
- show ?thesis
- proof cases
- assume "0 <= a * b"
- then show ?thesis
- apply (simp_all add: mulprts abs_prts)
- apply (insert prems)
- apply (auto simp add:
- algebra_simps
- iffD1[OF zero_le_iff_zero_nprt] iffD1[OF le_zero_iff_zero_pprt]
- iffD1[OF le_zero_iff_pprt_id] iffD1[OF zero_le_iff_nprt_id])
- apply(drule (1) mult_nonneg_nonpos[of a b], simp)
- apply(drule (1) mult_nonneg_nonpos2[of b a], simp)
- done
- next
- assume "~(0 <= a*b)"
- with s have "a*b <= 0" by simp
- then show ?thesis
- apply (simp_all add: mulprts abs_prts)
- apply (insert prems)
- apply (auto simp add: algebra_simps)
- apply(drule (1) mult_nonneg_nonneg[of a b],simp)
- apply(drule (1) mult_nonpos_nonpos[of a b],simp)
- done
- qed
-qed
-qed
-
-context ordered_idom
-begin
-
-subclass pordered_ring_abs proof
-qed (auto simp add: abs_if not_less equal_neg_zero neg_equal_zero mult_less_0_iff)
-
-lemma abs_mult:
- "abs (a * b) = abs a * abs b"
- by (rule abs_eq_mult) auto
-
-lemma abs_mult_self:
- "abs a * abs a = a * a"
- by (simp add: abs_if)
-
-end
-
-lemma nonzero_abs_inverse:
- "a \<noteq> 0 ==> abs (inverse (a::'a::ordered_field)) = inverse (abs a)"
-apply (auto simp add: linorder_neq_iff abs_if nonzero_inverse_minus_eq
- negative_imp_inverse_negative)
-apply (blast intro: positive_imp_inverse_positive elim: order_less_asym)
-done
-
-lemma abs_inverse [simp]:
- "abs (inverse (a::'a::{ordered_field,division_by_zero})) =
- inverse (abs a)"
-apply (cases "a=0", simp)
-apply (simp add: nonzero_abs_inverse)
-done
-
-lemma nonzero_abs_divide:
- "b \<noteq> 0 ==> abs (a / (b::'a::ordered_field)) = abs a / abs b"
-by (simp add: divide_inverse abs_mult nonzero_abs_inverse)
-
-lemma abs_divide [simp]:
- "abs (a / (b::'a::{ordered_field,division_by_zero})) = abs a / abs b"
-apply (cases "b=0", simp)
-apply (simp add: nonzero_abs_divide)
-done
-
-lemma abs_mult_less:
- "[| abs a < c; abs b < d |] ==> abs a * abs b < c*(d::'a::ordered_idom)"
-proof -
- assume ac: "abs a < c"
- hence cpos: "0<c" by (blast intro: order_le_less_trans abs_ge_zero)
- assume "abs b < d"
- thus ?thesis by (simp add: ac cpos mult_strict_mono)
-qed
-
-lemmas eq_minus_self_iff[noatp] = equal_neg_zero
-
-lemma less_minus_self_iff: "(a < -a) = (a < (0::'a::ordered_idom))"
- unfolding order_less_le less_eq_neg_nonpos equal_neg_zero ..
-
-lemma abs_less_iff: "(abs a < b) = (a < b & -a < (b::'a::ordered_idom))"
-apply (simp add: order_less_le abs_le_iff)
-apply (auto simp add: abs_if neg_less_eq_nonneg less_eq_neg_nonpos)
-done
-
-lemma abs_mult_pos: "(0::'a::ordered_idom) <= x ==>
- (abs y) * x = abs (y * x)"
- apply (subst abs_mult)
- apply simp
-done
-
-lemma abs_div_pos: "(0::'a::{division_by_zero,ordered_field}) < y ==>
- abs x / y = abs (x / y)"
- apply (subst abs_divide)
- apply (simp add: order_less_imp_le)
-done
-
-
-subsection {* Bounds of products via negative and positive parts *}
-
-lemma mult_le_prts:
- assumes
- "a1 <= (a::'a::lordered_ring)"
- "a <= a2"
- "b1 <= b"
- "b <= b2"
- shows
- "a * b <= pprt a2 * pprt b2 + pprt a1 * nprt b2 + nprt a2 * pprt b1 + nprt a1 * nprt b1"
-proof -
- have "a * b = (pprt a + nprt a) * (pprt b + nprt b)"
- apply (subst prts[symmetric])+
- apply simp
- done
- then have "a * b = pprt a * pprt b + pprt a * nprt b + nprt a * pprt b + nprt a * nprt b"
- by (simp add: algebra_simps)
- moreover have "pprt a * pprt b <= pprt a2 * pprt b2"
- by (simp_all add: prems mult_mono)
- moreover have "pprt a * nprt b <= pprt a1 * nprt b2"
- proof -
- have "pprt a * nprt b <= pprt a * nprt b2"
- by (simp add: mult_left_mono prems)
- moreover have "pprt a * nprt b2 <= pprt a1 * nprt b2"
- by (simp add: mult_right_mono_neg prems)
- ultimately show ?thesis
- by simp
- qed
- moreover have "nprt a * pprt b <= nprt a2 * pprt b1"
- proof -
- have "nprt a * pprt b <= nprt a2 * pprt b"
- by (simp add: mult_right_mono prems)
- moreover have "nprt a2 * pprt b <= nprt a2 * pprt b1"
- by (simp add: mult_left_mono_neg prems)
- ultimately show ?thesis
- by simp
- qed
- moreover have "nprt a * nprt b <= nprt a1 * nprt b1"
- proof -
- have "nprt a * nprt b <= nprt a * nprt b1"
- by (simp add: mult_left_mono_neg prems)
- moreover have "nprt a * nprt b1 <= nprt a1 * nprt b1"
- by (simp add: mult_right_mono_neg prems)
- ultimately show ?thesis
- by simp
- qed
- ultimately show ?thesis
- by - (rule add_mono | simp)+
-qed
-
-lemma mult_ge_prts:
- assumes
- "a1 <= (a::'a::lordered_ring)"
- "a <= a2"
- "b1 <= b"
- "b <= b2"
- shows
- "a * b >= nprt a1 * pprt b2 + nprt a2 * nprt b2 + pprt a1 * pprt b1 + pprt a2 * nprt b1"
-proof -
- from prems have a1:"- a2 <= -a" by auto
- from prems have a2: "-a <= -a1" by auto
- from mult_le_prts[of "-a2" "-a" "-a1" "b1" b "b2", OF a1 a2 prems(3) prems(4), simplified nprt_neg pprt_neg]
- have le: "- (a * b) <= - nprt a1 * pprt b2 + - nprt a2 * nprt b2 + - pprt a1 * pprt b1 + - pprt a2 * nprt b1" by simp
- then have "-(- nprt a1 * pprt b2 + - nprt a2 * nprt b2 + - pprt a1 * pprt b1 + - pprt a2 * nprt b1) <= a * b"
- by (simp only: minus_le_iff)
- then show ?thesis by simp
-qed
-
-
-code_modulename SML
- Ring_and_Field Arith
-
-code_modulename OCaml
- Ring_and_Field Arith
-
-code_modulename Haskell
- Ring_and_Field Arith
-
-end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Rings.thy Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,1212 @@
+(* Title: HOL/Rings.thy
+ Author: Gertrud Bauer
+ Author: Steven Obua
+ Author: Tobias Nipkow
+ Author: Lawrence C Paulson
+ Author: Markus Wenzel
+ Author: Jeremy Avigad
+*)
+
+header {* Rings *}
+
+theory Rings
+imports Groups
+begin
+
+text {*
+ The theory of partially ordered rings is taken from the books:
+ \begin{itemize}
+ \item \emph{Lattice Theory} by Garrett Birkhoff, American Mathematical Society 1979
+ \item \emph{Partially Ordered Algebraic Systems}, Pergamon Press 1963
+ \end{itemize}
+ Most of the notions used here can also be looked up in
+ \begin{itemize}
+ \item \url{http://www.mathworld.com} by Eric Weisstein et al.
+ \item \emph{Algebra I} by van der Waerden, Springer.
+ \end{itemize}
+*}
+
+class semiring = ab_semigroup_add + semigroup_mult +
+ assumes left_distrib[algebra_simps]: "(a + b) * c = a * c + b * c"
+ assumes right_distrib[algebra_simps]: "a * (b + c) = a * b + a * c"
+begin
+
+text{*For the @{text combine_numerals} simproc*}
+lemma combine_common_factor:
+ "a * e + (b * e + c) = (a + b) * e + c"
+by (simp add: left_distrib add_ac)
+
+end
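(Editorial illustration, not part of the new theory: a sketch of how the two
distributivity axioms, both tagged @{text algebra_simps}, expand a product of sums;
that the additive AC rules are also collected in @{text algebra_simps} is assumed here.)

    lemma "((a::'a::semiring) + b) * (c + d) = a * c + a * d + (b * c + b * d)"
      by (simp add: algebra_simps)  (* left_distrib and right_distrib do the expansion *)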
+
+class mult_zero = times + zero +
+ assumes mult_zero_left [simp]: "0 * a = 0"
+ assumes mult_zero_right [simp]: "a * 0 = 0"
+
+class semiring_0 = semiring + comm_monoid_add + mult_zero
+
+class semiring_0_cancel = semiring + cancel_comm_monoid_add
+begin
+
+subclass semiring_0
+proof
+ fix a :: 'a
+ have "0 * a + 0 * a = 0 * a + 0" by (simp add: left_distrib [symmetric])
+ thus "0 * a = 0" by (simp only: add_left_cancel)
+next
+ fix a :: 'a
+ have "a * 0 + a * 0 = a * 0 + 0" by (simp add: right_distrib [symmetric])
+ thus "a * 0 = 0" by (simp only: add_left_cancel)
+qed
+
+end
+
+class comm_semiring = ab_semigroup_add + ab_semigroup_mult +
+ assumes distrib: "(a + b) * c = a * c + b * c"
+begin
+
+subclass semiring
+proof
+ fix a b c :: 'a
+ show "(a + b) * c = a * c + b * c" by (simp add: distrib)
+ have "a * (b + c) = (b + c) * a" by (simp add: mult_ac)
+ also have "... = b * a + c * a" by (simp only: distrib)
+ also have "... = a * b + a * c" by (simp add: mult_ac)
+ finally show "a * (b + c) = a * b + a * c" by blast
+qed
+
+end
+
+class comm_semiring_0 = comm_semiring + comm_monoid_add + mult_zero
+begin
+
+subclass semiring_0 ..
+
+end
+
+class comm_semiring_0_cancel = comm_semiring + cancel_comm_monoid_add
+begin
+
+subclass semiring_0_cancel ..
+
+subclass comm_semiring_0 ..
+
+end
+
+class zero_neq_one = zero + one +
+ assumes zero_neq_one [simp]: "0 \<noteq> 1"
+begin
+
+lemma one_neq_zero [simp]: "1 \<noteq> 0"
+by (rule not_sym) (rule zero_neq_one)
+
+end
+
+class semiring_1 = zero_neq_one + semiring_0 + monoid_mult
+
+text {* Abstract divisibility *}
+
+class dvd = times
+begin
+
+definition dvd :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infixl "dvd" 50) where
+ [code del]: "b dvd a \<longleftrightarrow> (\<exists>k. a = b * k)"
+
+lemma dvdI [intro?]: "a = b * k \<Longrightarrow> b dvd a"
+ unfolding dvd_def ..
+
+lemma dvdE [elim?]: "b dvd a \<Longrightarrow> (\<And>k. a = b * k \<Longrightarrow> P) \<Longrightarrow> P"
+ unfolding dvd_def by blast
+
+end
+
+class comm_semiring_1 = zero_neq_one + comm_semiring_0 + comm_monoid_mult + dvd
+ (*previously almost_semiring*)
+begin
+
+subclass semiring_1 ..
+
+lemma dvd_refl[simp]: "a dvd a"
+proof
+ show "a = a * 1" by simp
+qed
+
+lemma dvd_trans:
+ assumes "a dvd b" and "b dvd c"
+ shows "a dvd c"
+proof -
+ from assms obtain v where "b = a * v" by (auto elim!: dvdE)
+ moreover from assms obtain w where "c = b * w" by (auto elim!: dvdE)
+ ultimately have "c = a * (v * w)" by (simp add: mult_assoc)
+ then show ?thesis ..
+qed
+
+lemma dvd_0_left_iff [noatp, simp]: "0 dvd a \<longleftrightarrow> a = 0"
+by (auto intro: dvd_refl elim!: dvdE)
+
+lemma dvd_0_right [iff]: "a dvd 0"
+proof
+ show "0 = a * 0" by simp
+qed
+
+lemma one_dvd [simp]: "1 dvd a"
+by (auto intro!: dvdI)
+
+lemma dvd_mult[simp]: "a dvd c \<Longrightarrow> a dvd (b * c)"
+by (auto intro!: mult_left_commute dvdI elim!: dvdE)
+
+lemma dvd_mult2[simp]: "a dvd b \<Longrightarrow> a dvd (b * c)"
+ apply (subst mult_commute)
+ apply (erule dvd_mult)
+ done
+
+lemma dvd_triv_right [simp]: "a dvd b * a"
+by (rule dvd_mult) (rule dvd_refl)
+
+lemma dvd_triv_left [simp]: "a dvd a * b"
+by (rule dvd_mult2) (rule dvd_refl)
+
+lemma mult_dvd_mono:
+ assumes "a dvd b"
+ and "c dvd d"
+ shows "a * c dvd b * d"
+proof -
+ from `a dvd b` obtain b' where "b = a * b'" ..
+ moreover from `c dvd d` obtain d' where "d = c * d'" ..
+ ultimately have "b * d = (a * c) * (b' * d')" by (simp add: mult_ac)
+ then show ?thesis ..
+qed
+
+lemma dvd_mult_left: "a * b dvd c \<Longrightarrow> a dvd c"
+by (simp add: dvd_def mult_assoc, blast)
+
+lemma dvd_mult_right: "a * b dvd c \<Longrightarrow> b dvd c"
+ unfolding mult_ac [of a] by (rule dvd_mult_left)
+
+lemma dvd_0_left: "0 dvd a \<Longrightarrow> a = 0"
+by simp
+
+lemma dvd_add[simp]:
+ assumes "a dvd b" and "a dvd c" shows "a dvd (b + c)"
+proof -
+ from `a dvd b` obtain b' where "b = a * b'" ..
+ moreover from `a dvd c` obtain c' where "c = a * c'" ..
+ ultimately have "b + c = a * (b' + c')" by (simp add: right_distrib)
+ then show ?thesis ..
+qed
+
+end
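(Editorial illustration, not part of the new theory: the @{text simp} declarations on
@{text dvd_add} and @{text dvd_triv_left} make simple divisibility goals automatic.)

    lemma "(a::'a::comm_semiring_1) dvd a * b + a * c"
      by simp  (* dvd_add splits the sum, dvd_triv_left closes both summands *)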
+
+
+class no_zero_divisors = zero + times +
+ assumes no_zero_divisors: "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> a * b \<noteq> 0"
+
+class semiring_1_cancel = semiring + cancel_comm_monoid_add
+ + zero_neq_one + monoid_mult
+begin
+
+subclass semiring_0_cancel ..
+
+subclass semiring_1 ..
+
+end
+
+class comm_semiring_1_cancel = comm_semiring + cancel_comm_monoid_add
+ + zero_neq_one + comm_monoid_mult
+begin
+
+subclass semiring_1_cancel ..
+subclass comm_semiring_0_cancel ..
+subclass comm_semiring_1 ..
+
+end
+
+class ring = semiring + ab_group_add
+begin
+
+subclass semiring_0_cancel ..
+
+text {* Distribution rules *}
+
+lemma minus_mult_left: "- (a * b) = - a * b"
+by (rule minus_unique) (simp add: left_distrib [symmetric])
+
+lemma minus_mult_right: "- (a * b) = a * - b"
+by (rule minus_unique) (simp add: right_distrib [symmetric])
+
+text{*Extract signs from products*}
+lemmas mult_minus_left [simp, noatp] = minus_mult_left [symmetric]
+lemmas mult_minus_right [simp,noatp] = minus_mult_right [symmetric]
+
+lemma minus_mult_minus [simp]: "- a * - b = a * b"
+by simp
+
+lemma minus_mult_commute: "- a * b = a * - b"
+by simp
+
+lemma right_diff_distrib[algebra_simps]: "a * (b - c) = a * b - a * c"
+by (simp add: right_distrib diff_minus)
+
+lemma left_diff_distrib[algebra_simps]: "(a - b) * c = a * c - b * c"
+by (simp add: left_distrib diff_minus)
+
+lemmas ring_distribs[noatp] =
+ right_distrib left_distrib left_diff_distrib right_diff_distrib
+
+text{*Legacy - use @{text algebra_simps} *}
+lemmas ring_simps[noatp] = algebra_simps
+
+lemma eq_add_iff1:
+ "a * e + c = b * e + d \<longleftrightarrow> (a - b) * e + c = d"
+by (simp add: algebra_simps)
+
+lemma eq_add_iff2:
+ "a * e + c = b * e + d \<longleftrightarrow> c = (b - a) * e + d"
+by (simp add: algebra_simps)
+
+end
+
+lemmas ring_distribs[noatp] =
+ right_distrib left_distrib left_diff_distrib right_diff_distrib
+
+class comm_ring = comm_semiring + ab_group_add
+begin
+
+subclass ring ..
+subclass comm_semiring_0_cancel ..
+
+end
+
+class ring_1 = ring + zero_neq_one + monoid_mult
+begin
+
+subclass semiring_1_cancel ..
+
+end
+
+class comm_ring_1 = comm_ring + zero_neq_one + comm_monoid_mult
+ (*previously ring*)
+begin
+
+subclass ring_1 ..
+subclass comm_semiring_1_cancel ..
+
+lemma dvd_minus_iff [simp]: "x dvd - y \<longleftrightarrow> x dvd y"
+proof
+ assume "x dvd - y"
+ then have "x dvd - 1 * - y" by (rule dvd_mult)
+ then show "x dvd y" by simp
+next
+ assume "x dvd y"
+ then have "x dvd - 1 * y" by (rule dvd_mult)
+ then show "x dvd - y" by simp
+qed
+
+lemma minus_dvd_iff [simp]: "- x dvd y \<longleftrightarrow> x dvd y"
+proof
+ assume "- x dvd y"
+ then obtain k where "y = - x * k" ..
+ then have "y = x * - k" by simp
+ then show "x dvd y" ..
+next
+ assume "x dvd y"
+ then obtain k where "y = x * k" ..
+ then have "y = - x * - k" by simp
+ then show "- x dvd y" ..
+qed
+
+lemma dvd_diff[simp]: "x dvd y \<Longrightarrow> x dvd z \<Longrightarrow> x dvd (y - z)"
+by (simp add: diff_minus dvd_minus_iff)
+
+end
+
+class ring_no_zero_divisors = ring + no_zero_divisors
+begin
+
+lemma mult_eq_0_iff [simp]:
+ shows "a * b = 0 \<longleftrightarrow> (a = 0 \<or> b = 0)"
+proof (cases "a = 0 \<or> b = 0")
+ case False then have "a \<noteq> 0" and "b \<noteq> 0" by auto
+ then show ?thesis using no_zero_divisors by simp
+next
+ case True then show ?thesis by auto
+qed
+
+text{*Cancellation of equalities with a common factor*}
+lemma mult_cancel_right [simp, noatp]:
+ "a * c = b * c \<longleftrightarrow> c = 0 \<or> a = b"
+proof -
+ have "(a * c = b * c) = ((a - b) * c = 0)"
+ by (simp add: algebra_simps right_minus_eq)
+ thus ?thesis by (simp add: disj_commute right_minus_eq)
+qed
+
+lemma mult_cancel_left [simp, noatp]:
+ "c * a = c * b \<longleftrightarrow> c = 0 \<or> a = b"
+proof -
+ have "(c * a = c * b) = (c * (a - b) = 0)"
+ by (simp add: algebra_simps right_minus_eq)
+ thus ?thesis by (simp add: right_minus_eq)
+qed
+
+end
+
+class ring_1_no_zero_divisors = ring_1 + ring_no_zero_divisors
+begin
+
+lemma mult_cancel_right1 [simp]:
+ "c = b * c \<longleftrightarrow> c = 0 \<or> b = 1"
+by (insert mult_cancel_right [of 1 c b], force)
+
+lemma mult_cancel_right2 [simp]:
+ "a * c = c \<longleftrightarrow> c = 0 \<or> a = 1"
+by (insert mult_cancel_right [of a c 1], simp)
+
+lemma mult_cancel_left1 [simp]:
+ "c = c * b \<longleftrightarrow> c = 0 \<or> b = 1"
+by (insert mult_cancel_left [of c 1 b], force)
+
+lemma mult_cancel_left2 [simp]:
+ "c * a = c \<longleftrightarrow> c = 0 \<or> a = 1"
+by (insert mult_cancel_left [of c a 1], simp)
+
+end
+
+class idom = comm_ring_1 + no_zero_divisors
+begin
+
+subclass ring_1_no_zero_divisors ..
+
+lemma square_eq_iff: "a * a = b * b \<longleftrightarrow> (a = b \<or> a = - b)"
+proof
+ assume "a * a = b * b"
+ then have "(a - b) * (a + b) = 0"
+ by (simp add: algebra_simps)
+ then show "a = b \<or> a = - b"
+ by (simp add: right_minus_eq eq_neg_iff_add_eq_0)
+next
+ assume "a = b \<or> a = - b"
+ then show "a * a = b * b" by auto
+qed
+
+lemma dvd_mult_cancel_right [simp]:
+ "a * c dvd b * c \<longleftrightarrow> c = 0 \<or> a dvd b"
+proof -
+ have "a * c dvd b * c \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
+ unfolding dvd_def by (simp add: mult_ac)
+ also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
+ unfolding dvd_def by simp
+ finally show ?thesis .
+qed
+
+lemma dvd_mult_cancel_left [simp]:
+ "c * a dvd c * b \<longleftrightarrow> c = 0 \<or> a dvd b"
+proof -
+ have "c * a dvd c * b \<longleftrightarrow> (\<exists>k. b * c = (a * k) * c)"
+ unfolding dvd_def by (simp add: mult_ac)
+ also have "(\<exists>k. b * c = (a * k) * c) \<longleftrightarrow> c = 0 \<or> a dvd b"
+ unfolding dvd_def by simp
+ finally show ?thesis .
+qed
+
+end
+
+class division_ring = ring_1 + inverse +
+ assumes left_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> inverse a * a = 1"
+ assumes right_inverse [simp]: "a \<noteq> 0 \<Longrightarrow> a * inverse a = 1"
+begin
+
+subclass ring_1_no_zero_divisors
+proof
+ fix a b :: 'a
+ assume a: "a \<noteq> 0" and b: "b \<noteq> 0"
+ show "a * b \<noteq> 0"
+ proof
+ assume ab: "a * b = 0"
+ hence "0 = inverse a * (a * b) * inverse b" by simp
+ also have "\<dots> = (inverse a * a) * (b * inverse b)"
+ by (simp only: mult_assoc)
+ also have "\<dots> = 1" using a b by simp
+ finally show False by simp
+ qed
+qed
+
+lemma nonzero_imp_inverse_nonzero:
+ "a \<noteq> 0 \<Longrightarrow> inverse a \<noteq> 0"
+proof
+ assume ianz: "inverse a = 0"
+ assume "a \<noteq> 0"
+ hence "1 = a * inverse a" by simp
+ also have "... = 0" by (simp add: ianz)
+ finally have "1 = 0" .
+ thus False by (simp add: eq_commute)
+qed
+
+lemma inverse_zero_imp_zero:
+ "inverse a = 0 \<Longrightarrow> a = 0"
+apply (rule classical)
+apply (drule nonzero_imp_inverse_nonzero)
+apply auto
+done
+
+lemma inverse_unique:
+ assumes ab: "a * b = 1"
+ shows "inverse a = b"
+proof -
+ have "a \<noteq> 0" using ab by (cases "a = 0") simp_all
+ moreover have "inverse a * (a * b) = inverse a" by (simp add: ab)
+ ultimately show ?thesis by (simp add: mult_assoc [symmetric])
+qed
+
+lemma nonzero_inverse_minus_eq:
+ "a \<noteq> 0 \<Longrightarrow> inverse (- a) = - inverse a"
+by (rule inverse_unique) simp
+
+lemma nonzero_inverse_inverse_eq:
+ "a \<noteq> 0 \<Longrightarrow> inverse (inverse a) = a"
+by (rule inverse_unique) simp
+
+lemma nonzero_inverse_eq_imp_eq:
+ assumes "inverse a = inverse b" and "a \<noteq> 0" and "b \<noteq> 0"
+ shows "a = b"
+proof -
+ from `inverse a = inverse b`
+ have "inverse (inverse a) = inverse (inverse b)" by (rule arg_cong)
+ with `a \<noteq> 0` and `b \<noteq> 0` show "a = b"
+ by (simp add: nonzero_inverse_inverse_eq)
+qed
+
+lemma inverse_1 [simp]: "inverse 1 = 1"
+by (rule inverse_unique) simp
+
+lemma nonzero_inverse_mult_distrib:
+ assumes "a \<noteq> 0" and "b \<noteq> 0"
+ shows "inverse (a * b) = inverse b * inverse a"
+proof -
+ have "a * (b * inverse b) * inverse a = 1" using assms by simp
+ hence "a * b * (inverse b * inverse a) = 1" by (simp only: mult_assoc)
+ thus ?thesis by (rule inverse_unique)
+qed
+
+lemma division_ring_inverse_add:
+ "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a + inverse b = inverse a * (a + b) * inverse b"
+by (simp add: algebra_simps)
+
+lemma division_ring_inverse_diff:
+ "a \<noteq> 0 \<Longrightarrow> b \<noteq> 0 \<Longrightarrow> inverse a - inverse b = inverse a * (b - a) * inverse b"
+by (simp add: algebra_simps)
+
+end
+
+class mult_mono = times + zero + ord +
+ assumes mult_left_mono: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> c * a \<le> c * b"
+ assumes mult_right_mono: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> a * c \<le> b * c"
+
+class ordered_semiring = mult_mono + semiring_0 + ordered_ab_semigroup_add
+begin
+
+lemma mult_mono:
+ "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> 0 \<le> b \<Longrightarrow> 0 \<le> c
+ \<Longrightarrow> a * c \<le> b * d"
+apply (erule mult_right_mono [THEN order_trans], assumption)
+apply (erule mult_left_mono, assumption)
+done
+
+lemma mult_mono':
+ "a \<le> b \<Longrightarrow> c \<le> d \<Longrightarrow> 0 \<le> a \<Longrightarrow> 0 \<le> c
+ \<Longrightarrow> a * c \<le> b * d"
+apply (rule mult_mono)
+apply (fast intro: order_trans)+
+done
+
+end
+
+class ordered_cancel_semiring = mult_mono + ordered_ab_semigroup_add
+ + semiring + cancel_comm_monoid_add
+begin
+
+subclass semiring_0_cancel ..
+subclass ordered_semiring ..
+
+lemma mult_nonneg_nonneg: "0 \<le> a \<Longrightarrow> 0 \<le> b \<Longrightarrow> 0 \<le> a * b"
+using mult_left_mono [of zero b a] by simp
+
+lemma mult_nonneg_nonpos: "0 \<le> a \<Longrightarrow> b \<le> 0 \<Longrightarrow> a * b \<le> 0"
+using mult_left_mono [of b zero a] by simp
+
+lemma mult_nonpos_nonneg: "a \<le> 0 \<Longrightarrow> 0 \<le> b \<Longrightarrow> a * b \<le> 0"
+using mult_right_mono [of a zero b] by simp
+
+text {* Legacy - use @{text mult_nonpos_nonneg} *}
+lemma mult_nonneg_nonpos2: "0 \<le> a \<Longrightarrow> b \<le> 0 \<Longrightarrow> b * a \<le> 0"
+by (drule mult_right_mono [of b zero], auto)
+
+lemma split_mult_neg_le: "(0 \<le> a & b \<le> 0) | (a \<le> 0 & 0 \<le> b) \<Longrightarrow> a * b \<le> 0"
+by (auto simp add: mult_nonneg_nonpos mult_nonneg_nonpos2)
+
+end
+
+class linordered_semiring = semiring + comm_monoid_add + linordered_cancel_ab_semigroup_add + mult_mono
+begin
+
+subclass ordered_cancel_semiring ..
+
+subclass ordered_comm_monoid_add ..
+
+lemma mult_left_less_imp_less:
+ "c * a < c * b \<Longrightarrow> 0 \<le> c \<Longrightarrow> a < b"
+by (force simp add: mult_left_mono not_le [symmetric])
+
+lemma mult_right_less_imp_less:
+ "a * c < b * c \<Longrightarrow> 0 \<le> c \<Longrightarrow> a < b"
+by (force simp add: mult_right_mono not_le [symmetric])
+
+end
+
+class linordered_semiring_1 = linordered_semiring + semiring_1
+
+class linordered_semiring_strict = semiring + comm_monoid_add + linordered_cancel_ab_semigroup_add +
+ assumes mult_strict_left_mono: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> c * a < c * b"
+ assumes mult_strict_right_mono: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> a * c < b * c"
+begin
+
+subclass semiring_0_cancel ..
+
+subclass linordered_semiring
+proof
+ fix a b c :: 'a
+ assume A: "a \<le> b" "0 \<le> c"
+ from A show "c * a \<le> c * b"
+ unfolding le_less
+ using mult_strict_left_mono by (cases "c = 0") auto
+ from A show "a * c \<le> b * c"
+ unfolding le_less
+ using mult_strict_right_mono by (cases "c = 0") auto
+qed
+
+lemma mult_left_le_imp_le:
+ "c * a \<le> c * b \<Longrightarrow> 0 < c \<Longrightarrow> a \<le> b"
+by (force simp add: mult_strict_left_mono not_less [symmetric])
+
+lemma mult_right_le_imp_le:
+ "a * c \<le> b * c \<Longrightarrow> 0 < c \<Longrightarrow> a \<le> b"
+by (force simp add: mult_strict_right_mono not_less [symmetric])
+
+lemma mult_pos_pos: "0 < a \<Longrightarrow> 0 < b \<Longrightarrow> 0 < a * b"
+using mult_strict_left_mono [of zero b a] by simp
+
+lemma mult_pos_neg: "0 < a \<Longrightarrow> b < 0 \<Longrightarrow> a * b < 0"
+using mult_strict_left_mono [of b zero a] by simp
+
+lemma mult_neg_pos: "a < 0 \<Longrightarrow> 0 < b \<Longrightarrow> a * b < 0"
+using mult_strict_right_mono [of a zero b] by simp
+
+text {* Legacy - use @{text mult_neg_pos} *}
+lemma mult_pos_neg2: "0 < a \<Longrightarrow> b < 0 \<Longrightarrow> b * a < 0"
+by (drule mult_strict_right_mono [of b zero], auto)
+
+lemma zero_less_mult_pos:
+ "0 < a * b \<Longrightarrow> 0 < a \<Longrightarrow> 0 < b"
+apply (cases "b\<le>0")
+ apply (auto simp add: le_less not_less)
+apply (drule_tac mult_pos_neg [of a b])
+ apply (auto dest: less_not_sym)
+done
+
+lemma zero_less_mult_pos2:
+ "0 < b * a \<Longrightarrow> 0 < a \<Longrightarrow> 0 < b"
+apply (cases "b\<le>0")
+ apply (auto simp add: le_less not_less)
+apply (drule_tac mult_pos_neg2 [of a b])
+ apply (auto dest: less_not_sym)
+done
+
+text{*Strict monotonicity in both arguments*}
+lemma mult_strict_mono:
+ assumes "a < b" and "c < d" and "0 < b" and "0 \<le> c"
+ shows "a * c < b * d"
+ using assms apply (cases "c=0")
+ apply (simp add: mult_pos_pos)
+ apply (erule mult_strict_right_mono [THEN less_trans])
+ apply (force simp add: le_less)
+ apply (erule mult_strict_left_mono, assumption)
+ done
+
+text{*This weaker variant has more natural premises*}
+lemma mult_strict_mono':
+ assumes "a < b" and "c < d" and "0 \<le> a" and "0 \<le> c"
+ shows "a * c < b * d"
+by (rule mult_strict_mono) (insert assms, auto)
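+
+(* Illustration only, not part of the original changeset: the primed variant
+   applies directly when only non-strict lower bounds are available. *)
+lemma
+  assumes "0 \<le> a" and "a < b"
+  shows "a * a < b * b"
+  using assms by (auto intro: mult_strict_mono')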
+
+lemma mult_less_le_imp_less:
+ assumes "a < b" and "c \<le> d" and "0 \<le> a" and "0 < c"
+ shows "a * c < b * d"
+ using assms apply (subgoal_tac "a * c < b * c")
+ apply (erule less_le_trans)
+ apply (erule mult_left_mono)
+ apply simp
+ apply (erule mult_strict_right_mono)
+ apply assumption
+ done
+
+lemma mult_le_less_imp_less:
+ assumes "a \<le> b" and "c < d" and "0 < a" and "0 \<le> c"
+ shows "a * c < b * d"
+ using assms apply (subgoal_tac "a * c \<le> b * c")
+ apply (erule le_less_trans)
+ apply (erule mult_strict_left_mono)
+ apply simp
+ apply (erule mult_right_mono)
+ apply simp
+ done
+
+lemma mult_less_imp_less_left:
+ assumes less: "c * a < c * b" and nonneg: "0 \<le> c"
+ shows "a < b"
+proof (rule ccontr)
+ assume "\<not> a < b"
+ hence "b \<le> a" by (simp add: linorder_not_less)
+ hence "c * b \<le> c * a" using nonneg by (rule mult_left_mono)
+ with this and less show False by (simp add: not_less [symmetric])
+qed
+
+lemma mult_less_imp_less_right:
+ assumes less: "a * c < b * c" and nonneg: "0 \<le> c"
+ shows "a < b"
+proof (rule ccontr)
+ assume "\<not> a < b"
+ hence "b \<le> a" by (simp add: linorder_not_less)
+ hence "b * c \<le> a * c" using nonneg by (rule mult_right_mono)
+ with this and less show False by (simp add: not_less [symmetric])
+qed
+
+end
+
+class linordered_semiring_1_strict = linordered_semiring_strict + semiring_1
+
+class mult_mono1 = times + zero + ord +
+ assumes mult_mono1: "a \<le> b \<Longrightarrow> 0 \<le> c \<Longrightarrow> c * a \<le> c * b"
+
+class ordered_comm_semiring = comm_semiring_0
+ + ordered_ab_semigroup_add + mult_mono1
+begin
+
+subclass ordered_semiring
+proof
+ fix a b c :: 'a
+ assume "a \<le> b" "0 \<le> c"
+ thus "c * a \<le> c * b" by (rule mult_mono1)
+ thus "a * c \<le> b * c" by (simp only: mult_commute)
+qed
+
+end
+
+class ordered_cancel_comm_semiring = comm_semiring_0_cancel
+ + ordered_ab_semigroup_add + mult_mono1
+begin
+
+subclass ordered_comm_semiring ..
+subclass ordered_cancel_semiring ..
+
+end
+
+class linordered_comm_semiring_strict = comm_semiring_0 + linordered_cancel_ab_semigroup_add +
+ assumes mult_strict_left_mono_comm: "a < b \<Longrightarrow> 0 < c \<Longrightarrow> c * a < c * b"
+begin
+
+subclass linordered_semiring_strict
+proof
+ fix a b c :: 'a
+ assume "a < b" "0 < c"
+ thus "c * a < c * b" by (rule mult_strict_left_mono_comm)
+ thus "a * c < b * c" by (simp only: mult_commute)
+qed
+
+subclass ordered_cancel_comm_semiring
+proof
+ fix a b c :: 'a
+ assume "a \<le> b" "0 \<le> c"
+ thus "c * a \<le> c * b"
+ unfolding le_less
+ using mult_strict_left_mono by (cases "c = 0") auto
+qed
+
+end
+
+class ordered_ring = ring + ordered_cancel_semiring
+begin
+
+subclass ordered_ab_group_add ..
+
+text{*Legacy - use @{text algebra_simps} *}
+lemmas ring_simps[noatp] = algebra_simps
+
+lemma less_add_iff1:
+ "a * e + c < b * e + d \<longleftrightarrow> (a - b) * e + c < d"
+by (simp add: algebra_simps)
+
+lemma less_add_iff2:
+ "a * e + c < b * e + d \<longleftrightarrow> c < (b - a) * e + d"
+by (simp add: algebra_simps)
+
+lemma le_add_iff1:
+ "a * e + c \<le> b * e + d \<longleftrightarrow> (a - b) * e + c \<le> d"
+by (simp add: algebra_simps)
+
+lemma le_add_iff2:
+ "a * e + c \<le> b * e + d \<longleftrightarrow> c \<le> (b - a) * e + d"
+by (simp add: algebra_simps)
+
+lemma mult_left_mono_neg:
+ "b \<le> a \<Longrightarrow> c \<le> 0 \<Longrightarrow> c * a \<le> c * b"
+ apply (drule mult_left_mono [of _ _ "uminus c"])
+ apply (simp_all add: minus_mult_left [symmetric])
+ done
+
+lemma mult_right_mono_neg:
+ "b \<le> a \<Longrightarrow> c \<le> 0 \<Longrightarrow> a * c \<le> b * c"
+ apply (drule mult_right_mono [of _ _ "uminus c"])
+ apply (simp_all add: minus_mult_right [symmetric])
+ done
+
+lemma mult_nonpos_nonpos: "a \<le> 0 \<Longrightarrow> b \<le> 0 \<Longrightarrow> 0 \<le> a * b"
+using mult_right_mono_neg [of a zero b] by simp
+
+lemma split_mult_pos_le:
+ "(0 \<le> a \<and> 0 \<le> b) \<or> (a \<le> 0 \<and> b \<le> 0) \<Longrightarrow> 0 \<le> a * b"
+by (auto simp add: mult_nonneg_nonneg mult_nonpos_nonpos)
+
+end
+
+class abs_if = minus + uminus + ord + zero + abs +
+ assumes abs_if: "\<bar>a\<bar> = (if a < 0 then - a else a)"
+
+class sgn_if = minus + uminus + zero + one + ord + sgn +
+ assumes sgn_if: "sgn x = (if x = 0 then 0 else if 0 < x then 1 else - 1)"
+
+lemma (in sgn_if) sgn0[simp]: "sgn 0 = 0"
+by(simp add:sgn_if)
+
+class linordered_ring = ring + linordered_semiring + linordered_ab_group_add + abs_if
+begin
+
+subclass ordered_ring ..
+
+subclass ordered_ab_group_add_abs
+proof
+ fix a b
+ show "\<bar>a + b\<bar> \<le> \<bar>a\<bar> + \<bar>b\<bar>"
+ by (auto simp add: abs_if not_less neg_less_eq_nonneg less_eq_neg_nonpos)
+ (auto simp del: minus_add_distrib simp add: minus_add_distrib [symmetric]
+ neg_less_eq_nonneg less_eq_neg_nonpos, auto intro: add_nonneg_nonneg,
+ auto intro!: less_imp_le add_neg_neg)
+qed (auto simp add: abs_if less_eq_neg_nonpos neg_equal_zero)
+
+end
+
+(* The "strict" suffix can be seen as describing the combination of linordered_ring and no_zero_divisors.
+ Basically, linordered_ring + no_zero_divisors = linordered_ring_strict.
+ *)
+class linordered_ring_strict = ring + linordered_semiring_strict
+ + ordered_ab_group_add + abs_if
+begin
+
+subclass linordered_ring ..
+
+lemma mult_strict_left_mono_neg: "b < a \<Longrightarrow> c < 0 \<Longrightarrow> c * a < c * b"
+using mult_strict_left_mono [of b a "- c"] by simp
+
+lemma mult_strict_right_mono_neg: "b < a \<Longrightarrow> c < 0 \<Longrightarrow> a * c < b * c"
+using mult_strict_right_mono [of b a "- c"] by simp
+
+lemma mult_neg_neg: "a < 0 \<Longrightarrow> b < 0 \<Longrightarrow> 0 < a * b"
+using mult_strict_right_mono_neg [of a zero b] by simp
+
+subclass ring_no_zero_divisors
+proof
+ fix a b
+ assume "a \<noteq> 0" then have A: "a < 0 \<or> 0 < a" by (simp add: neq_iff)
+ assume "b \<noteq> 0" then have B: "b < 0 \<or> 0 < b" by (simp add: neq_iff)
+ have "a * b < 0 \<or> 0 < a * b"
+ proof (cases "a < 0")
+ case True note A' = this
+ show ?thesis proof (cases "b < 0")
+ case True with A'
+ show ?thesis by (auto dest: mult_neg_neg)
+ next
+ case False with B have "0 < b" by auto
+ with A' show ?thesis by (auto dest: mult_strict_right_mono)
+ qed
+ next
+ case False with A have A': "0 < a" by auto
+ show ?thesis proof (cases "b < 0")
+ case True with A'
+ show ?thesis by (auto dest: mult_strict_right_mono_neg)
+ next
+ case False with B have "0 < b" by auto
+ with A' show ?thesis by (auto dest: mult_pos_pos)
+ qed
+ qed
+ then show "a * b \<noteq> 0" by (simp add: neq_iff)
+qed
+
+lemma zero_less_mult_iff:
+ "0 < a * b \<longleftrightarrow> 0 < a \<and> 0 < b \<or> a < 0 \<and> b < 0"
+ apply (auto simp add: mult_pos_pos mult_neg_neg)
+ apply (simp_all add: not_less le_less)
+ apply (erule disjE) apply assumption defer
+ apply (erule disjE) defer apply (drule sym) apply simp
+ apply (erule disjE) defer apply (drule sym) apply simp
+ apply (erule disjE) apply assumption apply (drule sym) apply simp
+ apply (drule sym) apply simp
+ apply (blast dest: zero_less_mult_pos)
+ apply (blast dest: zero_less_mult_pos2)
+ done
+
+lemma zero_le_mult_iff:
+ "0 \<le> a * b \<longleftrightarrow> 0 \<le> a \<and> 0 \<le> b \<or> a \<le> 0 \<and> b \<le> 0"
+by (auto simp add: eq_commute [of 0] le_less not_less zero_less_mult_iff)
+
+lemma mult_less_0_iff:
+ "a * b < 0 \<longleftrightarrow> 0 < a \<and> b < 0 \<or> a < 0 \<and> 0 < b"
+ apply (insert zero_less_mult_iff [of "-a" b])
+ apply (force simp add: minus_mult_left[symmetric])
+ done
+
+lemma mult_le_0_iff:
+ "a * b \<le> 0 \<longleftrightarrow> 0 \<le> a \<and> b \<le> 0 \<or> a \<le> 0 \<and> 0 \<le> b"
+ apply (insert zero_le_mult_iff [of "-a" b])
+ apply (force simp add: minus_mult_left[symmetric])
+ done
+
+lemma zero_le_square [simp]: "0 \<le> a * a"
+by (simp add: zero_le_mult_iff linear)
+
+lemma not_square_less_zero [simp]: "\<not> (a * a < 0)"
+by (simp add: not_less)
+
+text{*Cancellation laws for @{term "c*a < c*b"} and @{term "a*c < b*c"},
+ also with the relations @{text "\<le>"} and equality.*}
+
+text{*These ``disjunction'' versions produce two cases when the comparison is
+ an assumption, but effectively four when the comparison is a goal.*}
+
+lemma mult_less_cancel_right_disj:
+ "a * c < b * c \<longleftrightarrow> 0 < c \<and> a < b \<or> c < 0 \<and> b < a"
+ apply (cases "c = 0")
+ apply (auto simp add: neq_iff mult_strict_right_mono
+ mult_strict_right_mono_neg)
+ apply (auto simp add: not_less
+ not_le [symmetric, of "a*c"]
+ not_le [symmetric, of a])
+ apply (erule_tac [!] notE)
+ apply (auto simp add: less_imp_le mult_right_mono
+ mult_right_mono_neg)
+ done
+
+lemma mult_less_cancel_left_disj:
+ "c * a < c * b \<longleftrightarrow> 0 < c \<and> a < b \<or> c < 0 \<and> b < a"
+ apply (cases "c = 0")
+ apply (auto simp add: neq_iff mult_strict_left_mono
+ mult_strict_left_mono_neg)
+ apply (auto simp add: not_less
+ not_le [symmetric, of "c*a"]
+ not_le [symmetric, of a])
+ apply (erule_tac [!] notE)
+ apply (auto simp add: less_imp_le mult_left_mono
+ mult_left_mono_neg)
+ done
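+
+(* A minimal illustration, not part of the original changeset: with the
+   comparison as an assumption, the disjunctive form yields just the two
+   expected cases. *)
+lemma
+  assumes "a * c < b * c"
+  shows "0 < c \<and> a < b \<or> c < 0 \<and> b < a"
+  using assms by (simp add: mult_less_cancel_right_disj)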
+
+text{*The ``conjunction of implication'' lemmas produce two cases when the
+comparison is a goal, but give four when the comparison is an assumption.*}
+
+lemma mult_less_cancel_right:
+ "a * c < b * c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < b) \<and> (c \<le> 0 \<longrightarrow> b < a)"
+ using mult_less_cancel_right_disj [of a c b] by auto
+
+lemma mult_less_cancel_left:
+ "c * a < c * b \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < b) \<and> (c \<le> 0 \<longrightarrow> b < a)"
+ using mult_less_cancel_left_disj [of c a b] by auto
+
+lemma mult_le_cancel_right:
+ "a * c \<le> b * c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> a)"
+by (simp add: not_less [symmetric] mult_less_cancel_right_disj)
+
+lemma mult_le_cancel_left:
+ "c * a \<le> c * b \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> a)"
+by (simp add: not_less [symmetric] mult_less_cancel_left_disj)
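+
+(* A minimal sketch, not part of the original changeset: with the comparison as
+   the goal, the conjunction-of-implications form asks for the two sign cases. *)
+lemma
+  assumes "0 < c \<longrightarrow> a \<le> b" and "c < 0 \<longrightarrow> b \<le> a"
+  shows "a * c \<le> b * c"
+  using assms by (simp add: mult_le_cancel_right)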
+
+lemma mult_le_cancel_left_pos:
+ "0 < c \<Longrightarrow> c * a \<le> c * b \<longleftrightarrow> a \<le> b"
+by (auto simp: mult_le_cancel_left)
+
+lemma mult_le_cancel_left_neg:
+ "c < 0 \<Longrightarrow> c * a \<le> c * b \<longleftrightarrow> b \<le> a"
+by (auto simp: mult_le_cancel_left)
+
+lemma mult_less_cancel_left_pos:
+ "0 < c \<Longrightarrow> c * a < c * b \<longleftrightarrow> a < b"
+by (auto simp: mult_less_cancel_left)
+
+lemma mult_less_cancel_left_neg:
+ "c < 0 \<Longrightarrow> c * a < c * b \<longleftrightarrow> b < a"
+by (auto simp: mult_less_cancel_left)
+
+end
+
+text{*Legacy - use @{text algebra_simps} *}
+lemmas ring_simps[noatp] = algebra_simps
+
+lemmas mult_sign_intros =
+ mult_nonneg_nonneg mult_nonneg_nonpos
+ mult_nonpos_nonneg mult_nonpos_nonpos
+ mult_pos_pos mult_pos_neg
+ mult_neg_pos mult_neg_neg
+
+class ordered_comm_ring = comm_ring + ordered_comm_semiring
+begin
+
+subclass ordered_ring ..
+subclass ordered_cancel_comm_semiring ..
+
+end
+
+class linordered_semidom = comm_semiring_1_cancel + linordered_comm_semiring_strict +
+ (*previously linordered_semiring*)
+ assumes zero_less_one [simp]: "0 < 1"
+begin
+
+lemma pos_add_strict:
+ shows "0 < a \<Longrightarrow> b < c \<Longrightarrow> b < a + c"
+ using add_strict_mono [of zero a b c] by simp
+
+lemma zero_le_one [simp]: "0 \<le> 1"
+by (rule zero_less_one [THEN less_imp_le])
+
+lemma not_one_le_zero [simp]: "\<not> 1 \<le> 0"
+by (simp add: not_le)
+
+lemma not_one_less_zero [simp]: "\<not> 1 < 0"
+by (simp add: not_less)
+
+lemma less_1_mult:
+ assumes "1 < m" and "1 < n"
+ shows "1 < m * n"
+ using assms mult_strict_mono [of 1 m 1 n]
+ by (simp add: less_trans [OF zero_less_one])
+
+end
+
+class linordered_idom = comm_ring_1 +
+ linordered_comm_semiring_strict + ordered_ab_group_add +
+ abs_if + sgn_if
+ (*previously linordered_ring*)
+begin
+
+subclass linordered_ring_strict ..
+subclass ordered_comm_ring ..
+subclass idom ..
+
+subclass linordered_semidom
+proof
+ have "0 \<le> 1 * 1" by (rule zero_le_square)
+ thus "0 < 1" by (simp add: le_less)
+qed
+
+lemma linorder_neqE_linordered_idom:
+ assumes "x \<noteq> y" obtains "x < y" | "y < x"
+ using assms by (rule neqE)
+
+text {* These cancellation simprules also produce two cases when the comparison is a goal. *}
+
+lemma mult_le_cancel_right1:
+ "c \<le> b * c \<longleftrightarrow> (0 < c \<longrightarrow> 1 \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> 1)"
+by (insert mult_le_cancel_right [of 1 c b], simp)
+
+lemma mult_le_cancel_right2:
+ "a * c \<le> c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> 1) \<and> (c < 0 \<longrightarrow> 1 \<le> a)"
+by (insert mult_le_cancel_right [of a c 1], simp)
+
+lemma mult_le_cancel_left1:
+ "c \<le> c * b \<longleftrightarrow> (0 < c \<longrightarrow> 1 \<le> b) \<and> (c < 0 \<longrightarrow> b \<le> 1)"
+by (insert mult_le_cancel_left [of c 1 b], simp)
+
+lemma mult_le_cancel_left2:
+ "c * a \<le> c \<longleftrightarrow> (0 < c \<longrightarrow> a \<le> 1) \<and> (c < 0 \<longrightarrow> 1 \<le> a)"
+by (insert mult_le_cancel_left [of c a 1], simp)
+
+lemma mult_less_cancel_right1:
+ "c < b * c \<longleftrightarrow> (0 \<le> c \<longrightarrow> 1 < b) \<and> (c \<le> 0 \<longrightarrow> b < 1)"
+by (insert mult_less_cancel_right [of 1 c b], simp)
+
+lemma mult_less_cancel_right2:
+ "a * c < c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < 1) \<and> (c \<le> 0 \<longrightarrow> 1 < a)"
+by (insert mult_less_cancel_right [of a c 1], simp)
+
+lemma mult_less_cancel_left1:
+ "c < c * b \<longleftrightarrow> (0 \<le> c \<longrightarrow> 1 < b) \<and> (c \<le> 0 \<longrightarrow> b < 1)"
+by (insert mult_less_cancel_left [of c 1 b], simp)
+
+lemma mult_less_cancel_left2:
+ "c * a < c \<longleftrightarrow> (0 \<le> c \<longrightarrow> a < 1) \<and> (c \<le> 0 \<longrightarrow> 1 < a)"
+by (insert mult_less_cancel_left [of c a 1], simp)
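+
+(* A small usage sketch, not part of the original changeset: combined with a
+   sign assumption, these simprules settle a cancelled comparison directly. *)
+lemma
+  assumes "0 < c" and "c \<le> c * b"
+  shows "1 \<le> b"
+  using assms by (auto simp add: mult_le_cancel_left1)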
+
+lemma sgn_sgn [simp]:
+ "sgn (sgn a) = sgn a"
+unfolding sgn_if by simp
+
+lemma sgn_0_0:
+ "sgn a = 0 \<longleftrightarrow> a = 0"
+unfolding sgn_if by simp
+
+lemma sgn_1_pos:
+ "sgn a = 1 \<longleftrightarrow> a > 0"
+unfolding sgn_if by (simp add: neg_equal_zero)
+
+lemma sgn_1_neg:
+ "sgn a = - 1 \<longleftrightarrow> a < 0"
+unfolding sgn_if by (auto simp add: equal_neg_zero)
+
+lemma sgn_pos [simp]:
+ "0 < a \<Longrightarrow> sgn a = 1"
+unfolding sgn_1_pos .
+
+lemma sgn_neg [simp]:
+ "a < 0 \<Longrightarrow> sgn a = - 1"
+unfolding sgn_1_neg .
+
+lemma sgn_times:
+ "sgn (a * b) = sgn a * sgn b"
+by (auto simp add: sgn_if zero_less_mult_iff)
+
+lemma abs_sgn: "abs k = k * sgn k"
+unfolding sgn_if abs_if by auto
+
+lemma sgn_greater [simp]:
+ "0 < sgn a \<longleftrightarrow> 0 < a"
+ unfolding sgn_if by auto
+
+lemma sgn_less [simp]:
+ "sgn a < 0 \<longleftrightarrow> a < 0"
+ unfolding sgn_if by auto
+
+lemma abs_dvd_iff [simp]: "(abs m) dvd k \<longleftrightarrow> m dvd k"
+ by (simp add: abs_if)
+
+lemma dvd_abs_iff [simp]: "m dvd (abs k) \<longleftrightarrow> m dvd k"
+ by (simp add: abs_if)
+
+lemma dvd_if_abs_eq:
+ "abs l = abs (k) \<Longrightarrow> l dvd k"
+by(subst abs_dvd_iff[symmetric]) simp
+
+end
+
+text {* Simprules for comparisons where common factors can be cancelled. *}
+
+lemmas mult_compare_simps[noatp] =
+ mult_le_cancel_right mult_le_cancel_left
+ mult_le_cancel_right1 mult_le_cancel_right2
+ mult_le_cancel_left1 mult_le_cancel_left2
+ mult_less_cancel_right mult_less_cancel_left
+ mult_less_cancel_right1 mult_less_cancel_right2
+ mult_less_cancel_left1 mult_less_cancel_left2
+ mult_cancel_right mult_cancel_left
+ mult_cancel_right1 mult_cancel_right2
+ mult_cancel_left1 mult_cancel_left2
+
+-- {* FIXME continue localization here *}
+
+subsection {* Reasoning about inequalities with division *}
+
+lemma mult_right_le_one_le: "0 <= (x::'a::linordered_idom) ==> 0 <= y ==> y <= 1
+ ==> x * y <= x"
+by (auto simp add: mult_compare_simps)
+
+lemma mult_left_le_one_le: "0 <= (x::'a::linordered_idom) ==> 0 <= y ==> y <= 1
+ ==> y * x <= x"
+by (auto simp add: mult_compare_simps)
+
+context linordered_semidom
+begin
+
+lemma less_add_one: "a < a + 1"
+proof -
+ have "a + 0 < a + 1"
+ by (blast intro: zero_less_one add_strict_left_mono)
+ thus ?thesis by simp
+qed
+
+lemma zero_less_two: "0 < 1 + 1"
+by (blast intro: less_trans zero_less_one less_add_one)
+
+end
+
+
+subsection {* Absolute Value *}
+
+context linordered_idom
+begin
+
+lemma mult_sgn_abs: "sgn x * abs x = x"
+ unfolding abs_if sgn_if by auto
+
+end
+
+lemma abs_one [simp]: "abs 1 = (1::'a::linordered_idom)"
+by (simp add: abs_if zero_less_one [THEN order_less_not_sym])
+
+class ordered_ring_abs = ordered_ring + ordered_ab_group_add_abs +
+ assumes abs_eq_mult:
+ "(0 \<le> a \<or> a \<le> 0) \<and> (0 \<le> b \<or> b \<le> 0) \<Longrightarrow> \<bar>a * b\<bar> = \<bar>a\<bar> * \<bar>b\<bar>"
+
+context linordered_idom
+begin
+
+subclass ordered_ring_abs proof
+qed (auto simp add: abs_if not_less equal_neg_zero neg_equal_zero mult_less_0_iff)
+
+lemma abs_mult:
+ "abs (a * b) = abs a * abs b"
+ by (rule abs_eq_mult) auto
+
+lemma abs_mult_self:
+ "abs a * abs a = a * a"
+ by (simp add: abs_if)
+
+end
+
+lemma abs_mult_less:
+ "[| abs a < c; abs b < d |] ==> abs a * abs b < c*(d::'a::linordered_idom)"
+proof -
+ assume ac: "abs a < c"
+ hence cpos: "0<c" by (blast intro: order_le_less_trans abs_ge_zero)
+ assume "abs b < d"
+ thus ?thesis by (simp add: ac cpos mult_strict_mono)
+qed
+
+lemmas eq_minus_self_iff[noatp] = equal_neg_zero
+
+lemma less_minus_self_iff: "(a < -a) = (a < (0::'a::linordered_idom))"
+ unfolding order_less_le less_eq_neg_nonpos equal_neg_zero ..
+
+lemma abs_less_iff: "(abs a < b) = (a < b & -a < (b::'a::linordered_idom))"
+apply (simp add: order_less_le abs_le_iff)
+apply (auto simp add: abs_if neg_less_eq_nonneg less_eq_neg_nonpos)
+done
+
+lemma abs_mult_pos: "(0::'a::linordered_idom) <= x ==>
+ (abs y) * x = abs (y * x)"
+ apply (subst abs_mult)
+ apply simp
+done
+
+code_modulename SML
+ Rings Arith
+
+code_modulename OCaml
+ Rings Arith
+
+code_modulename Haskell
+ Rings Arith
+
+end
--- a/src/HOL/SMT/Examples/SMT_Examples.certs Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SMT/Examples/SMT_Examples.certs Tue Feb 09 17:06:05 2010 +0100
@@ -6677,15 +6677,25 @@
[mp #29 #65]: false
unsat
-rOkYPs+Q++z/M3OPR/88JQ 1272
+rOkYPs+Q++z/M3OPR/88JQ 1654
#2 := false
#55 := 0::int
#7 := 2::int
decl uf_1 :: int
#4 := uf_1
#8 := (mod uf_1 2::int)
-#58 := (>= #8 0::int)
-#61 := (not #58)
+#67 := (>= #8 0::int)
+#1 := true
+#156 := (iff #67 true)
+#158 := (iff #156 #67)
+#159 := [rewrite]: #158
+#28 := [true-axiom]: true
+#153 := (or false #67)
+#154 := [th-lemma]: #153
+#155 := [unit-resolution #154 #28]: #67
+#157 := [iff-true #155]: #156
+#160 := [mp #157 #159]: #67
+#70 := (not #67)
#5 := 1::int
#9 := (* 2::int #8)
#10 := (+ #9 1::int)
@@ -6693,24 +6703,35 @@
#6 := (+ uf_1 1::int)
#12 := (<= #6 #11)
#13 := (not #12)
-#66 := (iff #13 #61)
+#77 := (iff #13 #70)
#39 := (+ uf_1 #9)
#40 := (+ 1::int #39)
#30 := (+ 1::int uf_1)
#45 := (<= #30 #40)
#48 := (not #45)
-#64 := (iff #48 #61)
+#75 := (iff #48 #70)
+#58 := (* 1::int #8)
+#59 := (>= #58 0::int)
+#62 := (not #59)
+#71 := (iff #62 #70)
+#68 := (iff #59 #67)
+#65 := (= #58 #8)
+#66 := [rewrite]: #65
+#69 := [monotonicity #66]: #68
+#72 := [monotonicity #69]: #71
+#73 := (iff #48 #62)
#56 := (>= #9 0::int)
#51 := (not #56)
-#62 := (iff #51 #61)
-#59 := (iff #56 #58)
-#60 := [rewrite]: #59
-#63 := [monotonicity #60]: #62
+#63 := (iff #51 #62)
+#60 := (iff #56 #59)
+#61 := [rewrite]: #60
+#64 := [monotonicity #61]: #63
#52 := (iff #48 #51)
#53 := (iff #45 #56)
#54 := [rewrite]: #53
#57 := [monotonicity #54]: #52
-#65 := [trans #57 #63]: #64
+#74 := [trans #57 #64]: #73
+#76 := [trans #74 #72]: #75
#49 := (iff #13 #48)
#46 := (iff #12 #45)
#43 := (= #11 #40)
@@ -6727,40 +6748,38 @@
#32 := [rewrite]: #31
#47 := [monotonicity #32 #44]: #46
#50 := [monotonicity #47]: #49
-#67 := [trans #50 #65]: #66
+#78 := [trans #50 #76]: #77
#29 := [asserted]: #13
-#68 := [mp #29 #67]: #61
-#1 := true
-#28 := [true-axiom]: true
-#142 := (or false #58)
-#143 := [th-lemma]: #142
-#144 := [unit-resolution #143 #28]: #58
-[unit-resolution #144 #68]: false
-unsat
-
-oSBTeiF6GKDeHPsMxXHXtQ 1161
+#79 := [mp #29 #78]: #70
+[unit-resolution #79 #160]: false
+unsat
+
+oSBTeiF6GKDeHPsMxXHXtQ 1064
#2 := false
#5 := 2::int
decl uf_1 :: int
#4 := uf_1
#6 := (mod uf_1 2::int)
-#55 := (>= #6 2::int)
+#122 := (>= #6 2::int)
+#123 := (not #122)
+#1 := true
+#27 := [true-axiom]: true
+#133 := (or false #123)
+#134 := [th-lemma]: #133
+#135 := [unit-resolution #134 #27]: #123
#9 := 3::int
+#29 := (* 2::int #6)
+#48 := (>= #29 3::int)
#10 := (+ uf_1 3::int)
#7 := (+ #6 #6)
#8 := (+ uf_1 #7)
#11 := (< #8 #10)
#12 := (not #11)
-#60 := (iff #12 #55)
+#55 := (iff #12 #48)
#35 := (+ 3::int uf_1)
-#29 := (* 2::int #6)
#32 := (+ uf_1 #29)
#38 := (< #32 #35)
#41 := (not #38)
-#58 := (iff #41 #55)
-#48 := (>= #29 3::int)
-#56 := (iff #48 #55)
-#57 := [rewrite]: #56
#53 := (iff #41 #48)
#46 := (not #48)
#45 := (not #46)
@@ -6771,7 +6790,6 @@
#44 := [rewrite]: #47
#50 := [monotonicity #44]: #49
#54 := [trans #50 #52]: #53
-#59 := [trans #54 #57]: #58
#42 := (iff #12 #41)
#39 := (iff #11 #38)
#36 := (= #10 #35)
@@ -6782,16 +6800,10 @@
#34 := [monotonicity #31]: #33
#40 := [monotonicity #34 #37]: #39
#43 := [monotonicity #40]: #42
-#61 := [trans #43 #59]: #60
+#56 := [trans #43 #54]: #55
#28 := [asserted]: #12
-#62 := [mp #28 #61]: #55
-#127 := (not #55)
-#1 := true
-#27 := [true-axiom]: true
-#137 := (or false #127)
-#138 := [th-lemma]: #137
-#139 := [unit-resolution #138 #27]: #127
-[unit-resolution #139 #62]: false
+#57 := [mp #28 #56]: #48
+[th-lemma #57 #135]: false
unsat
hqH9yBHvmZgES3pBkvsqVQ 2715
@@ -7344,52 +7356,99 @@
[mp #30 #96]: false
unsat
-sHpY0IFBgZUNhP56aRB+/w 1765
+sHpY0IFBgZUNhP56aRB+/w 2968
#2 := false
+#9 := 1::int
+decl ?x1!1 :: int
+#91 := ?x1!1
+#68 := -2::int
+#129 := (* -2::int ?x1!1)
+decl ?x2!0 :: int
+#90 := ?x2!0
+#7 := 2::int
+#128 := (* 2::int ?x2!0)
+#130 := (+ #128 #129)
+#131 := (<= #130 1::int)
+#136 := (not #131)
+#55 := 0::int
+#53 := -1::int
+#115 := (* -1::int ?x1!1)
+#116 := (+ ?x2!0 #115)
+#117 := (<= #116 0::int)
+#139 := (or #117 #136)
+#142 := (not #139)
+#92 := (* -2::int ?x2!0)
+#93 := (* 2::int ?x1!1)
+#94 := (+ #93 #92)
+#95 := (>= #94 -1::int)
+#96 := (not #95)
+#97 := (* -1::int ?x2!0)
+#98 := (+ ?x1!1 #97)
+#99 := (>= #98 0::int)
+#100 := (or #99 #96)
+#101 := (not #100)
+#143 := (iff #101 #142)
+#140 := (iff #100 #139)
+#137 := (iff #96 #136)
+#134 := (iff #95 #131)
+#122 := (+ #92 #93)
+#125 := (>= #122 -1::int)
+#132 := (iff #125 #131)
+#133 := [rewrite]: #132
+#126 := (iff #95 #125)
+#123 := (= #94 #122)
+#124 := [rewrite]: #123
+#127 := [monotonicity #124]: #126
+#135 := [trans #127 #133]: #134
+#138 := [monotonicity #135]: #137
+#120 := (iff #99 #117)
+#109 := (+ #97 ?x1!1)
+#112 := (>= #109 0::int)
+#118 := (iff #112 #117)
+#119 := [rewrite]: #118
+#113 := (iff #99 #112)
+#110 := (= #98 #109)
+#111 := [rewrite]: #110
+#114 := [monotonicity #111]: #113
+#121 := [trans #114 #119]: #120
+#141 := [monotonicity #121 #138]: #140
+#144 := [monotonicity #141]: #143
#5 := (:var 0 int)
-#7 := 2::int
-#11 := (* 2::int #5)
-#9 := 1::int
+#71 := (* -2::int #5)
#4 := (:var 1 int)
#8 := (* 2::int #4)
+#72 := (+ #8 #71)
+#70 := (>= #72 -1::int)
+#69 := (not #70)
+#57 := (* -1::int #5)
+#58 := (+ #4 #57)
+#56 := (>= #58 0::int)
+#75 := (or #56 #69)
+#78 := (forall (vars (?x1 int) (?x2 int)) #75)
+#81 := (not #78)
+#102 := (~ #81 #101)
+#103 := [sk]: #102
+#11 := (* 2::int #5)
#10 := (+ #8 1::int)
#12 := (< #10 #11)
#6 := (< #4 #5)
#13 := (implies #6 #12)
#14 := (forall (vars (?x1 int) (?x2 int)) #13)
#15 := (not #14)
-#91 := (iff #15 false)
+#84 := (iff #15 #81)
#32 := (+ 1::int #8)
#35 := (< #32 #11)
#41 := (not #6)
#42 := (or #41 #35)
#47 := (forall (vars (?x1 int) (?x2 int)) #42)
#50 := (not #47)
-#89 := (iff #50 false)
-#1 := true
-#84 := (not true)
-#87 := (iff #84 false)
-#88 := [rewrite]: #87
-#85 := (iff #50 #84)
-#82 := (iff #47 true)
-#77 := (forall (vars (?x1 int) (?x2 int)) true)
-#80 := (iff #77 true)
-#81 := [elim-unused]: #80
-#78 := (iff #47 #77)
-#75 := (iff #42 true)
-#55 := 0::int
-#53 := -1::int
-#57 := (* -1::int #5)
-#58 := (+ #4 #57)
-#56 := (>= #58 0::int)
+#82 := (iff #50 #81)
+#79 := (iff #47 #78)
+#76 := (iff #42 #75)
+#73 := (iff #35 #69)
+#74 := [rewrite]: #73
+#66 := (iff #41 #56)
#54 := (not #56)
-#69 := (or #56 #54)
-#73 := (iff #69 true)
-#74 := [rewrite]: #73
-#71 := (iff #42 #69)
-#70 := (iff #35 #54)
-#68 := [rewrite]: #70
-#66 := (iff #41 #56)
#61 := (not #54)
#64 := (iff #61 #56)
#65 := [rewrite]: #64
@@ -7398,12 +7457,9 @@
#60 := [rewrite]: #59
#63 := [monotonicity #60]: #62
#67 := [trans #63 #65]: #66
-#72 := [monotonicity #67 #68]: #71
-#76 := [trans #72 #74]: #75
-#79 := [quant-intro #76]: #78
-#83 := [trans #79 #81]: #82
-#86 := [monotonicity #83]: #85
-#90 := [trans #86 #88]: #89
+#77 := [monotonicity #67 #74]: #76
+#80 := [quant-intro #77]: #79
+#83 := [monotonicity #80]: #82
#51 := (iff #15 #50)
#48 := (iff #14 #47)
#45 := (iff #13 #42)
@@ -7419,54 +7475,89 @@
#46 := [trans #40 #44]: #45
#49 := [quant-intro #46]: #48
#52 := [monotonicity #49]: #51
-#92 := [trans #52 #90]: #91
+#85 := [trans #52 #83]: #84
#31 := [asserted]: #15
-[mp #31 #92]: false
-unsat
-
-7GSX+dyn9XwHWNcjJ4X1ww 1400
+#86 := [mp #31 #85]: #81
+#106 := [mp~ #86 #103]: #101
+#107 := [mp #106 #144]: #142
+#146 := [not-or-elim #107]: #131
+#108 := (not #117)
+#145 := [not-or-elim #107]: #108
+[th-lemma #145 #146]: false
+unsat
+
+7GSX+dyn9XwHWNcjJ4X1ww 2292
#2 := false
-#9 := (:var 0 int)
+#7 := 1::int
+decl ?x1!1 :: int
+#74 := ?x1!1
+#51 := -2::int
+#96 := (* -2::int ?x1!1)
+decl ?x2!0 :: int
+#73 := ?x2!0
#4 := 2::int
-#10 := (* 2::int #9)
-#7 := 1::int
+#95 := (* 2::int ?x2!0)
+#97 := (+ #95 #96)
+#166 := (<= #97 1::int)
+#94 := (= #97 1::int)
+#53 := -1::int
+#75 := (* -2::int ?x2!0)
+#76 := (* 2::int ?x1!1)
+#77 := (+ #76 #75)
+#78 := (= #77 -1::int)
+#79 := (not #78)
+#80 := (not #79)
+#110 := (iff #80 #94)
+#102 := (not #94)
+#105 := (not #102)
+#108 := (iff #105 #94)
+#109 := [rewrite]: #108
+#106 := (iff #80 #105)
+#103 := (iff #79 #102)
+#100 := (iff #78 #94)
+#88 := (+ #75 #76)
+#91 := (= #88 -1::int)
+#98 := (iff #91 #94)
+#99 := [rewrite]: #98
+#92 := (iff #78 #91)
+#89 := (= #77 #88)
+#90 := [rewrite]: #89
+#93 := [monotonicity #90]: #92
+#101 := [trans #93 #99]: #100
+#104 := [monotonicity #101]: #103
+#107 := [monotonicity #104]: #106
+#111 := [trans #107 #109]: #110
+#9 := (:var 0 int)
+#55 := (* -2::int #9)
#5 := (:var 1 int)
#6 := (* 2::int #5)
+#56 := (+ #6 #55)
+#54 := (= #56 -1::int)
+#58 := (not #54)
+#61 := (forall (vars (?x1 int) (?x2 int)) #58)
+#64 := (not #61)
+#81 := (~ #64 #80)
+#82 := [sk]: #81
+#10 := (* 2::int #9)
#8 := (+ #6 1::int)
#11 := (= #8 #10)
#12 := (not #11)
#13 := (forall (vars (?x1 int) (?x2 int)) #12)
#14 := (not #13)
-#74 := (iff #14 false)
+#67 := (iff #14 #64)
#31 := (+ 1::int #6)
#37 := (= #10 #31)
#42 := (not #37)
#45 := (forall (vars (?x1 int) (?x2 int)) #42)
#48 := (not #45)
-#72 := (iff #48 false)
-#1 := true
-#67 := (not true)
-#70 := (iff #67 false)
-#71 := [rewrite]: #70
-#68 := (iff #48 #67)
-#65 := (iff #45 true)
-#60 := (forall (vars (?x1 int) (?x2 int)) true)
-#63 := (iff #60 true)
-#64 := [elim-unused]: #63
-#61 := (iff #45 #60)
-#58 := (iff #42 true)
-#51 := (not false)
-#56 := (iff #51 true)
-#57 := [rewrite]: #56
-#52 := (iff #42 #51)
-#53 := (iff #37 false)
-#54 := [rewrite]: #53
-#55 := [monotonicity #54]: #52
-#59 := [trans #55 #57]: #58
-#62 := [quant-intro #59]: #61
-#66 := [trans #62 #64]: #65
-#69 := [monotonicity #66]: #68
-#73 := [trans #69 #71]: #72
+#65 := (iff #48 #64)
+#62 := (iff #45 #61)
+#59 := (iff #42 #58)
+#52 := (iff #37 #54)
+#57 := [rewrite]: #52
+#60 := [monotonicity #57]: #59
+#63 := [quant-intro #60]: #62
+#66 := [monotonicity #63]: #65
#49 := (iff #14 #48)
#46 := (iff #13 #45)
#43 := (iff #12 #42)
@@ -7482,9 +7573,19 @@
#44 := [monotonicity #41]: #43
#47 := [quant-intro #44]: #46
#50 := [monotonicity #47]: #49
-#75 := [trans #50 #73]: #74
+#68 := [trans #50 #66]: #67
#30 := [asserted]: #14
-[mp #30 #75]: false
+#69 := [mp #30 #68]: #64
+#85 := [mp~ #69 #82]: #80
+#86 := [mp #85 #111]: #94
+#168 := (or #102 #166)
+#169 := [th-lemma]: #168
+#170 := [unit-resolution #169 #86]: #166
+#167 := (>= #97 1::int)
+#171 := (or #102 #167)
+#172 := [th-lemma]: #171
+#173 := [unit-resolution #172 #86]: #167
+[th-lemma #173 #170]: false
unsat
oieid3+1h5s04LTQ9d796Q 2636
@@ -7960,38 +8061,62 @@
[unit-resolution #585 #307]: false
unsat
-4Dtb5Y1TTRPWZcHG9Griog 1594
+4Dtb5Y1TTRPWZcHG9Griog 2407
#2 := false
+#104 := -1::int
+decl ?x1!1 :: int
+#86 := ?x1!1
+#106 := -4::int
+#107 := (* -4::int ?x1!1)
+decl ?x2!0 :: int
+#85 := ?x2!0
+#7 := 6::int
+#105 := (* 6::int ?x2!0)
+#108 := (+ #105 #107)
+#168 := (<= #108 -1::int)
+#109 := (= #108 -1::int)
#12 := 1::int
+#33 := -6::int
+#87 := (* -6::int ?x2!0)
+#4 := 4::int
+#88 := (* 4::int ?x1!1)
+#89 := (+ #88 #87)
+#90 := (= #89 1::int)
+#112 := (iff #90 #109)
+#98 := (+ #87 #88)
+#101 := (= #98 1::int)
+#110 := (iff #101 #109)
+#111 := [rewrite]: #110
+#102 := (iff #90 #101)
+#99 := (= #89 #98)
+#100 := [rewrite]: #99
+#103 := [monotonicity #100]: #102
+#113 := [trans #103 #111]: #112
+#53 := (:var 0 int)
+#54 := (* -6::int #53)
#9 := (:var 1 int)
-#7 := 6::int
+#55 := (* 4::int #9)
+#56 := (+ #55 #54)
+#76 := (= #56 1::int)
+#74 := (exists (vars (?x1 int) (?x2 int)) #76)
+#91 := (~ #74 #90)
+#92 := [sk]: #91
#8 := (- 6::int)
#10 := (* #8 #9)
#5 := (:var 2 int)
-#4 := 4::int
#6 := (* 4::int #5)
#11 := (+ #6 #10)
#13 := (= #11 1::int)
#14 := (exists (vars (?x1 int) (?x2 int) (?x3 int)) #13)
#15 := (not #14)
#16 := (not #15)
-#82 := (iff #16 false)
-#53 := (:var 0 int)
-#33 := -6::int
-#54 := (* -6::int #53)
-#55 := (* 4::int #9)
-#56 := (+ #55 #54)
+#79 := (iff #16 #74)
#57 := (= 1::int #56)
#58 := (exists (vars (?x1 int) (?x2 int)) #57)
-#80 := (iff #58 false)
-#76 := (exists (vars (?x1 int) (?x2 int)) false)
-#78 := (iff #76 false)
-#79 := [elim-unused]: #78
-#77 := (iff #58 #76)
-#73 := (iff #57 false)
-#74 := [rewrite]: #73
-#75 := [quant-intro #74]: #77
-#81 := [trans #75 #79]: #80
+#77 := (iff #58 #74)
+#75 := (iff #57 #76)
+#73 := [rewrite]: #75
+#78 := [quant-intro #73]: #77
#71 := (iff #16 #58)
#63 := (not #58)
#66 := (not #63)
@@ -8025,9 +8150,20 @@
#65 := [monotonicity #62]: #64
#68 := [monotonicity #65]: #67
#72 := [trans #68 #70]: #71
-#83 := [trans #72 #81]: #82
+#80 := [trans #72 #78]: #79
#32 := [asserted]: #16
-[mp #32 #83]: false
+#81 := [mp #32 #80]: #74
+#95 := [mp~ #81 #92]: #90
+#96 := [mp #95 #113]: #109
+#170 := (not #109)
+#171 := (or #170 #168)
+#172 := [th-lemma]: #171
+#173 := [unit-resolution #172 #96]: #168
+#169 := (>= #108 -1::int)
+#174 := (or #170 #169)
+#175 := [th-lemma]: #174
+#176 := [unit-resolution #175 #96]: #169
+[th-lemma #176 #173]: false
unsat
dbOre63OdVavsqL3lG2ttw 2516
@@ -8445,12 +8581,12 @@
#46 := (+ #4 #45)
#44 := (>= #46 0::int)
#42 := (not #44)
-#57 := (or #44 #42)
-#61 := (iff #57 true)
+#60 := (or #44 #42)
+#61 := (iff #60 true)
#62 := [rewrite]: #61
-#59 := (iff #32 #57)
+#56 := (iff #32 #60)
#58 := (iff #11 #42)
-#56 := [rewrite]: #58
+#59 := [rewrite]: #58
#54 := (iff #31 #44)
#49 := (not #42)
#52 := (iff #49 #44)
@@ -8460,8 +8596,8 @@
#48 := [rewrite]: #47
#51 := [monotonicity #48]: #50
#55 := [trans #51 #53]: #54
-#60 := [monotonicity #55 #56]: #59
-#64 := [trans #60 #62]: #63
+#57 := [monotonicity #55 #59]: #56
+#64 := [trans #57 #62]: #63
#67 := [quant-intro #64]: #66
#71 := [trans #67 #69]: #70
#74 := [monotonicity #71]: #73
@@ -8764,7 +8900,7 @@
[mp #45 #150]: false
unsat
-iPZ87GNdi7uQw4ehEpbTPQ 6383
+iPZ87GNdi7uQw4ehEpbTPQ 7012
#2 := false
#9 := 0::int
decl uf_2 :: (-> T1 int)
@@ -8779,19 +8915,19 @@
#295 := -1::int
#274 := (* -1::int #293)
#610 := (+ #24 #274)
-#594 := (<= #610 0::int)
+#315 := (<= #610 0::int)
#612 := (= #610 0::int)
-#606 := (>= #23 0::int)
-#237 := (= #293 0::int)
-#549 := (not #237)
-#588 := (<= #293 0::int)
-#457 := (not #588)
+#255 := (>= #23 0::int)
+#317 := (= #293 0::int)
+#522 := (not #317)
+#577 := (<= #293 0::int)
+#519 := (not #577)
#26 := 1::int
-#558 := (>= #293 1::int)
-#555 := (= #293 1::int)
+#553 := (>= #293 1::int)
+#552 := (= #293 1::int)
#27 := (uf_1 1::int)
-#589 := (uf_2 #27)
-#301 := (= #589 1::int)
+#420 := (uf_2 #27)
+#565 := (= #420 1::int)
#10 := (:var 0 int)
#12 := (uf_1 #10)
#626 := (pattern #12)
@@ -8847,57 +8983,57 @@
#87 := [mp #51 #86]: #82
#146 := [mp~ #87 #130]: #82
#632 := [mp #146 #631]: #627
-#609 := (not #627)
-#578 := (or #609 #301)
-#311 := (>= 1::int 0::int)
-#585 := (not #311)
-#586 := (= 1::int #589)
-#590 := (or #586 #585)
-#419 := (or #609 #590)
-#421 := (iff #419 #578)
-#564 := (iff #578 #578)
-#565 := [rewrite]: #564
-#577 := (iff #590 #301)
-#574 := (or #301 false)
-#571 := (iff #574 #301)
-#576 := [rewrite]: #571
-#575 := (iff #590 #574)
-#584 := (iff #585 false)
+#237 := (not #627)
+#442 := (or #237 #565)
+#578 := (>= 1::int 0::int)
+#419 := (not #578)
+#421 := (= 1::int #420)
+#563 := (or #421 #419)
+#443 := (or #237 #563)
+#550 := (iff #443 #442)
+#547 := (iff #442 #442)
+#548 := [rewrite]: #547
+#559 := (iff #563 #565)
+#554 := (or #565 false)
+#558 := (iff #554 #565)
+#556 := [rewrite]: #558
+#555 := (iff #563 #554)
+#400 := (iff #419 false)
#1 := true
-#582 := (not true)
-#583 := (iff #582 false)
-#580 := [rewrite]: #583
-#296 := (iff #585 #582)
-#303 := (iff #311 true)
-#581 := [rewrite]: #303
-#579 := [monotonicity #581]: #296
-#573 := [trans #579 #580]: #584
-#300 := (iff #586 #301)
-#302 := [rewrite]: #300
-#570 := [monotonicity #302 #573]: #575
-#572 := [trans #570 #576]: #577
-#563 := [monotonicity #572]: #421
-#566 := [trans #563 #565]: #421
-#420 := [quant-inst]: #419
-#560 := [mp #420 #566]: #578
-#442 := [unit-resolution #560 #632]: #301
-#443 := (= #293 #589)
+#567 := (not true)
+#569 := (iff #567 false)
+#398 := [rewrite]: #569
+#568 := (iff #419 #567)
+#560 := (iff #578 true)
+#561 := [rewrite]: #560
+#562 := [monotonicity #561]: #568
+#401 := [trans #562 #398]: #400
+#564 := (iff #421 #565)
+#566 := [rewrite]: #564
+#557 := [monotonicity #566 #401]: #555
+#441 := [trans #557 #556]: #559
+#452 := [monotonicity #441]: #550
+#551 := [trans #452 #548]: #550
+#402 := [quant-inst]: #443
+#436 := [mp #402 #551]: #442
+#524 := [unit-resolution #436 #632]: #565
+#526 := (= #293 #420)
#28 := (= #25 #27)
#129 := [asserted]: #28
-#436 := [monotonicity #129]: #443
-#451 := [trans #436 #442]: #555
-#453 := (not #555)
-#454 := (or #453 #558)
-#447 := [th-lemma]: #454
-#455 := [unit-resolution #447 #451]: #558
-#456 := (not #558)
-#458 := (or #456 #457)
-#459 := [th-lemma]: #458
-#552 := [unit-resolution #459 #455]: #457
-#553 := (or #549 #588)
-#540 := [th-lemma]: #553
-#542 := [unit-resolution #540 #552]: #549
-#603 := (or #237 #606)
+#527 := [monotonicity #129]: #526
+#528 := [trans #527 #524]: #552
+#529 := (not #552)
+#525 := (or #529 #553)
+#530 := [th-lemma]: #525
+#516 := [unit-resolution #530 #528]: #553
+#517 := (not #553)
+#520 := (or #517 #519)
+#521 := [th-lemma]: #520
+#518 := [unit-resolution #521 #516]: #519
+#502 := (or #522 #577)
+#503 := [th-lemma]: #502
+#505 := [unit-resolution #503 #518]: #522
+#300 := (or #255 #317)
#18 := (= #13 0::int)
#118 := (or #18 #70)
#633 := (forall (vars (?x3 int)) (:pat #626) #118)
@@ -8954,71 +9090,95 @@
#128 := [mp #88 #127]: #123
#149 := [mp~ #128 #134]: #123
#638 := [mp #149 #637]: #633
-#604 := (not #633)
-#602 := (or #604 #237 #606)
+#582 := (not #633)
+#296 := (or #582 #255 #317)
#204 := (>= #24 0::int)
-#601 := (or #237 #204)
-#605 := (or #604 #601)
-#317 := (iff #605 #602)
-#592 := (or #604 #603)
-#315 := (iff #592 #602)
-#316 := [rewrite]: #315
-#299 := (iff #605 #592)
-#242 := (iff #601 #603)
-#279 := (iff #204 #606)
-#280 := [rewrite]: #279
-#243 := [monotonicity #280]: #242
-#314 := [monotonicity #243]: #299
-#210 := [trans #314 #316]: #317
-#591 := [quant-inst]: #605
-#587 := [mp #591 #210]: #602
-#534 := [unit-resolution #587 #638]: #603
-#531 := [unit-resolution #534 #542]: #606
-#613 := (not #606)
-#607 := (or #613 #612)
-#251 := (or #609 #613 #612)
+#210 := (or #317 #204)
+#579 := (or #582 #210)
+#570 := (iff #579 #296)
+#580 := (or #582 #300)
+#574 := (iff #580 #296)
+#575 := [rewrite]: #574
+#584 := (iff #579 #580)
+#303 := (iff #210 #300)
+#606 := (* 1::int #23)
+#279 := (>= #606 0::int)
+#311 := (or #279 #317)
+#301 := (iff #311 #300)
+#256 := (iff #279 #255)
+#251 := (= #606 #23)
+#593 := [rewrite]: #251
+#257 := [monotonicity #593]: #256
+#302 := [monotonicity #257]: #301
+#586 := (iff #210 #311)
+#587 := (or #317 #279)
+#585 := (iff #587 #311)
+#589 := [rewrite]: #585
+#588 := (iff #210 #587)
+#280 := (iff #204 #279)
+#613 := [rewrite]: #280
+#310 := [monotonicity #613]: #588
+#590 := [trans #310 #589]: #586
+#581 := [trans #590 #302]: #303
+#573 := [monotonicity #581]: #584
+#571 := [trans #573 #575]: #570
+#583 := [quant-inst]: #579
+#576 := [mp #583 #571]: #296
+#506 := [unit-resolution #576 #638]: #300
+#507 := [unit-resolution #506 #505]: #255
+#258 := (not #255)
+#597 := (or #258 #612)
+#601 := (or #237 #258 #612)
#289 := (not #204)
#294 := (= #24 #293)
#291 := (or #294 #289)
-#593 := (or #609 #291)
-#597 := (iff #593 #251)
-#256 := (or #609 #607)
-#595 := (iff #256 #251)
-#596 := [rewrite]: #595
-#257 := (iff #593 #256)
-#608 := (iff #291 #607)
-#616 := (or #612 #613)
-#266 := (iff #616 #607)
-#271 := [rewrite]: #266
-#611 := (iff #291 #616)
-#614 := (iff #289 #613)
-#615 := [monotonicity #280]: #614
+#603 := (or #237 #291)
+#592 := (iff #603 #601)
+#243 := (or #237 #597)
+#605 := (iff #243 #601)
+#591 := [rewrite]: #605
+#604 := (iff #603 #243)
+#594 := (iff #291 #597)
+#614 := (not #279)
+#266 := (or #614 #612)
+#598 := (iff #266 #597)
+#595 := (iff #614 #258)
+#596 := [monotonicity #257]: #595
+#599 := [monotonicity #596]: #598
+#267 := (iff #291 #266)
+#611 := (or #612 #614)
+#271 := (iff #611 #266)
+#608 := [rewrite]: #271
+#617 := (iff #291 #611)
+#615 := (iff #289 #614)
+#616 := [monotonicity #613]: #615
#268 := (iff #294 #612)
#399 := [rewrite]: #268
-#617 := [monotonicity #399 #615]: #611
-#267 := [trans #617 #271]: #608
-#258 := [monotonicity #267]: #257
-#598 := [trans #258 #596]: #597
-#255 := [quant-inst]: #593
-#599 := [mp #255 #598]: #251
-#533 := [unit-resolution #599 #632]: #607
-#543 := [unit-resolution #533 #531]: #612
-#544 := (not #612)
-#545 := (or #544 #594)
-#541 := [th-lemma]: #545
-#546 := [unit-resolution #541 #543]: #594
-#600 := (>= #610 0::int)
-#535 := (or #544 #600)
-#536 := [th-lemma]: #535
-#537 := [unit-resolution #536 #543]: #600
-#557 := (<= #293 1::int)
-#538 := (or #453 #557)
-#532 := [th-lemma]: #538
-#539 := [unit-resolution #532 #451]: #557
-[th-lemma #455 #539 #537 #546]: false
-unsat
-
-kDuOn7kAggfP4Um8ghhv5A 5551
+#607 := [monotonicity #399 #616]: #617
+#609 := [trans #607 #608]: #267
+#600 := [trans #609 #599]: #594
+#602 := [monotonicity #600]: #604
+#299 := [trans #602 #591]: #592
+#242 := [quant-inst]: #603
+#314 := [mp #242 #299]: #601
+#508 := [unit-resolution #314 #632]: #597
+#509 := [unit-resolution #508 #507]: #612
+#510 := (not #612)
+#511 := (or #510 #315)
+#512 := [th-lemma]: #511
+#513 := [unit-resolution #512 #509]: #315
+#316 := (>= #610 0::int)
+#514 := (or #510 #316)
+#504 := [th-lemma]: #514
+#515 := [unit-resolution #504 #509]: #316
+#549 := (<= #293 1::int)
+#493 := (or #529 #549)
+#494 := [th-lemma]: #493
+#496 := [unit-resolution #494 #528]: #549
+[th-lemma #516 #496 #515 #513]: false
+unsat
+
+kDuOn7kAggfP4Um8ghhv5A 6068
#2 := false
#23 := 3::int
decl uf_2 :: (-> T1 int)
@@ -9041,13 +9201,13 @@
#632 := -1::int
#634 := (* -1::int #28)
#290 := (+ #26 #634)
-#623 := (>= #290 0::int)
+#609 := (>= #290 0::int)
#421 := (= #290 0::int)
-#302 := (>= #22 0::int)
-#625 := (= #28 0::int)
-#318 := (not #625)
-#322 := (<= #28 0::int)
-#324 := (not #322)
+#273 := (>= #22 0::int)
+#610 := (= #28 0::int)
+#588 := (not #610)
+#441 := (<= #28 0::int)
+#443 := (not #441)
#29 := 7::int
#143 := (>= #28 7::int)
#30 := (< #28 7::int)
@@ -9064,12 +9224,12 @@
#151 := [trans #147 #149]: #150
#133 := [asserted]: #31
#152 := [mp #133 #151]: #143
-#325 := (or #324 #141)
-#603 := [th-lemma]: #325
-#604 := [unit-resolution #603 #152]: #324
-#601 := (or #318 #322)
-#605 := [th-lemma]: #601
-#602 := [unit-resolution #605 #604]: #318
+#585 := (or #443 #141)
+#586 := [th-lemma]: #585
+#587 := [unit-resolution #586 #152]: #443
+#582 := (or #588 #441)
+#583 := [th-lemma]: #582
+#589 := [unit-resolution #583 #587]: #588
#10 := (:var 0 int)
#12 := (uf_1 #10)
#648 := (pattern #12)
@@ -9132,33 +9292,45 @@
#131 := [mp #91 #130]: #126
#172 := [mp~ #131 #155]: #126
#660 := [mp #172 #659]: #655
-#337 := (not #655)
-#338 := (or #337 #302 #625)
+#605 := (not #655)
+#602 := (or #605 #273 #610)
#315 := (>= #26 0::int)
-#264 := (or #625 #315)
-#339 := (or #337 #264)
-#611 := (iff #339 #338)
-#627 := (or #302 #625)
-#609 := (or #337 #627)
-#333 := (iff #609 #338)
-#607 := [rewrite]: #333
-#610 := (iff #339 #609)
-#321 := (iff #264 #627)
-#265 := (or #625 #302)
-#613 := (iff #265 #627)
-#614 := [rewrite]: #613
-#626 := (iff #264 #265)
-#635 := (iff #315 #302)
-#636 := [rewrite]: #635
-#624 := [monotonicity #636]: #626
-#336 := [trans #624 #614]: #321
-#332 := [monotonicity #336]: #610
-#608 := [trans #332 #607]: #611
-#231 := [quant-inst]: #339
-#612 := [mp #231 #608]: #338
-#606 := [unit-resolution #612 #660 #602]: #302
-#637 := (not #302)
-#293 := (or #637 #421)
+#332 := (or #610 #315)
+#606 := (or #605 #332)
+#599 := (iff #606 #602)
+#323 := (or #273 #610)
+#596 := (or #605 #323)
+#593 := (iff #596 #602)
+#598 := [rewrite]: #593
+#597 := (iff #606 #596)
+#318 := (iff #332 #323)
+#302 := 1::int
+#635 := (* 1::int #22)
+#636 := (>= #635 0::int)
+#333 := (or #610 #636)
+#603 := (iff #333 #323)
+#608 := (or #610 #273)
+#324 := (iff #608 #323)
+#325 := [rewrite]: #324
+#612 := (iff #333 #608)
+#615 := (iff #636 #273)
+#289 := (= #635 #22)
+#631 := [rewrite]: #289
+#277 := [monotonicity #631]: #615
+#322 := [monotonicity #277]: #612
+#604 := [trans #322 #325]: #603
+#607 := (iff #332 #333)
+#637 := (iff #315 #636)
+#638 := [rewrite]: #637
+#611 := [monotonicity #638]: #607
+#601 := [trans #611 #604]: #318
+#592 := [monotonicity #601]: #597
+#594 := [trans #592 #598]: #599
+#595 := [quant-inst]: #606
+#600 := [mp #595 #594]: #602
+#590 := [unit-resolution #600 #660 #589]: #273
+#278 := (not #273)
+#620 := (or #278 #421)
#55 := (= #10 #13)
#80 := (or #55 #74)
#649 := (forall (vars (?x2 int)) (:pat #648) #80)
@@ -9208,39 +9380,47 @@
#90 := [mp #54 #89]: #85
#169 := [mp~ #90 #134]: #85
#654 := [mp #169 #653]: #649
-#615 := (not #649)
-#277 := (or #615 #637 #421)
+#264 := (not #649)
+#265 := (or #264 #278 #421)
#243 := (not #315)
#317 := (= #26 #28)
#296 := (or #317 #243)
-#278 := (or #615 #296)
-#621 := (iff #278 #277)
-#280 := (or #615 #293)
-#619 := (iff #280 #277)
-#620 := [rewrite]: #619
-#617 := (iff #278 #280)
-#631 := (iff #296 #293)
-#639 := (or #421 #637)
-#630 := (iff #639 #293)
-#289 := [rewrite]: #630
-#629 := (iff #296 #639)
-#638 := (iff #243 #637)
-#633 := [monotonicity #636]: #638
+#626 := (or #264 #296)
+#337 := (iff #626 #265)
+#627 := (or #264 #620)
+#321 := (iff #627 #265)
+#336 := [rewrite]: #321
+#613 := (iff #626 #627)
+#623 := (iff #296 #620)
+#633 := (not #636)
+#288 := (or #421 #633)
+#622 := (iff #288 #620)
+#617 := (or #421 #278)
+#621 := (iff #617 #620)
+#616 := [rewrite]: #621
+#618 := (iff #288 #617)
+#279 := (iff #633 #278)
+#280 := [monotonicity #277]: #279
+#619 := [monotonicity #280]: #618
+#259 := [trans #619 #616]: #622
+#293 := (iff #296 #288)
+#639 := (iff #243 #633)
+#629 := [monotonicity #638]: #639
#628 := (iff #317 #421)
#301 := [rewrite]: #628
-#288 := [monotonicity #301 #633]: #629
-#273 := [trans #288 #289]: #631
-#618 := [monotonicity #273]: #617
-#616 := [trans #618 #620]: #621
-#279 := [quant-inst]: #278
-#622 := [mp #279 #616]: #277
-#595 := [unit-resolution #622 #654]: #293
-#596 := [unit-resolution #595 #606]: #421
-#597 := (not #421)
-#592 := (or #597 #623)
-#593 := [th-lemma]: #592
-#598 := [unit-resolution #593 #596]: #623
-[th-lemma #152 #598 #139]: false
+#630 := [monotonicity #301 #629]: #293
+#625 := [trans #630 #259]: #623
+#614 := [monotonicity #625]: #613
+#338 := [trans #614 #336]: #337
+#624 := [quant-inst]: #626
+#339 := [mp #624 #338]: #265
+#584 := [unit-resolution #339 #654]: #620
+#591 := [unit-resolution #584 #590]: #421
+#420 := (not #421)
+#422 := (or #420 #609)
+#423 := [th-lemma]: #422
+#576 := [unit-resolution #423 #591]: #609
+[th-lemma #152 #576 #139]: false
unsat
aiB004AWADNjynNrqCDsxw 9284
@@ -9916,7 +10096,7 @@
[th-lemma #623 #188 #601 #628]: false
unsat
-ZcLxnpFewGGQ0H47MfRXGw 11816
+ZcLxnpFewGGQ0H47MfRXGw 12389
#2 := false
#9 := 0::int
decl uf_2 :: (-> T1 int)
@@ -9930,8 +10110,8 @@
#297 := (uf_2 #141)
#357 := (= #297 0::int)
#166 := (uf_1 0::int)
-#531 := (uf_2 #166)
-#537 := (= #531 0::int)
+#454 := (uf_2 #166)
+#515 := (= #454 0::int)
#10 := (:var 0 int)
#12 := (uf_1 #10)
#672 := (pattern #12)
@@ -9988,40 +10168,40 @@
#192 := [mp~ #95 #175]: #90
#678 := [mp #192 #677]: #673
#650 := (not #673)
-#528 := (or #650 #537)
-#529 := (>= 0::int 0::int)
-#530 := (not #529)
-#534 := (= 0::int #531)
-#535 := (or #534 #530)
-#508 := (or #650 #535)
-#509 := (iff #508 #528)
-#514 := (iff #528 #528)
-#515 := [rewrite]: #514
-#527 := (iff #535 #537)
-#520 := (or #537 false)
-#525 := (iff #520 #537)
-#526 := [rewrite]: #525
-#521 := (iff #535 #520)
-#519 := (iff #530 false)
+#468 := (or #650 #515)
+#528 := (>= 0::int 0::int)
+#508 := (not #528)
+#509 := (= 0::int #454)
+#490 := (or #509 #508)
+#469 := (or #650 #490)
+#471 := (iff #469 #468)
+#473 := (iff #468 #468)
+#474 := [rewrite]: #473
+#462 := (iff #490 #515)
+#495 := (or #515 false)
+#486 := (iff #495 #515)
+#507 := [rewrite]: #486
+#496 := (iff #490 #495)
+#492 := (iff #508 false)
#1 := true
-#512 := (not true)
-#517 := (iff #512 false)
-#518 := [rewrite]: #517
-#513 := (iff #530 #512)
-#538 := (iff #529 true)
-#511 := [rewrite]: #538
-#406 := [monotonicity #511]: #513
-#524 := [trans #406 #518]: #519
-#536 := (iff #534 #537)
-#532 := [rewrite]: #536
-#522 := [monotonicity #532 #524]: #521
-#523 := [trans #522 #526]: #527
-#490 := [monotonicity #523]: #509
-#510 := [trans #490 #515]: #509
-#454 := [quant-inst]: #508
-#516 := [mp #454 #510]: #528
-#394 := [unit-resolution #516 #678]: #537
-#355 := (= #297 #531)
+#491 := (not true)
+#483 := (iff #491 false)
+#485 := [rewrite]: #483
+#450 := (iff #508 #491)
+#516 := (iff #528 true)
+#484 := [rewrite]: #516
+#481 := [monotonicity #484]: #450
+#494 := [trans #481 #485]: #492
+#514 := (iff #509 #515)
+#510 := [rewrite]: #514
+#506 := [monotonicity #510 #494]: #496
+#463 := [trans #506 #507]: #462
+#472 := [monotonicity #463]: #471
+#475 := [trans #472 #474]: #471
+#470 := [quant-inst]: #469
+#476 := [mp #470 #475]: #468
+#351 := [unit-resolution #476 #678]: #515
+#287 := (= #297 #454)
#250 := (= #141 #166)
#26 := 2::int
#144 := (* 2::int #22)
@@ -10032,29 +10212,24 @@
#161 := (uf_1 #156)
#336 := (= #161 #166)
#327 := (not #336)
-#588 := (uf_2 #161)
-#555 := (= #588 0::int)
-#398 := (= #588 #531)
-#395 := [hypothesis]: #336
-#387 := [monotonicity #395]: #398
-#399 := [trans #387 #394]: #555
-#390 := (not #555)
-#547 := (<= #588 0::int)
-#403 := (not #547)
-#595 := (>= #150 0::int)
-#302 := -1::int
-#618 := (* -1::int #150)
-#624 := (+ #144 #618)
-#488 := (<= #624 0::int)
-#465 := (= #624 0::int)
-#609 := (>= #22 0::int)
-#442 := (= #22 0::int)
+#568 := (uf_2 #161)
+#537 := (= #568 0::int)
+#354 := (= #568 #454)
+#352 := [hypothesis]: #336
+#344 := [monotonicity #352]: #354
+#355 := [trans #344 #351]: #537
+#368 := (not #537)
+#527 := (<= #568 0::int)
+#375 := (not #527)
+#566 := (>= #150 0::int)
+#447 := (>= #22 0::int)
+#421 := (= #22 0::int)
#660 := (uf_1 #22)
-#495 := (uf_2 #660)
-#496 := (= #495 0::int)
-#612 := (not #609)
-#451 := [hypothesis]: #612
-#506 := (or #496 #609)
+#451 := (uf_2 #660)
+#452 := (= #451 0::int)
+#603 := (not #447)
+#424 := [hypothesis]: #603
+#455 := (or #447 #452)
#18 := (= #13 0::int)
#126 := (or #18 #78)
#679 := (forall (vars (?x3 int)) (:pat #672) #126)
@@ -10112,15 +10287,23 @@
#195 := [mp~ #136 #180]: #131
#684 := [mp #195 #683]: #679
#346 := (not #679)
-#462 := (or #346 #496 #609)
-#463 := (or #346 #506)
-#469 := (iff #463 #462)
-#470 := [rewrite]: #469
-#468 := [quant-inst]: #463
-#471 := [mp #468 #470]: #462
-#452 := [unit-resolution #471 #684]: #506
-#453 := [unit-resolution #452 #451]: #496
-#456 := (= #22 #495)
+#458 := (or #346 #447 #452)
+#453 := (or #452 #447)
+#459 := (or #346 #453)
+#434 := (iff #459 #458)
+#443 := (or #346 #455)
+#432 := (iff #443 #458)
+#433 := [rewrite]: #432
+#461 := (iff #459 #443)
+#456 := (iff #453 #455)
+#457 := [rewrite]: #456
+#431 := [monotonicity #457]: #461
+#436 := [trans #431 #433]: #434
+#460 := [quant-inst]: #459
+#437 := [mp #460 #436]: #458
+#420 := [unit-resolution #437 #684]: #455
+#425 := [unit-resolution #420 #424]: #452
+#405 := (= #22 #451)
#661 := (= uf_3 #660)
#4 := (:var 0 T1)
#5 := (uf_2 #4)
@@ -10151,117 +10334,136 @@
#663 := (not #665)
#653 := (or #663 #661)
#312 := [quant-inst]: #653
-#455 := [unit-resolution #312 #671]: #661
-#457 := [monotonicity #455]: #456
-#458 := [trans #457 #453]: #442
-#459 := (not #442)
-#460 := (or #459 #609)
-#443 := [th-lemma]: #460
-#461 := [unit-resolution #443 #451 #458]: false
-#431 := [lemma #461]: #609
-#613 := (or #465 #612)
-#615 := (or #650 #465 #612)
+#415 := [unit-resolution #312 #671]: #661
+#407 := [monotonicity #415]: #405
+#408 := [trans #407 #425]: #421
+#411 := (not #421)
+#412 := (or #411 #447)
+#416 := [th-lemma]: #412
+#409 := [unit-resolution #416 #424 #408]: false
+#417 := [lemma #409]: #447
+#302 := -1::int
+#618 := (* -1::int #150)
+#624 := (+ #144 #618)
+#595 := (<= #624 0::int)
+#465 := (= #624 0::int)
+#489 := (or #603 #465)
+#482 := (or #650 #603 #465)
#616 := (>= #144 0::int)
#617 := (not #616)
#622 := (= #144 #150)
#623 := (or #622 #617)
-#444 := (or #650 #623)
-#602 := (iff #444 #615)
-#447 := (or #650 #613)
-#603 := (iff #447 #615)
-#604 := [rewrite]: #603
-#600 := (iff #444 #447)
-#614 := (iff #623 #613)
-#606 := (iff #617 #612)
-#610 := (iff #616 #609)
-#611 := [rewrite]: #610
-#607 := [monotonicity #611]: #606
+#497 := (or #650 #623)
+#504 := (iff #497 #482)
+#500 := (or #650 #489)
+#502 := (iff #500 #482)
+#503 := [rewrite]: #502
+#493 := (iff #497 #500)
+#594 := (iff #623 #489)
+#609 := (* 1::int #22)
+#610 := (>= #609 0::int)
+#606 := (not #610)
+#614 := (or #465 #606)
+#498 := (iff #614 #489)
+#605 := (or #465 #603)
+#448 := (iff #605 #489)
+#596 := [rewrite]: #448
+#487 := (iff #614 #605)
+#604 := (iff #606 #603)
+#600 := (iff #610 #447)
+#444 := (= #609 #22)
+#446 := [rewrite]: #444
+#601 := [monotonicity #446]: #600
+#602 := [monotonicity #601]: #604
+#488 := [monotonicity #602]: #487
+#593 := [trans #488 #596]: #498
+#608 := (iff #623 #614)
+#607 := (iff #617 #606)
+#611 := (iff #616 #610)
+#612 := [rewrite]: #611
+#613 := [monotonicity #612]: #607
#466 := (iff #622 #465)
#467 := [rewrite]: #466
-#608 := [monotonicity #467 #607]: #614
-#601 := [monotonicity #608]: #600
-#605 := [trans #601 #604]: #602
-#446 := [quant-inst]: #444
-#487 := [mp #446 #605]: #615
-#439 := [unit-resolution #487 #678]: #613
-#435 := [unit-resolution #439 #431]: #465
-#440 := (not #465)
-#419 := (or #440 #488)
-#422 := [th-lemma]: #419
-#426 := [unit-resolution #422 #435]: #488
-#430 := (not #488)
-#433 := (or #595 #612 #430)
-#438 := [th-lemma]: #433
-#402 := [unit-resolution #438 #431 #426]: #595
-#590 := -3::int
-#579 := (* -1::int #588)
-#589 := (+ #150 #579)
-#553 := (<= #589 -3::int)
-#591 := (= #589 -3::int)
-#581 := (>= #150 -3::int)
+#615 := [monotonicity #467 #613]: #608
+#597 := [trans #615 #593]: #594
+#501 := [monotonicity #597]: #493
+#505 := [trans #501 #503]: #504
+#499 := [quant-inst]: #497
+#598 := [mp #499 #505]: #482
+#404 := [unit-resolution #598 #678]: #489
+#386 := [unit-resolution #404 #417]: #465
+#388 := (not #465)
+#389 := (or #388 #595)
+#390 := [th-lemma]: #389
+#391 := [unit-resolution #390 #386]: #595
+#395 := (not #595)
+#413 := (or #566 #603 #395)
+#403 := [th-lemma]: #413
+#373 := [unit-resolution #403 #391 #417]: #566
+#553 := -3::int
+#551 := (* -1::int #568)
+#552 := (+ #150 #551)
+#535 := (<= #552 -3::int)
+#554 := (= #552 -3::int)
+#557 := (>= #150 -3::int)
#644 := (>= #22 -1::int)
-#428 := (or #612 #644)
-#429 := [th-lemma]: #428
-#427 := [unit-resolution #429 #431]: #644
+#392 := (or #603 #644)
+#393 := [th-lemma]: #392
+#394 := [unit-resolution #393 #417]: #644
#646 := (not #644)
-#418 := (or #581 #646 #430)
-#421 := [th-lemma]: #418
-#423 := [unit-resolution #421 #426 #427]: #581
-#584 := (not #581)
-#573 := (or #584 #591)
-#562 := (or #650 #584 #591)
-#599 := (>= #156 0::int)
-#586 := (not #599)
-#580 := (= #156 #588)
-#577 := (or #580 #586)
-#563 := (or #650 #577)
-#549 := (iff #563 #562)
-#566 := (or #650 #573)
-#568 := (iff #566 #562)
-#548 := [rewrite]: #568
-#567 := (iff #563 #566)
-#571 := (iff #577 #573)
-#569 := (or #591 #584)
-#574 := (iff #569 #573)
-#575 := [rewrite]: #574
-#570 := (iff #577 #569)
-#578 := (iff #586 #584)
-#582 := (iff #599 #581)
-#583 := [rewrite]: #582
-#585 := [monotonicity #583]: #578
-#587 := (iff #580 #591)
-#592 := [rewrite]: #587
-#572 := [monotonicity #592 #585]: #570
-#576 := [trans #572 #575]: #571
-#564 := [monotonicity #576]: #567
-#551 := [trans #564 #548]: #549
-#565 := [quant-inst]: #563
-#552 := [mp #565 #551]: #562
-#424 := [unit-resolution #552 #678]: #573
-#420 := [unit-resolution #424 #423]: #591
-#425 := (not #591)
-#415 := (or #425 #553)
-#405 := [th-lemma]: #415
-#407 := [unit-resolution #405 #420]: #553
-#404 := (not #553)
-#401 := (not #595)
-#386 := (or #403 #401 #404)
-#388 := [th-lemma]: #386
-#389 := [unit-resolution #388 #407 #402]: #403
-#391 := (or #390 #547)
-#392 := [th-lemma]: #391
-#393 := [unit-resolution #392 #389]: #390
-#376 := [unit-resolution #393 #399]: false
-#378 := [lemma #376]: #327
+#396 := (or #557 #646 #395)
+#397 := [th-lemma]: #396
+#398 := [unit-resolution #397 #391 #394]: #557
+#560 := (not #557)
+#539 := (or #554 #560)
+#543 := (or #650 #554 #560)
+#567 := (>= #156 0::int)
+#564 := (not #567)
+#548 := (= #156 #568)
+#549 := (or #548 #564)
+#544 := (or #650 #549)
+#530 := (iff #544 #543)
+#546 := (or #650 #539)
+#533 := (iff #546 #543)
+#529 := [rewrite]: #533
+#541 := (iff #544 #546)
+#540 := (iff #549 #539)
+#550 := (iff #564 #560)
+#558 := (iff #567 #557)
+#559 := [rewrite]: #558
+#561 := [monotonicity #559]: #550
+#555 := (iff #548 #554)
+#556 := [rewrite]: #555
+#542 := [monotonicity #556 #561]: #540
+#547 := [monotonicity #542]: #541
+#531 := [trans #547 #529]: #530
+#545 := [quant-inst]: #544
+#534 := [mp #545 #531]: #543
+#387 := [unit-resolution #534 #678]: #539
+#399 := [unit-resolution #387 #398]: #554
+#376 := (not #554)
+#378 := (or #376 #535)
+#379 := [th-lemma]: #378
+#380 := [unit-resolution #379 #399]: #535
+#365 := (not #535)
+#364 := (not #566)
+#366 := (or #375 #364 #365)
+#358 := [th-lemma]: #366
+#367 := [unit-resolution #358 #380 #373]: #375
+#359 := (or #368 #527)
+#369 := [th-lemma]: #359
+#350 := [unit-resolution #369 #367]: #368
+#321 := [unit-resolution #350 #355]: false
+#323 := [lemma #321]: #327
#249 := (= #141 #161)
#334 := (not #249)
-#396 := (= #297 #588)
-#385 := [hypothesis]: #249
-#370 := [monotonicity #385]: #396
-#380 := (not #396)
-#434 := (+ #297 #579)
-#280 := (>= #434 0::int)
-#414 := (not #280)
+#343 := (= #297 #568)
+#322 := [hypothesis]: #249
+#333 := [monotonicity #322]: #343
+#315 := (not #343)
+#414 := (+ #297 #551)
+#401 := (>= #414 0::int)
+#372 := (not #401)
#303 := (* -1::int #297)
#304 := (+ #22 #303)
#356 := (>= #304 -1::int)
@@ -10290,21 +10492,21 @@
#256 := [trans #360 #362]: #363
#637 := [quant-inst]: #651
#633 := [mp #637 #256]: #648
-#408 := [unit-resolution #633 #678]: #649
-#411 := [unit-resolution #408 #427]: #641
-#412 := (not #641)
-#416 := (or #412 #356)
-#409 := [th-lemma]: #416
-#417 := [unit-resolution #409 #411]: #356
-#410 := [hypothesis]: #280
-#413 := [th-lemma #423 #410 #417 #407 #426]: false
-#400 := [lemma #413]: #414
-#381 := (or #380 #280)
-#382 := [th-lemma]: #381
-#377 := [unit-resolution #382 #400]: #380
-#371 := [unit-resolution #377 #370]: false
-#372 := [lemma #371]: #334
-#352 := (or #249 #250 #336)
+#381 := [unit-resolution #633 #678]: #649
+#382 := [unit-resolution #381 #394]: #641
+#383 := (not #641)
+#384 := (or #383 #356)
+#377 := [th-lemma]: #384
+#385 := [unit-resolution #377 #382]: #356
+#370 := [hypothesis]: #401
+#371 := [th-lemma #398 #370 #385 #380 #391]: false
+#374 := [lemma #371]: #372
+#328 := (or #315 #401)
+#329 := [th-lemma]: #328
+#332 := [unit-resolution #329 #374]: #315
+#316 := [unit-resolution #332 #333]: false
+#318 := [lemma #316]: #334
+#295 := (or #249 #250 #336)
#335 := (not #250)
#338 := (and #334 #335 #327)
#339 := (not #338)
@@ -10352,28 +10554,28 @@
#177 := [mp #137 #174]: #172
#326 := (or #169 #339)
#659 := [def-axiom]: #326
-#351 := [unit-resolution #659 #177]: #339
+#294 := [unit-resolution #659 #177]: #339
#314 := (or #338 #249 #250 #336)
#445 := [def-axiom]: #314
-#343 := [unit-resolution #445 #351]: #352
-#353 := [unit-resolution #343 #372 #378]: #250
-#321 := [monotonicity #353]: #355
-#323 := [trans #321 #394]: #357
-#368 := (not #357)
+#293 := [unit-resolution #445 #294]: #295
+#296 := [unit-resolution #293 #318 #323]: #250
+#290 := [monotonicity #296]: #287
+#285 := [trans #290 #351]: #357
+#310 := (not #357)
#620 := (<= #297 0::int)
-#364 := (not #620)
+#305 := (not #620)
#634 := (<= #304 -1::int)
-#374 := (or #412 #634)
-#373 := [th-lemma]: #374
-#375 := [unit-resolution #373 #411]: #634
-#365 := (not #634)
-#366 := (or #364 #612 #365)
-#358 := [th-lemma]: #366
-#367 := [unit-resolution #358 #375 #431]: #364
-#359 := (or #368 #620)
-#369 := [th-lemma]: #359
-#350 := [unit-resolution #369 #367]: #368
-[unit-resolution #350 #323]: false
+#319 := (or #383 #634)
+#298 := [th-lemma]: #319
+#300 := [unit-resolution #298 #382]: #634
+#306 := (not #634)
+#307 := (or #305 #603 #306)
+#308 := [th-lemma]: #307
+#309 := [unit-resolution #308 #300 #417]: #305
+#299 := (or #310 #620)
+#311 := [th-lemma]: #299
+#292 := [unit-resolution #311 #309]: #310
+[unit-resolution #292 #285]: false
unsat
ipe8HUL/t33OoeNl/z0smQ 4011
@@ -10539,7 +10741,7 @@
[th-lemma #656 #361 #261]: false
unsat
-eRjXXrQSzpzyc8Ro409d3Q 14366
+9FrZeHP8ZKJM+JmhfAjimQ 14889
#2 := false
#9 := 0::int
decl uf_2 :: (-> T1 int)
@@ -10551,46 +10753,46 @@
#38 := (* 4::int #37)
#39 := (uf_1 #38)
#40 := (uf_2 #39)
-#527 := (= #40 0::int)
-#976 := (not #527)
-#502 := (<= #40 0::int)
-#971 := (not #502)
+#504 := (= #40 0::int)
+#995 := (not #504)
+#475 := (<= #40 0::int)
+#990 := (not #475)
#22 := 1::int
#186 := (+ 1::int #40)
#189 := (uf_1 #186)
-#506 := (uf_2 #189)
-#407 := (<= #506 1::int)
-#876 := (not #407)
+#467 := (uf_2 #189)
+#380 := (<= #467 1::int)
+#893 := (not #380)
decl up_4 :: (-> T1 T1 bool)
#4 := (:var 0 T1)
-#408 := (up_4 #4 #189)
-#393 := (pattern #408)
-#413 := (= #4 #189)
-#414 := (not #408)
+#386 := (up_4 #4 #189)
+#374 := (pattern #386)
+#382 := (not #386)
+#385 := (= #4 #189)
#26 := (uf_1 1::int)
#27 := (= #4 #26)
-#392 := (or #27 #414 #413)
-#397 := (forall (vars (?x5 T1)) (:pat #393) #392)
-#383 := (not #397)
-#382 := (or #383 #407)
-#375 := (not #382)
+#845 := (or #27 #385 #382)
+#848 := (forall (vars (?x5 T1)) (:pat #374) #845)
+#851 := (not #848)
+#854 := (or #380 #851)
+#857 := (not #854)
decl up_3 :: (-> T1 bool)
#192 := (up_3 #189)
-#404 := (not #192)
-#841 := (or #404 #375)
+#840 := (not #192)
+#860 := (or #840 #857)
decl ?x5!0 :: (-> T1 T1)
-#422 := (?x5!0 #189)
-#434 := (= #189 #422)
-#417 := (up_4 #422 #189)
-#418 := (not #417)
-#415 := (= #26 #422)
-#847 := (or #415 #418 #434)
-#850 := (not #847)
-#853 := (or #192 #407 #850)
-#856 := (not #853)
-#844 := (not #841)
-#859 := (or #844 #856)
-#862 := (not #859)
+#392 := (?x5!0 #189)
+#397 := (up_4 #392 #189)
+#390 := (not #397)
+#396 := (= #26 #392)
+#395 := (= #189 #392)
+#866 := (or #395 #396 #390)
+#869 := (not #866)
+#872 := (or #192 #380 #869)
+#875 := (not #872)
+#863 := (not #860)
+#878 := (or #863 #875)
+#881 := (not #878)
#5 := (uf_2 #4)
#787 := (pattern #5)
#21 := (up_3 #4)
@@ -10769,64 +10971,59 @@
#314 := [mp #267 #313]: #311
#839 := [mp #314 #838]: #836
#589 := (not #836)
-#865 := (or #589 #862)
-#416 := (or #418 #415 #434)
-#419 := (not #416)
-#409 := (or #192 #407 #419)
-#410 := (not #409)
-#389 := (or #414 #27 #413)
-#394 := (forall (vars (?x5 T1)) (:pat #393) #389)
-#399 := (not #394)
-#401 := (or #407 #399)
-#402 := (not #401)
-#400 := (or #404 #402)
-#405 := (not #400)
-#388 := (or #405 #410)
-#391 := (not #388)
-#866 := (or #589 #391)
-#868 := (iff #866 #865)
-#870 := (iff #865 #865)
-#871 := [rewrite]: #870
-#863 := (iff #391 #862)
-#860 := (iff #388 #859)
-#857 := (iff #410 #856)
-#854 := (iff #409 #853)
-#851 := (iff #419 #850)
-#848 := (iff #416 #847)
-#849 := [rewrite]: #848
-#852 := [monotonicity #849]: #851
-#855 := [monotonicity #852]: #854
-#858 := [monotonicity #855]: #857
-#845 := (iff #405 #844)
-#842 := (iff #400 #841)
-#378 := (iff #402 #375)
-#376 := (iff #401 #382)
-#384 := (or #407 #383)
-#387 := (iff #384 #382)
-#374 := [rewrite]: #387
-#385 := (iff #401 #384)
-#380 := (iff #399 #383)
-#390 := (iff #394 #397)
-#395 := (iff #389 #392)
-#396 := [rewrite]: #395
-#398 := [quant-intro #396]: #390
-#381 := [monotonicity #398]: #380
-#386 := [monotonicity #381]: #385
-#377 := [trans #386 #374]: #376
-#840 := [monotonicity #377]: #378
-#843 := [monotonicity #840]: #842
-#846 := [monotonicity #843]: #845
-#861 := [monotonicity #846 #858]: #860
-#864 := [monotonicity #861]: #863
-#869 := [monotonicity #864]: #868
-#872 := [trans #869 #871]: #868
-#867 := [quant-inst]: #866
-#873 := [mp #867 #872]: #865
-#947 := [unit-resolution #873 #839]: #862
-#905 := (or #859 #841)
-#906 := [def-axiom]: #905
-#948 := [unit-resolution #906 #947]: #841
-#951 := (or #844 #375)
+#884 := (or #589 #881)
+#398 := (or #390 #396 #395)
+#383 := (not #398)
+#381 := (or #192 #380 #383)
+#384 := (not #381)
+#387 := (or #382 #27 #385)
+#376 := (forall (vars (?x5 T1)) (:pat #374) #387)
+#377 := (not #376)
+#375 := (or #380 #377)
+#378 := (not #375)
+#841 := (or #840 #378)
+#842 := (not #841)
+#843 := (or #842 #384)
+#844 := (not #843)
+#885 := (or #589 #844)
+#887 := (iff #885 #884)
+#889 := (iff #884 #884)
+#890 := [rewrite]: #889
+#882 := (iff #844 #881)
+#879 := (iff #843 #878)
+#876 := (iff #384 #875)
+#873 := (iff #381 #872)
+#870 := (iff #383 #869)
+#867 := (iff #398 #866)
+#868 := [rewrite]: #867
+#871 := [monotonicity #868]: #870
+#874 := [monotonicity #871]: #873
+#877 := [monotonicity #874]: #876
+#864 := (iff #842 #863)
+#861 := (iff #841 #860)
+#858 := (iff #378 #857)
+#855 := (iff #375 #854)
+#852 := (iff #377 #851)
+#849 := (iff #376 #848)
+#846 := (iff #387 #845)
+#847 := [rewrite]: #846
+#850 := [quant-intro #847]: #849
+#853 := [monotonicity #850]: #852
+#856 := [monotonicity #853]: #855
+#859 := [monotonicity #856]: #858
+#862 := [monotonicity #859]: #861
+#865 := [monotonicity #862]: #864
+#880 := [monotonicity #865 #877]: #879
+#883 := [monotonicity #880]: #882
+#888 := [monotonicity #883]: #887
+#891 := [trans #888 #890]: #887
+#886 := [quant-inst]: #885
+#892 := [mp #886 #891]: #884
+#966 := [unit-resolution #892 #839]: #881
+#924 := (or #878 #860)
+#925 := [def-axiom]: #924
+#967 := [unit-resolution #925 #966]: #860
+#970 := (or #863 #857)
#41 := (+ #40 1::int)
#42 := (uf_1 #41)
#43 := (up_3 #42)
@@ -10838,30 +11035,30 @@
#194 := [monotonicity #191]: #193
#185 := [asserted]: #43
#197 := [mp #185 #194]: #192
-#885 := (or #844 #404 #375)
-#886 := [def-axiom]: #885
-#952 := [unit-resolution #886 #197]: #951
-#953 := [unit-resolution #952 #948]: #375
-#877 := (or #382 #876)
-#878 := [def-axiom]: #877
-#954 := [unit-resolution #878 #953]: #876
+#904 := (or #863 #840 #857)
+#905 := [def-axiom]: #904
+#971 := [unit-resolution #905 #197]: #970
+#972 := [unit-resolution #971 #967]: #857
+#894 := (or #854 #893)
+#895 := [def-axiom]: #894
+#973 := [unit-resolution #895 #972]: #893
#542 := -1::int
-#508 := (* -1::int #506)
-#493 := (+ #40 #508)
-#438 := (>= #493 -1::int)
-#494 := (= #493 -1::int)
-#496 := (>= #40 -1::int)
-#451 := (= #506 0::int)
-#959 := (not #451)
-#432 := (<= #506 0::int)
-#955 := (not #432)
-#956 := (or #955 #407)
-#957 := [th-lemma]: #956
-#958 := [unit-resolution #957 #954]: #955
-#960 := (or #959 #432)
-#961 := [th-lemma]: #960
-#962 := [unit-resolution #961 #958]: #959
-#453 := (or #451 #496)
+#446 := (* -1::int #467)
+#447 := (+ #40 #446)
+#416 := (>= #447 -1::int)
+#438 := (= #447 -1::int)
+#453 := (>= #40 -1::int)
+#419 := (= #467 0::int)
+#978 := (not #419)
+#388 := (<= #467 0::int)
+#974 := (not #388)
+#975 := (or #974 #380)
+#976 := [th-lemma]: #975
+#977 := [unit-resolution #976 #973]: #974
+#979 := (or #978 #388)
+#980 := [th-lemma]: #979
+#981 := [unit-resolution #980 #977]: #978
+#409 := (or #419 #453)
#10 := (:var 0 int)
#12 := (uf_1 #10)
#795 := (pattern #12)
@@ -10924,28 +11121,28 @@
#145 := [mp #105 #144]: #140
#227 := [mp~ #145 #208]: #140
#807 := [mp #227 #806]: #802
-#514 := (not #802)
-#445 := (or #514 #451 #496)
-#504 := (>= #186 0::int)
-#452 := (or #451 #504)
-#456 := (or #514 #452)
-#429 := (iff #456 #445)
-#441 := (or #514 #453)
-#423 := (iff #441 #445)
-#428 := [rewrite]: #423
-#442 := (iff #456 #441)
-#454 := (iff #452 #453)
-#498 := (iff #504 #496)
-#487 := [rewrite]: #498
-#455 := [monotonicity #487]: #454
-#421 := [monotonicity #455]: #442
-#430 := [trans #421 #428]: #429
-#439 := [quant-inst]: #456
-#431 := [mp #439 #430]: #445
-#963 := [unit-resolution #431 #807]: #453
-#964 := [unit-resolution #963 #962]: #496
-#488 := (not #496)
-#490 := (or #494 #488)
+#496 := (not #802)
+#408 := (or #496 #419 #453)
+#476 := (>= #186 0::int)
+#407 := (or #419 #476)
+#414 := (or #496 #407)
+#404 := (iff #414 #408)
+#393 := (or #496 #409)
+#401 := (iff #393 #408)
+#402 := [rewrite]: #401
+#394 := (iff #414 #393)
+#410 := (iff #407 #409)
+#454 := (iff #476 #453)
+#455 := [rewrite]: #454
+#413 := [monotonicity #455]: #410
+#399 := [monotonicity #413]: #394
+#400 := [trans #399 #402]: #404
+#389 := [quant-inst]: #414
+#405 := [mp #389 #400]: #408
+#982 := [unit-resolution #405 #807]: #409
+#983 := [unit-resolution #982 #981]: #453
+#445 := (not #453)
+#441 := (or #438 #445)
#69 := (= #10 #13)
#94 := (or #69 #88)
#796 := (forall (vars (?x2 int)) (:pat #795) #94)
@@ -10995,48 +11192,48 @@
#104 := [mp #68 #103]: #99
#224 := [mp~ #104 #196]: #99
#801 := [mp #224 #800]: #796
-#530 := (not #796)
-#492 := (or #530 #494 #488)
-#505 := (not #504)
-#507 := (= #186 #506)
-#500 := (or #507 #505)
-#473 := (or #530 #500)
-#478 := (iff #473 #492)
-#475 := (or #530 #490)
-#477 := (iff #475 #492)
-#467 := [rewrite]: #477
-#466 := (iff #473 #475)
-#491 := (iff #500 #490)
-#489 := (iff #505 #488)
-#481 := [monotonicity #487]: #489
-#495 := (iff #507 #494)
-#497 := [rewrite]: #495
-#482 := [monotonicity #497 #481]: #491
-#476 := [monotonicity #482]: #466
-#444 := [trans #476 #467]: #478
-#474 := [quant-inst]: #473
-#446 := [mp #474 #444]: #492
-#965 := [unit-resolution #446 #801]: #490
-#966 := [unit-resolution #965 #964]: #494
-#967 := (not #494)
-#968 := (or #967 #438)
-#969 := [th-lemma]: #968
-#970 := [unit-resolution #969 #966]: #438
-#972 := (not #438)
-#973 := (or #971 #407 #972)
-#974 := [th-lemma]: #973
-#975 := [unit-resolution #974 #970 #954]: #971
-#977 := (or #976 #502)
-#978 := [th-lemma]: #977
-#979 := [unit-resolution #978 #975]: #976
-#553 := (>= #37 0::int)
-#546 := (not #553)
+#514 := (not #796)
+#423 := (or #514 #438 #445)
+#477 := (not #476)
+#478 := (= #186 #467)
+#444 := (or #478 #477)
+#428 := (or #514 #444)
+#434 := (iff #428 #423)
+#430 := (or #514 #441)
+#433 := (iff #430 #423)
+#422 := [rewrite]: #433
+#431 := (iff #428 #430)
+#442 := (iff #444 #441)
+#456 := (iff #477 #445)
+#439 := [monotonicity #455]: #456
+#451 := (iff #478 #438)
+#452 := [rewrite]: #451
+#421 := [monotonicity #452 #439]: #442
+#432 := [monotonicity #421]: #431
+#415 := [trans #432 #422]: #434
+#429 := [quant-inst]: #428
+#417 := [mp #429 #415]: #423
+#984 := [unit-resolution #417 #801]: #441
+#985 := [unit-resolution #984 #983]: #438
+#986 := (not #438)
+#987 := (or #986 #416)
+#988 := [th-lemma]: #987
+#989 := [unit-resolution #988 #985]: #416
+#991 := (not #416)
+#992 := (or #990 #380 #991)
+#993 := [th-lemma]: #992
+#994 := [unit-resolution #993 #989 #973]: #990
+#996 := (or #995 #475)
+#997 := [th-lemma]: #996
+#998 := [unit-resolution #997 #994]: #995
+#536 := (>= #37 0::int)
+#525 := (not #536)
#545 := (* -1::int #40)
#549 := (+ #38 #545)
#551 := (= #549 0::int)
-#984 := (not #551)
-#524 := (>= #549 0::int)
-#980 := (not #524)
+#1003 := (not #551)
+#503 := (>= #549 0::int)
+#999 := (not #503)
#201 := (>= #37 1::int)
#202 := (not #201)
#44 := (<= 1::int #37)
@@ -11047,55 +11244,79 @@
#204 := [monotonicity #200]: #203
#195 := [asserted]: #45
#205 := [mp #195 #204]: #202
-#981 := (or #980 #201 #407 #972)
-#982 := [th-lemma]: #981
-#983 := [unit-resolution #982 #205 #970 #954]: #980
-#985 := (or #984 #524)
-#986 := [th-lemma]: #985
-#987 := [unit-resolution #986 #983]: #984
-#548 := (or #551 #546)
-#531 := (or #530 #551 #546)
+#1000 := (or #999 #201 #380 #991)
+#1001 := [th-lemma]: #1000
+#1002 := [unit-resolution #1001 #205 #989 #973]: #999
+#1004 := (or #1003 #503)
+#1005 := [th-lemma]: #1004
+#1006 := [unit-resolution #1005 #1002]: #1003
+#527 := (or #525 #551)
+#515 := (or #514 #525 #551)
#403 := (>= #38 0::int)
#562 := (not #403)
#558 := (= #38 #40)
#563 := (or #558 #562)
-#534 := (or #530 #563)
-#537 := (iff #534 #531)
-#539 := (or #530 #548)
-#533 := (iff #539 #531)
-#536 := [rewrite]: #533
-#532 := (iff #534 #539)
-#538 := (iff #563 #548)
-#547 := (iff #562 #546)
-#541 := (iff #403 #553)
-#544 := [rewrite]: #541
-#543 := [monotonicity #544]: #547
-#552 := (iff #558 #551)
-#550 := [rewrite]: #552
-#528 := [monotonicity #550 #543]: #538
-#540 := [monotonicity #528]: #532
-#523 := [trans #540 #536]: #537
-#535 := [quant-inst]: #534
-#525 := [mp #535 #523]: #531
-#988 := [unit-resolution #525 #801]: #548
-#989 := [unit-resolution #988 #987]: #546
-#511 := (or #527 #553)
-#515 := (or #514 #527 #553)
-#509 := (or #527 #403)
-#516 := (or #514 #509)
+#516 := (or #514 #563)
#522 := (iff #516 #515)
-#518 := (or #514 #511)
+#518 := (or #514 #527)
#521 := (iff #518 #515)
#510 := [rewrite]: #521
#519 := (iff #516 #518)
-#512 := (iff #509 #511)
-#513 := [monotonicity #544]: #512
+#512 := (iff #563 #527)
+#553 := (* 1::int #37)
+#541 := (>= #553 0::int)
+#547 := (not #541)
+#531 := (or #547 #551)
+#509 := (iff #531 #527)
+#526 := (iff #547 #525)
+#537 := (iff #541 #536)
+#540 := (= #553 #37)
+#533 := [rewrite]: #540
+#523 := [monotonicity #533]: #537
+#524 := [monotonicity #523]: #526
+#511 := [monotonicity #524]: #509
+#539 := (iff #563 #531)
+#538 := (or #551 #547)
+#534 := (iff #538 #531)
+#535 := [rewrite]: #534
+#528 := (iff #563 #538)
+#543 := (iff #562 #547)
+#544 := (iff #403 #541)
+#546 := [rewrite]: #544
+#548 := [monotonicity #546]: #543
+#552 := (iff #558 #551)
+#550 := [rewrite]: #552
+#530 := [monotonicity #550 #548]: #528
+#532 := [trans #530 #535]: #539
+#513 := [trans #532 #511]: #512
#520 := [monotonicity #513]: #519
#499 := [trans #520 #510]: #522
#517 := [quant-inst]: #516
#501 := [mp #517 #499]: #515
-#990 := [unit-resolution #501 #807]: #511
-[unit-resolution #990 #989 #979]: false
+#1007 := [unit-resolution #501 #801]: #527
+#1008 := [unit-resolution #1007 #1006]: #525
+#508 := (or #504 #536)
+#498 := (or #496 #504 #536)
+#505 := (or #504 #403)
+#487 := (or #496 #505)
+#492 := (iff #487 #498)
+#489 := (or #496 #508)
+#491 := (iff #489 #498)
+#482 := [rewrite]: #491
+#481 := (iff #487 #489)
+#495 := (iff #505 #508)
+#506 := (or #504 #541)
+#493 := (iff #506 #508)
+#494 := [monotonicity #523]: #493
+#507 := (iff #505 #506)
+#500 := [monotonicity #546]: #507
+#497 := [trans #500 #494]: #495
+#490 := [monotonicity #497]: #481
+#473 := [trans #490 #482]: #492
+#488 := [quant-inst]: #487
+#474 := [mp #488 #473]: #498
+#1009 := [unit-resolution #474 #807]: #508
+[unit-resolution #1009 #1008 #998]: false
unsat
uq2MbDTgTG1nxWdwzZtWew 7
@@ -12233,7 +12454,7 @@
[mp #25 #34]: false
unsat
-YG20f6Uf93koN6rVg/alpA 9362
+YG20f6Uf93koN6rVg/alpA 9742
#2 := false
decl uf_1 :: (-> int T1)
#37 := 6::int
@@ -12248,18 +12469,18 @@
#35 := (uf_1 #34)
#36 := (uf_3 #35)
#39 := (= #36 #38)
-#476 := (uf_3 #38)
-#403 := (= #476 #38)
-#531 := (= #38 #476)
-#620 := (uf_2 #38)
+#484 := (uf_3 #38)
+#372 := (= #484 #38)
+#485 := (= #38 #484)
+#592 := (uf_2 #38)
#142 := -10::int
-#513 := (+ -10::int #620)
-#472 := (uf_1 #513)
-#503 := (uf_3 #472)
-#505 := (= #476 #503)
+#496 := (+ -10::int #592)
+#497 := (uf_1 #496)
+#498 := (uf_3 #497)
+#499 := (= #484 #498)
#22 := 10::int
-#507 := (>= #620 10::int)
-#514 := (ite #507 #505 #531)
+#500 := (>= #592 10::int)
+#501 := (ite #500 #499 #485)
#4 := (:var 0 T1)
#21 := (uf_3 #4)
#707 := (pattern #21)
@@ -12333,12 +12554,12 @@
#212 := [mp #207 #211]: #193
#713 := [mp #212 #712]: #708
#336 := (not #708)
-#518 := (or #336 #514)
-#528 := [quant-inst]: #518
-#477 := [unit-resolution #528 #713]: #514
-#529 := (not #507)
-#498 := (<= #620 6::int)
-#610 := (= #620 6::int)
+#463 := (or #336 #501)
+#464 := [quant-inst]: #463
+#444 := [unit-resolution #464 #713]: #501
+#473 := (not #500)
+#453 := (<= #592 6::int)
+#597 := (= #592 6::int)
#10 := (:var 0 int)
#12 := (uf_1 #10)
#694 := (pattern #12)
@@ -12396,79 +12617,79 @@
#201 := [mp~ #99 #183]: #94
#700 := [mp #201 #699]: #695
#673 := (not #695)
-#591 := (or #673 #610)
-#526 := (>= 6::int 0::int)
-#527 := (not #526)
-#617 := (= 6::int #620)
-#621 := (or #617 #527)
-#592 := (or #673 #621)
-#595 := (iff #592 #591)
-#597 := (iff #591 #591)
-#593 := [rewrite]: #597
-#600 := (iff #621 #610)
-#614 := (or #610 false)
-#605 := (iff #614 #610)
-#606 := [rewrite]: #605
-#603 := (iff #621 #614)
-#613 := (iff #527 false)
+#576 := (or #673 #597)
+#607 := (>= 6::int 0::int)
+#591 := (not #607)
+#594 := (= 6::int #592)
+#595 := (or #594 #591)
+#577 := (or #673 #595)
+#579 := (iff #577 #576)
+#581 := (iff #576 #576)
+#582 := [rewrite]: #581
+#574 := (iff #595 #597)
+#586 := (or #597 false)
+#571 := (iff #586 #597)
+#573 := [rewrite]: #571
+#590 := (iff #595 #586)
+#588 := (iff #591 false)
#1 := true
#663 := (not true)
#666 := (iff #663 false)
#667 := [rewrite]: #666
-#611 := (iff #527 #663)
-#599 := (iff #526 true)
-#601 := [rewrite]: #599
-#612 := [monotonicity #601]: #611
-#609 := [trans #612 #667]: #613
-#608 := (iff #617 #610)
-#602 := [rewrite]: #608
-#604 := [monotonicity #602 #609]: #603
-#607 := [trans #604 #606]: #600
-#596 := [monotonicity #607]: #595
-#598 := [trans #596 #593]: #595
-#594 := [quant-inst]: #592
-#584 := [mp #594 #598]: #591
-#478 := [unit-resolution #584 #700]: #610
-#453 := (not #610)
-#454 := (or #453 #498)
-#455 := [th-lemma]: #454
-#456 := [unit-resolution #455 #478]: #498
-#458 := (not #498)
-#459 := (or #458 #529)
-#460 := [th-lemma]: #459
-#302 := [unit-resolution #460 #456]: #529
-#508 := (not #514)
-#490 := (or #508 #507 #531)
-#491 := [def-axiom]: #490
-#461 := [unit-resolution #491 #302 #477]: #531
-#404 := [symm #461]: #403
-#405 := (= #36 #476)
+#585 := (iff #591 #663)
+#598 := (iff #607 true)
+#584 := [rewrite]: #598
+#587 := [monotonicity #584]: #585
+#589 := [trans #587 #667]: #588
+#596 := (iff #594 #597)
+#593 := [rewrite]: #596
+#570 := [monotonicity #593 #589]: #590
+#575 := [trans #570 #573]: #574
+#580 := [monotonicity #575]: #579
+#572 := [trans #580 #582]: #579
+#578 := [quant-inst]: #577
+#583 := [mp #578 #572]: #576
+#448 := [unit-resolution #583 #700]: #597
+#445 := (not #597)
+#446 := (or #445 #453)
+#442 := [th-lemma]: #446
+#447 := [unit-resolution #442 #448]: #453
+#437 := (not #453)
+#427 := (or #437 #473)
+#429 := [th-lemma]: #427
+#430 := [unit-resolution #429 #447]: #473
+#471 := (not #501)
+#477 := (or #471 #500 #485)
+#478 := [def-axiom]: #477
+#433 := [unit-resolution #478 #430 #444]: #485
+#373 := [symm #433]: #372
+#374 := (= #36 #484)
#649 := (uf_2 #35)
-#582 := (+ -10::int #649)
-#553 := (uf_1 #582)
-#556 := (uf_3 #553)
-#401 := (= #556 #476)
-#417 := (= #553 #38)
-#415 := (= #582 6::int)
+#554 := (+ -10::int #649)
+#549 := (uf_1 #554)
+#545 := (uf_3 #549)
+#381 := (= #545 #484)
+#395 := (= #549 #38)
+#394 := (= #554 6::int)
#335 := (uf_2 #31)
#647 := -1::int
-#502 := (* -1::int #335)
-#463 := (+ #33 #502)
-#464 := (<= #463 0::int)
-#486 := (= #33 #335)
-#445 := (= #32 #31)
-#574 := (= #31 #32)
-#575 := (+ -10::int #335)
-#576 := (uf_1 #575)
-#577 := (uf_3 #576)
-#578 := (= #32 #577)
-#579 := (>= #335 10::int)
-#580 := (ite #579 #578 #574)
-#572 := (or #336 #580)
-#583 := [quant-inst]: #572
-#457 := [unit-resolution #583 #713]: #580
-#562 := (not #579)
-#554 := (<= #335 4::int)
+#459 := (* -1::int #335)
+#460 := (+ #33 #459)
+#302 := (<= #460 0::int)
+#458 := (= #33 #335)
+#426 := (= #32 #31)
+#555 := (= #31 #32)
+#551 := (+ -10::int #335)
+#552 := (uf_1 #551)
+#553 := (uf_3 #552)
+#556 := (= #32 #553)
+#557 := (>= #335 10::int)
+#558 := (ite #557 #556 #555)
+#560 := (or #336 #558)
+#533 := [quant-inst]: #560
+#434 := [unit-resolution #533 #713]: #558
+#535 := (not #557)
+#531 := (<= #335 4::int)
#324 := (= #335 4::int)
#659 := (or #673 #324)
#678 := (>= 4::int 0::int)
@@ -12498,110 +12719,125 @@
#277 := [trans #383 #385]: #382
#367 := [quant-inst]: #660
#655 := [mp #367 #277]: #659
-#462 := [unit-resolution #655 #700]: #324
-#441 := (not #324)
-#444 := (or #441 #554)
-#448 := [th-lemma]: #444
-#450 := [unit-resolution #448 #462]: #554
-#451 := (not #554)
-#449 := (or #451 #562)
-#452 := [th-lemma]: #449
-#440 := [unit-resolution #452 #450]: #562
-#561 := (not #580)
-#566 := (or #561 #579 #574)
-#567 := [def-axiom]: #566
-#443 := [unit-resolution #567 #440 #457]: #574
-#446 := [symm #443]: #445
-#442 := [monotonicity #446]: #486
-#447 := (not #486)
-#437 := (or #447 #464)
-#427 := [th-lemma]: #437
-#429 := [unit-resolution #427 #442]: #464
-#471 := (>= #463 0::int)
-#430 := (or #447 #471)
-#433 := [th-lemma]: #430
-#434 := [unit-resolution #433 #442]: #471
-#560 := (>= #335 4::int)
-#438 := (or #441 #560)
-#431 := [th-lemma]: #438
-#439 := [unit-resolution #431 #462]: #560
+#438 := [unit-resolution #655 #700]: #324
+#431 := (not #324)
+#439 := (or #431 #531)
+#432 := [th-lemma]: #439
+#435 := [unit-resolution #432 #438]: #531
+#436 := (not #531)
+#422 := (or #436 #535)
+#424 := [th-lemma]: #422
+#425 := [unit-resolution #424 #435]: #535
+#534 := (not #558)
+#540 := (or #534 #557 #555)
+#541 := [def-axiom]: #540
+#423 := [unit-resolution #541 #425 #434]: #555
+#408 := [symm #423]: #426
+#410 := [monotonicity #408]: #458
+#411 := (not #458)
+#412 := (or #411 #302)
+#413 := [th-lemma]: #412
+#414 := [unit-resolution #413 #410]: #302
+#461 := (>= #460 0::int)
+#415 := (or #411 #461)
+#416 := [th-lemma]: #415
+#417 := [unit-resolution #416 #410]: #461
+#512 := (>= #335 4::int)
+#418 := (or #431 #512)
+#419 := [th-lemma]: #418
+#420 := [unit-resolution #419 #438]: #512
#651 := (* -1::int #649)
#648 := (+ #34 #651)
-#625 := (<= #648 0::int)
+#521 := (<= #648 0::int)
#652 := (= #648 0::int)
-#643 := (>= #33 0::int)
-#435 := (not #471)
-#432 := (not #560)
-#436 := (or #643 #432 #435)
-#422 := [th-lemma]: #436
-#424 := [unit-resolution #422 #439 #434]: #643
-#644 := (not #643)
-#489 := (or #644 #652)
-#628 := (or #673 #644 #652)
+#630 := (>= #33 0::int)
+#421 := (not #461)
+#409 := (not #512)
+#398 := (or #630 #409 #421)
+#400 := [th-lemma]: #398
+#401 := [unit-resolution #400 #420 #417]: #630
+#468 := (not #630)
+#623 := (or #468 #652)
+#509 := (or #673 #468 #652)
#370 := (>= #34 0::int)
#371 := (not #370)
#650 := (= #34 #649)
#364 := (or #650 #371)
-#629 := (or #673 #364)
-#469 := (iff #629 #628)
-#636 := (or #673 #489)
-#466 := (iff #636 #628)
-#468 := [rewrite]: #466
-#630 := (iff #629 #636)
-#633 := (iff #364 #489)
-#646 := (or #652 #644)
-#631 := (iff #646 #489)
-#632 := [rewrite]: #631
-#487 := (iff #364 #646)
-#645 := (iff #371 #644)
-#638 := (iff #370 #643)
-#639 := [rewrite]: #638
-#640 := [monotonicity #639]: #645
+#510 := (or #673 #364)
+#619 := (iff #510 #509)
+#470 := (or #673 #623)
+#615 := (iff #470 #509)
+#616 := [rewrite]: #615
+#618 := (iff #510 #470)
+#624 := (iff #364 #623)
+#643 := 1::int
+#638 := (* 1::int #33)
+#639 := (>= #638 0::int)
+#640 := (not #639)
+#632 := (or #640 #652)
+#625 := (iff #632 #623)
+#469 := (iff #640 #468)
+#637 := (iff #639 #630)
+#635 := (= #638 #33)
+#636 := [rewrite]: #635
+#466 := [monotonicity #636]: #637
+#622 := [monotonicity #466]: #469
+#626 := [monotonicity #622]: #625
+#628 := (iff #364 #632)
+#488 := (or #652 #640)
+#633 := (iff #488 #632)
+#634 := [rewrite]: #633
+#489 := (iff #364 #488)
+#646 := (iff #371 #640)
+#644 := (iff #370 #639)
+#645 := [rewrite]: #644
+#487 := [monotonicity #645]: #646
#641 := (iff #650 #652)
#642 := [rewrite]: #641
-#488 := [monotonicity #642 #640]: #487
-#634 := [trans #488 #632]: #633
-#637 := [monotonicity #634]: #630
-#622 := [trans #637 #468]: #469
-#635 := [quant-inst]: #629
-#623 := [mp #635 #622]: #628
-#425 := [unit-resolution #623 #700]: #489
-#423 := [unit-resolution #425 #424]: #652
-#426 := (not #652)
-#408 := (or #426 #625)
-#410 := [th-lemma]: #408
-#411 := [unit-resolution #410 #423]: #625
-#626 := (>= #648 0::int)
-#412 := (or #426 #626)
-#413 := [th-lemma]: #412
-#414 := [unit-resolution #413 #423]: #626
-#416 := [th-lemma #414 #411 #439 #450 #434 #429]: #415
-#418 := [monotonicity #416]: #417
-#402 := [monotonicity #418]: #401
-#557 := (= #36 #556)
-#581 := (= #35 #36)
-#558 := (>= #649 10::int)
-#559 := (ite #558 #557 #581)
-#533 := (or #336 #559)
-#534 := [quant-inst]: #533
-#419 := [unit-resolution #534 #713]: #559
-#420 := (not #625)
-#409 := (or #558 #420 #432 #435)
-#421 := [th-lemma]: #409
-#398 := [unit-resolution #421 #411 #439 #434]: #558
-#428 := (not #558)
-#535 := (not #559)
-#539 := (or #535 #428 #557)
-#540 := [def-axiom]: #539
-#400 := [unit-resolution #540 #398 #419]: #557
-#406 := [trans #400 #402]: #405
-#399 := [trans #406 #404]: #39
+#631 := [monotonicity #642 #487]: #489
+#629 := [trans #631 #634]: #628
+#627 := [trans #629 #626]: #624
+#520 := [monotonicity #627]: #618
+#504 := [trans #520 #616]: #619
+#511 := [quant-inst]: #510
+#519 := [mp #511 #504]: #509
+#402 := [unit-resolution #519 #700]: #623
+#403 := [unit-resolution #402 #401]: #652
+#404 := (not #652)
+#405 := (or #404 #521)
+#406 := [th-lemma]: #405
+#399 := [unit-resolution #406 #403]: #521
+#522 := (>= #648 0::int)
+#407 := (or #404 #522)
+#392 := [th-lemma]: #407
+#393 := [unit-resolution #392 #403]: #522
+#396 := [th-lemma #393 #399 #420 #435 #417 #414]: #394
+#397 := [monotonicity #396]: #395
+#391 := [monotonicity #397]: #381
+#550 := (= #36 #545)
+#559 := (= #35 #36)
+#530 := (>= #649 10::int)
+#476 := (ite #530 #550 #559)
+#536 := (or #336 #476)
+#537 := [quant-inst]: #536
+#386 := [unit-resolution #537 #713]: #476
+#387 := (not #521)
+#388 := (or #530 #387 #409 #421)
+#380 := [th-lemma]: #388
+#389 := [unit-resolution #380 #399 #420 #417]: #530
+#538 := (not #530)
+#532 := (not #476)
+#506 := (or #532 #538 #550)
+#513 := [def-axiom]: #506
+#390 := [unit-resolution #513 #389 #386]: #550
+#365 := [trans #390 #391]: #374
+#375 := [trans #365 #373]: #39
#40 := (not #39)
#182 := [asserted]: #40
-[unit-resolution #182 #399]: false
-unsat
-
-/fwo5o8vhLVHyS4oYEs4QA 10833
+[unit-resolution #182 #375]: false
+unsat
+
+/fwo5o8vhLVHyS4oYEs4QA 10902
#2 := false
#22 := 0::int
#8 := 2::int
@@ -12677,18 +12913,18 @@
#604 := [trans #593 #597]: #562
#603 := [quant-inst]: #596
#606 := [mp #603 #604]: #628
-#528 := [unit-resolution #606 #817]: #566
-#521 := (not #566)
-#464 := (or #521 #608)
-#456 := [th-lemma]: #464
-#465 := [unit-resolution #456 #528]: #608
+#516 := [unit-resolution #606 #817]: #566
+#498 := (not #566)
+#432 := (or #498 #608)
+#411 := [th-lemma]: #432
+#413 := [unit-resolution #411 #516]: #608
decl uf_10 :: int
#52 := uf_10
#57 := (mod uf_10 2::int)
#243 := (* -1::int #57)
#244 := (+ #56 #243)
#447 := (>= #244 0::int)
-#387 := (not #447)
+#372 := (not #447)
#245 := (= #244 0::int)
#248 := (not #245)
#218 := (* -1::int #55)
@@ -12708,9 +12944,9 @@
#662 := (not #672)
#1 := true
#81 := [true-axiom]: true
-#520 := (or false #662)
-#523 := [th-lemma]: #520
-#524 := [unit-resolution #523 #81]: #662
+#514 := (or false #662)
+#515 := [th-lemma]: #514
+#513 := [unit-resolution #515 #81]: #662
#701 := (* -1::int #613)
#204 := -2::int
#691 := (* -2::int #222)
@@ -12723,82 +12959,58 @@
#546 := [th-lemma]: #545
#548 := [unit-resolution #546 #81]: #692
#549 := (not #692)
-#497 := (or #549 #694)
-#482 := [th-lemma]: #497
-#483 := [unit-resolution #482 #548]: #694
+#482 := (or #549 #694)
+#483 := [th-lemma]: #482
+#484 := [unit-resolution #483 #548]: #694
#536 := (not #448)
-#395 := [hypothesis]: #536
+#382 := [hypothesis]: #536
#555 := (* -1::int uf_10)
#573 := (+ #51 #555)
#543 := (<= #573 0::int)
#53 := (= #51 uf_10)
#256 := (not #253)
#259 := (or #248 #256)
-#502 := 1::int
#731 := (div uf_10 2::int)
-#515 := (* -1::int #731)
-#513 := (+ #640 #515)
+#723 := (* -2::int #731)
+#521 := (* -2::int #624)
+#529 := (+ #521 #723)
#618 := (div #51 2::int)
-#514 := (* -1::int #618)
-#516 := (+ #514 #513)
-#498 := (+ #243 #516)
-#500 := (+ #56 #498)
-#501 := (+ uf_10 #500)
-#503 := (>= #501 1::int)
-#486 := (not #503)
-#361 := (<= #244 0::int)
+#583 := (* -2::int #618)
+#522 := (+ #583 #529)
+#528 := (* -2::int #57)
+#525 := (+ #528 #522)
+#524 := (* 2::int #56)
+#526 := (+ #524 #525)
+#523 := (* 2::int uf_10)
+#512 := (+ #523 #526)
+#520 := (>= #512 2::int)
#453 := (not #259)
-#517 := [hypothesis]: #453
+#519 := [hypothesis]: #453
#440 := (or #259 #245)
#451 := [def-axiom]: #440
-#519 := [unit-resolution #451 #517]: #245
-#478 := (or #248 #361)
-#470 := [th-lemma]: #478
-#479 := [unit-resolution #470 #519]: #361
-#449 := (>= #252 0::int)
+#470 := [unit-resolution #451 #519]: #245
+#465 := (or #248 #447)
+#466 := [th-lemma]: #465
+#457 := [unit-resolution #466 #470]: #447
+#544 := (>= #573 0::int)
+#441 := (not #544)
#452 := (or #259 #253)
#380 := [def-axiom]: #452
-#480 := [unit-resolution #380 #517]: #253
-#471 := (or #256 #449)
-#481 := [th-lemma]: #471
-#462 := [unit-resolution #481 #480]: #449
-#487 := (not #361)
-#485 := (not #449)
-#476 := (or #486 #485 #487)
-#607 := (<= #620 0::int)
-#529 := (or #521 #607)
-#522 := [th-lemma]: #529
-#525 := [unit-resolution #522 #528]: #607
-#723 := (* -2::int #731)
-#724 := (+ #243 #723)
-#718 := (+ uf_10 #724)
-#720 := (<= #718 0::int)
-#722 := (= #718 0::int)
-#526 := (or false #722)
-#512 := [th-lemma]: #526
-#504 := [unit-resolution #512 #81]: #722
-#505 := (not #722)
-#506 := (or #505 #720)
-#507 := [th-lemma]: #506
-#508 := [unit-resolution #507 #504]: #720
-#509 := [hypothesis]: #361
-#583 := (* -2::int #618)
-#584 := (+ #583 #640)
-#585 := (+ #51 #584)
-#587 := (<= #585 0::int)
-#582 := (= #585 0::int)
-#510 := (or false #582)
-#499 := [th-lemma]: #510
-#511 := [unit-resolution #499 #81]: #582
-#488 := (not #582)
-#490 := (or #488 #587)
-#491 := [th-lemma]: #490
-#492 := [unit-resolution #491 #511]: #587
-#493 := [hypothesis]: #503
+#481 := [unit-resolution #380 #519]: #253
+#467 := (or #256 #448)
+#434 := [th-lemma]: #467
+#436 := [unit-resolution #434 #481]: #448
+#532 := (or #543 #536)
+#695 := (>= #699 0::int)
+#550 := (or #549 #695)
+#393 := [th-lemma]: #550
+#551 := [unit-resolution #393 #548]: #695
+#547 := (not #543)
+#552 := [hypothesis]: #547
#649 := (* -2::int #60)
#644 := (+ #218 #649)
#650 := (+ #51 #644)
-#636 := (>= #650 0::int)
+#631 := (<= #650 0::int)
#623 := (= #650 0::int)
#43 := (uf_7 uf_2 #35)
#44 := (uf_6 #34 #43)
@@ -12859,33 +13071,6 @@
#630 := [quant-inst]: #629
#531 := [unit-resolution #630 #824]: #623
#534 := (not #623)
-#494 := (or #534 #636)
-#495 := [th-lemma]: #494
-#496 := [unit-resolution #495 #531]: #636
-#489 := [hypothesis]: #449
-#484 := [th-lemma #483 #489 #496 #493 #492 #509 #508 #525 #524]: false
-#477 := [lemma #484]: #476
-#463 := [unit-resolution #477 #462 #479]: #486
-#727 := (>= #718 0::int)
-#466 := (or #505 #727)
-#457 := [th-lemma]: #466
-#467 := [unit-resolution #457 #504]: #727
-#434 := (or #248 #447)
-#436 := [th-lemma]: #434
-#437 := [unit-resolution #436 #519]: #447
-#544 := (>= #573 0::int)
-#445 := (not #544)
-#428 := (or #256 #448)
-#441 := [th-lemma]: #428
-#442 := [unit-resolution #441 #480]: #448
-#532 := (or #543 #536)
-#695 := (>= #699 0::int)
-#550 := (or #549 #695)
-#393 := [th-lemma]: #550
-#551 := [unit-resolution #393 #548]: #695
-#547 := (not #543)
-#552 := [hypothesis]: #547
-#631 := (<= #650 0::int)
#538 := (or #534 #631)
#540 := [th-lemma]: #538
#541 := [unit-resolution #540 #531]: #631
@@ -12896,8 +13081,8 @@
#533 := [unit-resolution #530 #81]: #666
#535 := [th-lemma #533 #539 #541 #552 #551]: false
#537 := [lemma #535]: #532
-#443 := [unit-resolution #537 #442]: #543
-#429 := (or #547 #445)
+#437 := [unit-resolution #537 #436]: #543
+#444 := (or #547 #441)
#764 := (not #53)
#771 := (or #764 #259)
#262 := (iff #53 #259)
@@ -12950,65 +13135,118 @@
#438 := (or #764 #259 #433)
#439 := [def-axiom]: #438
#772 := [unit-resolution #439 #267]: #771
-#444 := [unit-resolution #772 #517]: #764
-#435 := (or #53 #547 #445)
-#446 := [th-lemma]: #435
-#431 := [unit-resolution #446 #444]: #429
-#432 := [unit-resolution #431 #443]: #445
+#428 := [unit-resolution #772 #519]: #764
+#442 := (or #53 #547 #441)
+#443 := [th-lemma]: #442
+#445 := [unit-resolution #443 #428]: #444
+#435 := [unit-resolution #445 #437]: #441
+#584 := (+ #583 #640)
+#585 := (+ #51 #584)
#588 := (>= #585 0::int)
-#411 := (or #488 #588)
-#413 := [th-lemma]: #411
-#418 := [unit-resolution #413 #511]: #588
-#419 := [th-lemma #418 #432 #437 #467 #465 #463]: false
-#420 := [lemma #419]: #259
+#582 := (= #585 0::int)
+#499 := (or false #582)
+#511 := [th-lemma]: #499
+#488 := [unit-resolution #511 #81]: #582
+#490 := (not #582)
+#446 := (or #490 #588)
+#429 := [th-lemma]: #446
+#431 := [unit-resolution #429 #488]: #588
+#724 := (+ #243 #723)
+#718 := (+ uf_10 #724)
+#727 := (>= #718 0::int)
+#722 := (= #718 0::int)
+#503 := (or false #722)
+#504 := [th-lemma]: #503
+#505 := [unit-resolution #504 #81]: #722
+#506 := (not #722)
+#418 := (or #506 #727)
+#419 := [th-lemma]: #418
+#420 := [unit-resolution #419 #505]: #727
+#421 := [th-lemma #420 #413 #431 #435 #457]: #520
+#485 := (not #520)
+#361 := (<= #244 0::int)
+#479 := (or #248 #361)
+#480 := [th-lemma]: #479
+#471 := [unit-resolution #480 #470]: #361
+#449 := (>= #252 0::int)
+#462 := (or #256 #449)
+#463 := [th-lemma]: #462
+#464 := [unit-resolution #463 #481]: #449
+#476 := (not #361)
+#487 := (not #449)
+#477 := (or #485 #487 #476)
+#607 := (<= #620 0::int)
+#500 := (or #498 #607)
+#501 := [th-lemma]: #500
+#502 := [unit-resolution #501 #516]: #607
+#720 := (<= #718 0::int)
+#507 := (or #506 #720)
+#508 := [th-lemma]: #507
+#509 := [unit-resolution #508 #505]: #720
+#510 := [hypothesis]: #361
+#587 := (<= #585 0::int)
+#491 := (or #490 #587)
+#492 := [th-lemma]: #491
+#493 := [unit-resolution #492 #488]: #587
+#494 := [hypothesis]: #520
+#636 := (>= #650 0::int)
+#495 := (or #534 #636)
+#496 := [th-lemma]: #495
+#489 := [unit-resolution #496 #531]: #636
+#497 := [hypothesis]: #449
+#486 := [th-lemma #484 #497 #489 #494 #493 #510 #509 #502 #513]: false
+#478 := [lemma #486]: #477
+#456 := [unit-resolution #478 #464 #471]: #485
+#422 := [unit-resolution #456 #421]: false
+#423 := [lemma #422]: #259
#427 := (or #53 #453)
#768 := (or #53 #453 #433)
#770 := [def-axiom]: #768
#557 := [unit-resolution #770 #267]: #427
-#406 := [unit-resolution #557 #420]: #53
-#377 := (or #764 #543)
-#381 := [th-lemma]: #377
-#382 := [unit-resolution #381 #406]: #543
-#385 := [th-lemma #496 #382 #395 #483 #524]: false
-#386 := [lemma #385]: #448
-#390 := (or #253 #536)
-#408 := [hypothesis]: #485
-#409 := (or #764 #544)
-#397 := [th-lemma]: #409
-#399 := [unit-resolution #397 #406]: #544
-#400 := [th-lemma #399 #408 #533 #551 #541]: false
-#403 := [lemma #400]: #449
-#392 := (or #253 #536 #485)
-#394 := [th-lemma]: #392
-#657 := [unit-resolution #394 #403]: #390
-#658 := [unit-resolution #657 #386]: #253
+#399 := [unit-resolution #557 #423]: #53
+#385 := (or #764 #543)
+#386 := [th-lemma]: #385
+#387 := [unit-resolution #386 #399]: #543
+#379 := [th-lemma #489 #387 #382 #484 #513]: false
+#388 := [lemma #379]: #448
+#381 := (or #253 #536)
+#397 := [hypothesis]: #487
+#400 := (or #764 #544)
+#403 := [th-lemma]: #400
+#398 := [unit-resolution #403 #399]: #544
+#404 := [th-lemma #398 #397 #533 #551 #541]: false
+#378 := [lemma #404]: #449
+#395 := (or #253 #536 #487)
+#377 := [th-lemma]: #395
+#658 := [unit-resolution #377 #378]: #381
+#653 := [unit-resolution #658 #388]: #253
#450 := (or #453 #248 #256)
#454 := [def-axiom]: #450
-#762 := [unit-resolution #454 #420]: #259
-#664 := [unit-resolution #762 #658]: #248
-#372 := (or #245 #387)
-#560 := (+ #57 #640)
-#610 := (>= #560 0::int)
-#742 := (= #57 #624)
-#424 := (= #624 #57)
-#405 := [monotonicity #406]: #424
-#407 := [symm #405]: #742
-#705 := (not #742)
-#706 := (or #705 #610)
-#568 := [th-lemma]: #706
-#569 := [unit-resolution #568 #407]: #610
-#398 := [hypothesis]: #487
-#404 := [th-lemma #525 #398 #569]: false
-#378 := [lemma #404]: #361
-#379 := (or #245 #487 #387)
-#388 := [th-lemma]: #379
-#369 := [unit-resolution #388 #378]: #372
-#370 := [unit-resolution #369 #664]: #387
-#708 := (<= #560 0::int)
-#373 := (or #705 #708)
-#374 := [th-lemma]: #373
-#375 := [unit-resolution #374 #407]: #708
-[th-lemma #375 #370 #465]: false
+#664 := [unit-resolution #454 #423]: #259
+#665 := [unit-resolution #664 #653]: #248
+#373 := (or #245 #372)
+#708 := (+ #57 #640)
+#705 := (>= #708 0::int)
+#560 := (= #57 #624)
+#408 := (= #624 #57)
+#406 := [monotonicity #399]: #408
+#409 := [symm #406]: #560
+#706 := (not #560)
+#655 := (or #706 #705)
+#569 := [th-lemma]: #655
+#570 := [unit-resolution #569 #409]: #705
+#383 := [hypothesis]: #476
+#384 := [th-lemma #502 #383 #570]: false
+#389 := [lemma #384]: #361
+#369 := (or #245 #476 #372)
+#370 := [th-lemma]: #369
+#374 := [unit-resolution #370 #389]: #373
+#375 := [unit-resolution #374 #665]: #372
+#610 := (<= #708 0::int)
+#371 := (or #706 #610)
+#376 := [th-lemma]: #371
+#363 := [unit-resolution #376 #409]: #610
+[th-lemma #363 #375 #413]: false
unsat
s8LL71+1HTN+eIuEYeKhUw 1251
--- a/src/HOL/SMT/Examples/SMT_Examples.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SMT/Examples/SMT_Examples.thy Tue Feb 09 17:06:05 2010 +0100
@@ -17,7 +17,7 @@
the following option is set to "false":
*}
-declare [[smt_record=false]]
+declare [[smt_record=false]]
--- a/src/HOL/SMT/Tools/smt_solver.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SMT/Tools/smt_solver.ML Tue Feb 09 17:06:05 2010 +0100
@@ -156,7 +156,7 @@
else (["certificate"], "Using certificate from " ^ quote certs' ^ " ...")
val _ = tracing msg
in
- system_out (space_implode " " ("perl -w" ::
+ bash_output (space_implode " " (
File.shell_path (Path.explode (getenv "RUN_SMT_SOLVER")) :: certs' ::
map File.shell_quote (solver @ args) @
map File.shell_path [problem_path, proof_path]) ^ " 2>&1")
--- a/src/HOL/SMT/Tools/z3_proof_rules.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SMT/Tools/z3_proof_rules.ML Tue Feb 09 17:06:05 2010 +0100
@@ -83,7 +83,7 @@
val rule_of_string = Symtab.lookup rule_names
fun string_of_rule r =
let fun fit (s, r') = if r = r' then SOME s else NONE
- in the (Symtab.get_first NONE fit rule_names) end
+ in the (Symtab.get_first fit rule_names) end
(* proof representation *)
@@ -545,7 +545,7 @@
fun derive conj t lits idx ptab =
let
- val (l, lit) = the (Termtab.get_first NONE (get_lit conj t) lits)
+ val (l, lit) = the (Termtab.get_first (get_lit conj t) lits)
val ls = explode_thm conj false false [t] lit
val lits' = fold (Termtab.update o ` prop_of) ls (Termtab.delete l lits)
fun upd (Sequent {hyps, vars, thm}) =
@@ -1231,7 +1231,7 @@
(case Termtab.lookup tab @{term False} of
SOME rs => extract_lit thm rs
| NONE =>
- pairself (extract_lit thm) (the (Termtab.get_first NONE pnlits tab))
+ pairself (extract_lit thm) (the (Termtab.get_first pnlits tab))
|> (fn (nlit, plit) => nlit COMP (plit COMP contra_rule)))
end
val falseE_v = Thm.dest_arg (Thm.dest_arg (Thm.cprop_of @{thm FalseE}))
--- a/src/HOL/SMT/etc/settings Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SMT/etc/settings Tue Feb 09 17:06:05 2010 +0100
@@ -1,6 +1,7 @@
ISABELLE_SMT="$COMPONENT"
-RUN_SMT_SOLVER="$ISABELLE_SMT/lib/scripts/run_smt_solver.pl"
+RUN_SMT_SOLVER="$ISABELLE_SMT/lib/scripts/run_smt_solver"
+REMOTE_SMT="$ISABELLE_SMT/lib/scripts/remote_smt"
REMOTE_SMT_URL="http://smt.in.tum.de/smt"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/SMT/lib/scripts/remote_smt Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,31 @@
+#!/usr/bin/env perl
+#
+# Author: Sascha Boehme, TU Muenchen
+#
+# Invoke remote SMT solvers.
+
+use strict;
+use warnings;
+use LWP;
+
+
+# arguments
+
+my $solver = $ARGV[0];
+my @options = @ARGV[1 .. ($#ARGV - 1)];
+my $problem_file = $ARGV[-1];
+
+
+# call solver
+
+my $agent = LWP::UserAgent->new;
+$agent->agent("SMT-Request");
+$agent->timeout(180);
+my $response = $agent->post($ENV{"REMOTE_SMT_URL"}, [
+ "Solver" => $solver,
+ "Options" => join(" ", @options),
+ "Problem" => [$problem_file] ],
+ "Content_Type" => "form-data");
+if (not $response->is_success) { die "HTTP-Error: " . $response->message; }
+else { print $response->content; }
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/SMT/lib/scripts/run_smt_solver Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,93 @@
+#!/usr/bin/env perl
+#
+# Author: Sascha Boehme, TU Muenchen
+#
+# Cache for prover results, based on discriminating problems using MD5.
+
+use strict;
+use warnings;
+use Digest::MD5;
+
+
+# arguments
+
+my $certs_file = shift @ARGV;
+my $location = shift @ARGV;
+my $result_file = pop @ARGV;
+my $problem_file = $ARGV[-1];
+
+my $use_certs = not ($certs_file eq "-");
+
+
+# create MD5 problem digest
+
+my $problem_digest = "";
+if ($use_certs) {
+ my $md5 = Digest::MD5->new;
+ open FILE, "<$problem_file" or
+ die "ERROR: Failed to open '$problem_file' ($!)";
+ $md5->addfile(*FILE);
+ close FILE;
+ $problem_digest = $md5->b64digest;
+}
+
+
+# lookup problem digest among existing certificates and
+# return a cached result (if there is a certificate for the problem)
+
+if ($use_certs and -e $certs_file) {
+ my $cached = 0;
+ open CERTS, "<$certs_file" or die "ERROR: Failed to open '$certs_file' ($!)";
+ while (<CERTS>) {
+ if (m/(\S+) (\d+)/) {
+ if ($1 eq $problem_digest) {
+ my $start = tell CERTS;
+ open FILE, ">$result_file" or
+ die "ERROR: Failed to open '$result_file ($!)";
+ while ((tell CERTS) - $start < $2) {
+ my $line = <CERTS>;
+ print FILE $line;
+ }
+ close FILE;
+ $cached = 1;
+ last;
+ }
+ else { seek CERTS, $2, 1; }
+ }
+ else { die "ERROR: Invalid file format in '$certs_file'"; }
+ }
+ close CERTS;
+ if ($cached) { exit 0; }
+}
+
+
+# invoke (remote or local) prover
+
+my $result;
+
+if ($location eq "remote") {
+ $result = system "$ENV{'REMOTE_SMT'} @ARGV >$result_file 2>&1";
+}
+elsif ($location eq "local") {
+ $result = system "@ARGV >$result_file 2>&1";
+}
+else { die "ERROR: No SMT solver invoked"; }
+
+
+# cache result
+
+if ($use_certs) {
+ open CERTS, ">>$certs_file" or
+ die "ERROR: Failed to open '$certs_file' ($!)";
+ print CERTS
+ ("$problem_digest " . ((-s $result_file) + (length "\n")) . "\n");
+
+ open FILE, "<$result_file" or
+ die "ERROR: Failed to open '$result_file' ($!)";
+ foreach (<FILE>) { print CERTS $_; }
+ close FILE;
+
+ print CERTS "\n";
+ close CERTS;
+}
+
--- a/src/HOL/SMT/lib/scripts/run_smt_solver.pl Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-#
-# Author: Sascha Boehme, TU Muenchen
-#
-# Cache for prover results, based on discriminating problems using MD5.
-
-use strict;
-use warnings;
-use Digest::MD5;
-use LWP;
-
-
-# arguments
-
-my $certs_file = shift @ARGV;
-my $location = shift @ARGV;
-my $result_file = pop @ARGV;
-my $problem_file = $ARGV[-1];
-
-my $use_certs = not ($certs_file eq "-");
-
-
-# create MD5 problem digest
-
-my $problem_digest = "";
-if ($use_certs) {
- my $md5 = Digest::MD5->new;
- open FILE, "<$problem_file" or
- die "ERROR: Failed to open '$problem_file' ($!)";
- $md5->addfile(*FILE);
- close FILE;
- $problem_digest = $md5->b64digest;
-}
-
-
-# lookup problem digest among existing certificates and
-# return a cached result (if there is a certificate for the problem)
-
-if ($use_certs and -e $certs_file) {
- my $cached = 0;
- open CERTS, "<$certs_file" or die "ERROR: Failed to open '$certs_file' ($!)";
- while (<CERTS>) {
- if (m/(\S+) (\d+)/) {
- if ($1 eq $problem_digest) {
- my $start = tell CERTS;
- open FILE, ">$result_file" or
- die "ERROR: Failed to open '$result_file ($!)";
- while ((tell CERTS) - $start < $2) {
- my $line = <CERTS>;
- print FILE $line;
- }
- close FILE;
- $cached = 1;
- last;
- }
- else { seek CERTS, $2, 1; }
- }
- else { die "ERROR: Invalid file format in '$certs_file'"; }
- }
- close CERTS;
- if ($cached) { exit 0; }
-}
-
-
-# invoke (remote or local) prover
-
-if ($location eq "remote") {
- # arguments
- my $solver = $ARGV[0];
- my @options = @ARGV[1 .. ($#ARGV - 1)];
-
- # call solver
- my $agent = LWP::UserAgent->new;
- $agent->agent("SMT-Request");
- $agent->timeout(180);
- my $response = $agent->post($ENV{"REMOTE_SMT_URL"}, [
- "Solver" => $solver,
- "Options" => join(" ", @options),
- "Problem" => [$problem_file] ],
- "Content_Type" => "form-data");
- if (not $response->is_success) { die "HTTP-Error: " . $response->message; }
- else {
- open FILE, ">$result_file" or
- die "ERROR: Failed to create '$result_file' ($!)";
- print FILE $response->content;
- close FILE;
- }
-}
-elsif ($location eq "local") {
- system "@ARGV >$result_file 2>&1";
-}
-else { die "ERROR: No SMT solver invoked"; }
-
-
-# cache result
-
-if ($use_certs) {
- open CERTS, ">>$certs_file" or
- die "ERROR: Failed to open '$certs_file' ($!)";
- print CERTS
- ("$problem_digest " . ((-s $result_file) + (length "\n")) . "\n");
-
- open FILE, "<$result_file" or
- die "ERROR: Failed to open '$result_file' ($!)";
- foreach (<FILE>) { print CERTS $_; }
- close FILE;
-
- print CERTS "\n";
- close CERTS;
-}
-
--- a/src/HOL/Series.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Series.thy Tue Feb 09 17:06:05 2010 +0100
@@ -381,7 +381,7 @@
shows "norm x < 1 \<Longrightarrow> summable (\<lambda>n. x ^ n)"
by (rule geometric_sums [THEN sums_summable])
-lemma half: "0 < 1 / (2::'a::{number_ring,division_by_zero,ordered_field})"
+lemma half: "0 < 1 / (2::'a::{number_ring,division_by_zero,linordered_field})"
by arith
lemma power_half_series: "(\<lambda>n. (1/2::real)^Suc n) sums 1"
--- a/src/HOL/Statespace/state_fun.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Statespace/state_fun.ML Tue Feb 09 17:06:05 2010 +0100
@@ -310,7 +310,7 @@
val prop = list_all ([("n",nT),("x",eT)],
Logic.mk_equals (Const ("Ex",Tex)$Abs(s,T,eq),
HOLogic.true_const));
- val thm = Drule.standard (prove prop);
+ val thm = Drule.export_without_context (prove prop);
val thm' = if swap then swap_ex_eq OF [thm] else thm
in SOME thm' end
handle TERM _ => NONE)
--- a/src/HOL/SupInf.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/SupInf.thy Tue Feb 09 17:06:05 2010 +0100
@@ -6,38 +6,6 @@
imports RComplete
begin
-lemma minus_max_eq_min:
- fixes x :: "'a::{lordered_ab_group_add, linorder}"
- shows "- (max x y) = min (-x) (-y)"
-by (metis le_imp_neg_le linorder_linear min_max.inf_absorb2 min_max.le_iff_inf min_max.le_iff_sup min_max.sup_absorb1)
-
-lemma minus_min_eq_max:
- fixes x :: "'a::{lordered_ab_group_add, linorder}"
- shows "- (min x y) = max (-x) (-y)"
-by (metis minus_max_eq_min minus_minus)
-
-lemma minus_Max_eq_Min [simp]:
- fixes S :: "'a::{lordered_ab_group_add, linorder} set"
- shows "finite S \<Longrightarrow> S \<noteq> {} \<Longrightarrow> - (Max S) = Min (uminus ` S)"
-proof (induct S rule: finite_ne_induct)
- case (singleton x)
- thus ?case by simp
-next
- case (insert x S)
- thus ?case by (simp add: minus_max_eq_min)
-qed
-
-lemma minus_Min_eq_Max [simp]:
- fixes S :: "'a::{lordered_ab_group_add, linorder} set"
- shows "finite S \<Longrightarrow> S \<noteq> {} \<Longrightarrow> - (Min S) = Max (uminus ` S)"
-proof (induct S rule: finite_ne_induct)
- case (singleton x)
- thus ?case by simp
-next
- case (insert x S)
- thus ?case by (simp add: minus_min_eq_max)
-qed
-
instantiation real :: Sup
begin
definition
--- a/src/HOL/Tools/ATP_Manager/atp_wrapper.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/ATP_Manager/atp_wrapper.ML Tue Feb 09 17:06:05 2010 +0100
@@ -168,7 +168,7 @@
fun run_on probfile =
if File.exists cmd then
write probfile clauses
- |> pair (apfst split_time' (system_out (cmd_line probfile)))
+ |> pair (apfst split_time' (bash_output (cmd_line probfile)))
else error ("Bad executable: " ^ Path.implode cmd);
(* if problemfile has not been exported, delete problemfile; otherwise export proof, too *)
@@ -306,7 +306,7 @@
fun get_systems () =
let
- val (answer, rc) = system_out ("\"$ISABELLE_ATP_MANAGER/SystemOnTPTP\" -w")
+ val (answer, rc) = bash_output ("\"$ISABELLE_ATP_MANAGER/SystemOnTPTP\" -w")
in
if rc <> 0 then error ("Failed to get available systems from SystemOnTPTP:\n" ^ answer)
else split_lines answer
--- a/src/HOL/Tools/Datatype/datatype.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Datatype/datatype.ML Tue Feb 09 17:06:05 2010 +0100
@@ -253,9 +253,11 @@
val rep_const = cterm_of thy
(Const (Sign.intern_const thy ("Rep_" ^ tname), T --> Univ_elT));
val cong' =
- Drule.standard (cterm_instantiate [(cterm_of thy cong_f, rep_const)] arg_cong);
+ Drule.export_without_context
+ (cterm_instantiate [(cterm_of thy cong_f, rep_const)] arg_cong);
val dist =
- Drule.standard (cterm_instantiate [(cterm_of thy distinct_f, rep_const)] distinct_lemma);
+ Drule.export_without_context
+ (cterm_instantiate [(cterm_of thy distinct_f, rep_const)] distinct_lemma);
val (thy', defs', eqns', _) = fold ((make_constr_def tname T) (length constrs))
(constrs ~~ constr_syntax) (Sign.add_path tname thy, defs, [], 1);
in
@@ -532,7 +534,7 @@
let
val dist_thm = Skip_Proof.prove_global thy5 [] [] t (fn _ =>
EVERY [simp_tac (HOL_ss addsimps dist_rewrites') 1])
- in dist_thm :: Drule.standard (dist_thm RS not_sym) :: prove ts end;
+ in dist_thm :: Drule.export_without_context (dist_thm RS not_sym) :: prove ts end;
in prove ts end;
val distinct_thms = map2 (prove_distinct_thms)
--- a/src/HOL/Tools/Function/size.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Function/size.ML Tue Feb 09 17:06:05 2010 +0100
@@ -153,7 +153,7 @@
val ctxt = ProofContext.init thy';
- val simpset1 = HOL_basic_ss addsimps @{thm add_0} :: @{thm add_0_right} ::
+ val simpset1 = HOL_basic_ss addsimps @{thm add_0} :: @{thm Nat.add_0_right} ::
size_def_thms @ size_def_thms' @ rec_rewrites @ extra_rewrites;
val xs = map (fn i => "x" ^ string_of_int i) (1 upto length recTs2);
@@ -197,7 +197,7 @@
fun prove_size_eqs p size_fns size_ofp simpset =
maps (fn (((_, (_, _, constrs)), size_const), T) =>
- map (fn constr => Drule.standard (Skip_Proof.prove ctxt [] []
+ map (fn constr => Drule.export_without_context (Skip_Proof.prove ctxt [] []
(gen_mk_size_eq p (AList.lookup op = (new_type_names ~~ size_fns))
size_ofp size_const T constr)
(fn _ => simp_tac simpset 1))) constrs)
--- a/src/HOL/Tools/Nitpick/kodkod.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/kodkod.ML Tue Feb 09 17:06:05 2010 +0100
@@ -1004,7 +1004,7 @@
read_next_problems (Substring.full (File.read path), [], []) |>> rev ||> rev
(* The fudge term below is to account for Kodkodi's slow start-up time, which
- is partly due to the JVM and partly due to the ML "system" function. *)
+ is partly due to the JVM and partly due to the ML "bash" function. *)
val fudge_ms = 250
(* bool -> Time.time option -> int -> int -> problem list -> outcome *)
@@ -1053,24 +1053,24 @@
val outcome =
let
val code =
- system ("cd " ^ File.shell_quote temp_dir ^ ";\n" ^
- "env CLASSPATH=\"$KODKODI_CLASSPATH:$CLASSPATH\" \
- \JAVA_LIBRARY_PATH=\"$KODKODI_JAVA_LIBRARY_PATH:\
- \$JAVA_LIBRARY_PATH\" \
- \LD_LIBRARY_PATH=\"$KODKODI_JAVA_LIBRARY_PATH:\
- \$LD_LIBRARY_PATH\" \
- \\"$KODKODI\"/bin/kodkodi" ^
- (if ms >= 0 then " -max-msecs " ^ string_of_int ms
- else "") ^
- (if max_solutions > 1 then " -solve-all" else "") ^
- " -max-solutions " ^ string_of_int max_solutions ^
- (if max_threads > 0 then
- " -max-threads " ^ string_of_int max_threads
- else
- "") ^
- " < " ^ File.shell_path in_path ^
- " > " ^ File.shell_path out_path ^
- " 2> " ^ File.shell_path err_path)
+ bash ("cd " ^ File.shell_quote temp_dir ^ ";\n" ^
+ "env CLASSPATH=\"$KODKODI_CLASSPATH:$CLASSPATH\" \
+ \JAVA_LIBRARY_PATH=\"$KODKODI_JAVA_LIBRARY_PATH:\
+ \$JAVA_LIBRARY_PATH\" \
+ \LD_LIBRARY_PATH=\"$KODKODI_JAVA_LIBRARY_PATH:\
+ \$LD_LIBRARY_PATH\" \
+ \\"$KODKODI\"/bin/kodkodi" ^
+ (if ms >= 0 then " -max-msecs " ^ string_of_int ms
+ else "") ^
+ (if max_solutions > 1 then " -solve-all" else "") ^
+ " -max-solutions " ^ string_of_int max_solutions ^
+ (if max_threads > 0 then
+ " -max-threads " ^ string_of_int max_threads
+ else
+ "") ^
+ " < " ^ File.shell_path in_path ^
+ " > " ^ File.shell_path out_path ^
+ " 2> " ^ File.shell_path err_path)
val (ps, nontriv_js) = read_output_file out_path
|>> map (apfst reindex) ||> map reindex
val js = triv_js @ nontriv_js
--- a/src/HOL/Tools/Nitpick/minipick.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/minipick.ML Tue Feb 09 17:06:05 2010 +0100
@@ -200,19 +200,19 @@
else
raise NOT_SUPPORTED "transitive closure for function or pair type"
| Const (@{const_name trancl}, _) => to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name lower_semilattice_class.inf},
+ | Const (@{const_name semilattice_inf_class.inf},
Type ("fun", [Type ("fun", [_, @{typ bool}]), _])) $ t1 $ t2 =>
Intersect (to_R_rep Ts t1, to_R_rep Ts t2)
- | Const (@{const_name lower_semilattice_class.inf}, _) $ _ =>
+ | Const (@{const_name semilattice_inf_class.inf}, _) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name lower_semilattice_class.inf}, _) =>
+ | Const (@{const_name semilattice_inf_class.inf}, _) =>
to_R_rep Ts (eta_expand Ts t 2)
- | Const (@{const_name upper_semilattice_class.sup},
+ | Const (@{const_name semilattice_sup_class.sup},
Type ("fun", [Type ("fun", [_, @{typ bool}]), _])) $ t1 $ t2 =>
Union (to_R_rep Ts t1, to_R_rep Ts t2)
- | Const (@{const_name upper_semilattice_class.sup}, _) $ _ =>
+ | Const (@{const_name semilattice_sup_class.sup}, _) $ _ =>
to_R_rep Ts (eta_expand Ts t 1)
- | Const (@{const_name upper_semilattice_class.sup}, _) =>
+ | Const (@{const_name semilattice_sup_class.sup}, _) =>
to_R_rep Ts (eta_expand Ts t 2)
| Const (@{const_name minus_class.minus},
Type ("fun", [Type ("fun", [_, @{typ bool}]), _])) $ t1 $ t2 =>
--- a/src/HOL/Tools/Nitpick/nitpick_hol.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_hol.ML Tue Feb 09 17:06:05 2010 +0100
@@ -369,8 +369,8 @@
[((@{const_name of_nat}, nat_T --> int_T), 0),
((@{const_name of_nat}, @{typ "unsigned_bit word => signed_bit word"}), 0)]
val built_in_set_consts =
- [(@{const_name lower_semilattice_fun_inst.inf_fun}, 2),
- (@{const_name upper_semilattice_fun_inst.sup_fun}, 2),
+ [(@{const_name semilattice_inf_fun_inst.inf_fun}, 2),
+ (@{const_name semilattice_sup_fun_inst.sup_fun}, 2),
(@{const_name minus_fun_inst.minus_fun}, 2),
(@{const_name ord_fun_inst.less_eq_fun}, 2)]
@@ -2042,6 +2042,7 @@
[] => Ts
| xs => map snd xs)
| _ => insert (op =) T accum
+
(* hol_context -> typ -> typ list *)
fun ground_types_in_type hol_ctxt T = add_ground_types hol_ctxt T []
(* hol_context -> term list -> typ list *)
--- a/src/HOL/Tools/Nitpick/nitpick_mono.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_mono.ML Tue Feb 09 17:06:05 2010 +0100
@@ -667,9 +667,9 @@
in (CFun (ab_set_C, S Minus, ba_set_C), accum) end
| @{const_name trancl} => do_fragile_set_operation T accum
| @{const_name rtrancl} => (print_g "*** rtrancl"; unsolvable)
- | @{const_name lower_semilattice_fun_inst.inf_fun} =>
+ | @{const_name semilattice_inf_fun_inst.inf_fun} =>
do_robust_set_operation T accum
- | @{const_name upper_semilattice_fun_inst.sup_fun} =>
+ | @{const_name semilattice_sup_fun_inst.sup_fun} =>
do_robust_set_operation T accum
| @{const_name finite} =>
let val C1 = ctype_for (domain_type (domain_type T)) in
--- a/src/HOL/Tools/Nitpick/nitpick_nut.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_nut.ML Tue Feb 09 17:06:05 2010 +0100
@@ -655,10 +655,10 @@
| (Const (@{const_name of_nat},
T as @{typ "unsigned_bit word => signed_bit word"}), []) =>
Cst (NatToInt, T, Any)
- | (Const (@{const_name lower_semilattice_fun_inst.inf_fun}, T),
+ | (Const (@{const_name semilattice_inf_fun_inst.inf_fun}, T),
[t1, t2]) =>
Op2 (Intersect, nth_range_type 2 T, Any, sub t1, sub t2)
- | (Const (@{const_name upper_semilattice_fun_inst.sup_fun}, T),
+ | (Const (@{const_name semilattice_sup_fun_inst.sup_fun}, T),
[t1, t2]) =>
Op2 (Union, nth_range_type 2 T, Any, sub t1, sub t2)
| (t0 as Const (@{const_name minus_fun_inst.minus_fun}, T), [t1, t2]) =>
--- a/src/HOL/Tools/Nitpick/nitpick_preproc.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Nitpick/nitpick_preproc.ML Tue Feb 09 17:06:05 2010 +0100
@@ -715,11 +715,11 @@
if gfp then
(lbfp_prefix,
@{const "op |"},
- @{const_name upper_semilattice_fun_inst.sup_fun})
+ @{const_name semilattice_sup_fun_inst.sup_fun})
else
(ubfp_prefix,
@{const "op &"},
- @{const_name lower_semilattice_fun_inst.inf_fun})
+ @{const_name semilattice_inf_fun_inst.inf_fun})
(* unit -> term *)
fun pos () = unrolled_inductive_pred_const hol_ctxt gfp x
|> aux ss Ts js depth polar
--- a/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_core.ML Tue Feb 09 17:06:05 2010 +0100
@@ -577,7 +577,7 @@
(*val elim = singleton (Inductive_Set.codegen_preproc thy) (preprocess_elim thy nparams
(expand_tuples_elim pre_elim))*)
val elim =
- (Drule.standard o Skip_Proof.make_thm thy)
+ (Drule.export_without_context o Skip_Proof.make_thm thy)
(mk_casesrule (ProofContext.init thy) pred intros)
in
mk_pred_data ((intros, SOME elim), no_compilation)
@@ -641,7 +641,7 @@
else ()
val pred = Const (constname, T)
val pre_elim =
- (Drule.standard o Skip_Proof.make_thm thy)
+ (Drule.export_without_context o Skip_Proof.make_thm thy)
(mk_casesrule (ProofContext.init thy) pred pre_intros)
in register_predicate (constname, pre_intros, pre_elim) thy end
@@ -2178,7 +2178,8 @@
(join_preds_modes moded_clauses compiled_terms)
fun prove_by_skip options thy _ _ _ _ compiled_terms =
- map_preds_modes (fn pred => fn mode => fn t => Drule.standard (Skip_Proof.make_thm thy t))
+ map_preds_modes
+ (fn pred => fn mode => fn t => Drule.export_without_context (Skip_Proof.make_thm thy t))
compiled_terms
(* preparation of introduction rules into special datastructures *)
@@ -2269,7 +2270,7 @@
val elim = the_elim_of thy predname
val nparams = nparams_of thy predname
val elim' =
- (Drule.standard o (Skip_Proof.make_thm thy))
+ (Drule.export_without_context o Skip_Proof.make_thm thy)
(mk_casesrule (ProofContext.init thy) nparams intros)
in
if not (Thm.equiv_thm (elim, elim')) then
--- a/src/HOL/Tools/Predicate_Compile/predicate_compile_fun.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Predicate_Compile/predicate_compile_fun.ML Tue Feb 09 17:06:05 2010 +0100
@@ -391,7 +391,7 @@
|> map (fn (concl'::conclprems, _) =>
Logic.list_implies ((flat prems') @ conclprems, concl')))
in
- map (Drule.standard o (Skip_Proof.make_thm thy)) intro_ts'
+ map (Drule.export_without_context o Skip_Proof.make_thm thy) intro_ts'
end
end;
--- a/src/HOL/Tools/Qelim/cooper.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/Qelim/cooper.ML Tue Feb 09 17:06:05 2010 +0100
@@ -79,9 +79,9 @@
| Const (@{const_name Algebras.less_eq}, _) $ y $ z =>
if term_of x aconv y then Le (Thm.dest_arg ct)
else if term_of x aconv z then Ge (Thm.dest_arg1 ct) else Nox
-| Const (@{const_name Ring_and_Field.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$y$_) =>
+| Const (@{const_name Rings.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$y$_) =>
if term_of x aconv y then Dvd (Thm.dest_binop ct ||> Thm.dest_arg) else Nox
-| Const (@{const_name Not},_) $ (Const (@{const_name Ring_and_Field.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$y$_)) =>
+| Const (@{const_name Not},_) $ (Const (@{const_name Rings.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$y$_)) =>
if term_of x aconv y then
NDvd (Thm.dest_binop (Thm.dest_arg ct) ||> Thm.dest_arg) else Nox
| _ => Nox)
@@ -222,8 +222,8 @@
| lin (vs as x::_) (Const (@{const_name Not},_) $ (Const(@{const_name Algebras.less_eq}, T) $ s $ t)) =
lin vs (Const (@{const_name Algebras.less}, T) $ t $ s)
| lin vs (Const (@{const_name Not},T)$t) = Const (@{const_name Not},T)$ (lin vs t)
- | lin (vs as x::_) (Const(@{const_name Ring_and_Field.dvd},_)$d$t) =
- HOLogic.mk_binrel @{const_name Ring_and_Field.dvd} (numeral1 abs d, lint vs t)
+ | lin (vs as x::_) (Const(@{const_name Rings.dvd},_)$d$t) =
+ HOLogic.mk_binrel @{const_name Rings.dvd} (numeral1 abs d, lint vs t)
| lin (vs as x::_) ((b as Const("op =",_))$s$t) =
(case lint vs (subC$t$s) of
(t as a$(m$c$y)$r) =>
@@ -253,7 +253,7 @@
| is_intrel _ = false;
fun linearize_conv ctxt vs ct = case term_of ct of
- Const(@{const_name Ring_and_Field.dvd},_)$d$t =>
+ Const(@{const_name Rings.dvd},_)$d$t =>
let
val th = binop_conv (lint_conv ctxt vs) ct
val (d',t') = Thm.dest_binop (Thm.rhs_of th)
@@ -278,7 +278,7 @@
| _ => dth
end
end
-| Const (@{const_name Not},_)$(Const(@{const_name Ring_and_Field.dvd},_)$_$_) => arg_conv (linearize_conv ctxt vs) ct
+| Const (@{const_name Not},_)$(Const(@{const_name Rings.dvd},_)$_$_) => arg_conv (linearize_conv ctxt vs) ct
| t => if is_intrel t
then (provelin ctxt ((HOLogic.eq_const bT)$t$(lin vs t) |> HOLogic.mk_Trueprop))
RS eq_reflection
@@ -301,7 +301,7 @@
if x aconv y andalso member (op =)
[@{const_name Algebras.less}, @{const_name Algebras.less_eq}] s
then (ins (dest_numeral c) acc, dacc) else (acc,dacc)
- | Const(@{const_name Ring_and_Field.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$(Const(@{const_name Algebras.times},_)$c$y)$_) =>
+ | Const(@{const_name Rings.dvd},_)$_$(Const(@{const_name Algebras.plus},_)$(Const(@{const_name Algebras.times},_)$c$y)$_) =>
if x aconv y then (acc,ins (dest_numeral c) dacc) else (acc,dacc)
| Const("op &",_)$_$_ => h (h (acc,dacc) (Thm.dest_arg1 t)) (Thm.dest_arg t)
| Const("op |",_)$_$_ => h (h (acc,dacc) (Thm.dest_arg1 t)) (Thm.dest_arg t)
@@ -343,7 +343,7 @@
if x=y andalso member (op =)
[@{const_name Algebras.less}, @{const_name Algebras.less_eq}] s
then cv (l div dest_numeral c) t else Thm.reflexive t
- | Const(@{const_name Ring_and_Field.dvd},_)$d$(r as (Const(@{const_name Algebras.plus},_)$(Const(@{const_name Algebras.times},_)$c$y)$_)) =>
+ | Const(@{const_name Rings.dvd},_)$d$(r as (Const(@{const_name Algebras.plus},_)$(Const(@{const_name Algebras.times},_)$c$y)$_)) =>
if x=y then
let
val k = l div dest_numeral c
@@ -562,7 +562,7 @@
| Const("False",_) => F
| Const(@{const_name Algebras.less},_)$t1$t2 => Lt (Sub (i_of_term vs t1,i_of_term vs t2))
| Const(@{const_name Algebras.less_eq},_)$t1$t2 => Le (Sub(i_of_term vs t1,i_of_term vs t2))
- | Const(@{const_name Ring_and_Field.dvd},_)$t1$t2 =>
+ | Const(@{const_name Rings.dvd},_)$t1$t2 =>
(Dvd(HOLogic.dest_number t1 |> snd, i_of_term vs t2) handle _ => cooper "Reification: unsupported dvd") (* FIXME avoid handle _ *)
| @{term "op = :: int => _"}$t1$t2 => Eq (Sub (i_of_term vs t1,i_of_term vs t2))
| @{term "op = :: bool => _ "}$t1$t2 => Iff(qf_of_term ps vs t1,qf_of_term ps vs t2)
--- a/src/HOL/Tools/TFL/post.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/TFL/post.ML Tue Feb 09 17:06:05 2010 +0100
@@ -129,7 +129,7 @@
(*lcp: put a theorem into Isabelle form, using meta-level connectives*)
val meta_outer =
- curry_rule o Drule.standard o
+ curry_rule o Drule.export_without_context o
rule_by_tactic (REPEAT (FIRSTGOAL (resolve_tac [allI, impI, conjI] ORELSE' etac conjE)));
(*Strip off the outer !P*)
@@ -151,7 +151,7 @@
{f = f, R = R, rules = rules,
full_pats_TCs = full_pats_TCs,
TCs = TCs}
- val rules' = map (Drule.standard o ObjectLogic.rulify_no_asm)
+ val rules' = map (Drule.export_without_context o ObjectLogic.rulify_no_asm)
(R.CONJUNCTS rules)
in {induct = meta_outer (ObjectLogic.rulify_no_asm (induction RS spec')),
rules = ListPair.zip(rules', rows),
@@ -180,7 +180,7 @@
| solve_eq (th, [a], i) = [(a, i)]
| solve_eq (th, splitths as (_ :: _), i) =
(writeln "Proving unsplit equation...";
- [((Drule.standard o ObjectLogic.rulify_no_asm)
+ [((Drule.export_without_context o ObjectLogic.rulify_no_asm)
(CaseSplit.splitto splitths th), i)])
(* if there's an error, pretend nothing happened with this definition
We should probably print something out so that the user knows...? *)
@@ -236,7 +236,7 @@
in (theory,
(*return the conjoined induction rule and recursion equations,
with assumptions remaining to discharge*)
- Drule.standard (induction RS (rules RS conjI)))
+ Drule.export_without_context (induction RS (rules RS conjI)))
end
fun defer thy congs fid seqs =
--- a/src/HOL/Tools/arith_data.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/arith_data.ML Tue Feb 09 17:06:05 2010 +0100
@@ -110,8 +110,8 @@
fun prove_conv tacs ctxt (_: thm list) = prove_conv_nohyps tacs ctxt;
-fun prove_conv2 expand_tac norm_tac ss tu = (*FIXME avoid standard*)
- mk_meta_eq (Drule.standard (Goal.prove (Simplifier.the_context ss) [] []
+fun prove_conv2 expand_tac norm_tac ss tu = (* FIXME avoid Drule.export_without_context *)
+ mk_meta_eq (Drule.export_without_context (Goal.prove (Simplifier.the_context ss) [] []
(HOLogic.mk_Trueprop (HOLogic.mk_eq tu))
(K (EVERY [expand_tac, norm_tac ss]))));
--- a/src/HOL/Tools/choice_specification.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/choice_specification.ML Tue Feb 09 17:06:05 2010 +0100
@@ -75,7 +75,7 @@
fun add_specification axiomatic cos arg =
arg |> apsnd Thm.freezeT
|> proc_exprop axiomatic cos
- |> apsnd Drule.standard
+ |> apsnd Drule.export_without_context
(* Collect all intances of constants in term *)
@@ -189,7 +189,7 @@
in
args |> apsnd (remove_alls frees)
|> apsnd undo_imps
- |> apsnd Drule.standard
+ |> apsnd Drule.export_without_context
|> Thm.theory_attributes (map (Attrib.attribute thy) atts)
|> add_final
|> Library.swap
--- a/src/HOL/Tools/inductive_codegen.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/inductive_codegen.ML Tue Feb 09 17:06:05 2010 +0100
@@ -544,7 +544,7 @@
(fn NONE => "X" | SOME k' => string_of_int k')
(ks @ [SOME k]))) arities));
-fun prep_intrs intrs = map (rename_term o #prop o rep_thm o Drule.standard) intrs;
+fun prep_intrs intrs = map (rename_term o #prop o rep_thm o Drule.export_without_context) intrs;
fun constrain cs [] = []
| constrain cs ((s, xs) :: ys) =
--- a/src/HOL/Tools/int_arith.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/int_arith.ML Tue Feb 09 17:06:05 2010 +0100
@@ -71,8 +71,8 @@
val lhss' =
[@{cpat "(?x::?'a::ring_char_0) = (?y::?'a)"},
- @{cpat "(?x::?'a::ordered_idom) < (?y::?'a)"},
- @{cpat "(?x::?'a::ordered_idom) <= (?y::?'a)"}]
+ @{cpat "(?x::?'a::linordered_idom) < (?y::?'a)"},
+ @{cpat "(?x::?'a::linordered_idom) <= (?y::?'a)"}]
val zero_one_idom_simproc =
make_simproc {lhss = lhss' , name = "zero_one_idom_simproc",
@@ -80,9 +80,9 @@
val fast_int_arith_simproc =
Simplifier.simproc @{theory} "fast_int_arith"
- ["(m::'a::{ordered_idom,number_ring}) < n",
- "(m::'a::{ordered_idom,number_ring}) <= n",
- "(m::'a::{ordered_idom,number_ring}) = n"] (K Lin_Arith.simproc);
+ ["(m::'a::{linordered_idom,number_ring}) < n",
+ "(m::'a::{linordered_idom,number_ring}) <= n",
+ "(m::'a::{linordered_idom,number_ring}) = n"] (K Lin_Arith.simproc);
val global_setup = Simplifier.map_simpset
(fn simpset => simpset addsimprocs [fast_int_arith_simproc]);
--- a/src/HOL/Tools/lin_arith.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/lin_arith.ML Tue Feb 09 17:06:05 2010 +0100
@@ -236,7 +236,7 @@
end handle Rat.DIVZERO => NONE;
fun of_lin_arith_sort thy U =
- Sign.of_sort thy (U, @{sort Ring_and_Field.ordered_idom});
+ Sign.of_sort thy (U, @{sort Rings.linordered_idom});
fun allows_lin_arith thy (discrete : string list) (U as Type (D, [])) : bool * bool =
if of_lin_arith_sort thy U then (true, member (op =) discrete D)
@@ -804,18 +804,18 @@
val init_arith_data =
Fast_Arith.map_data (fn {add_mono_thms, mult_mono_thms, inj_thms, lessD, number_of, ...} =>
- {add_mono_thms = @{thms add_mono_thms_ordered_semiring} @ @{thms add_mono_thms_ordered_field} @ add_mono_thms,
+ {add_mono_thms = @{thms add_mono_thms_linordered_semiring} @ @{thms add_mono_thms_linordered_field} @ add_mono_thms,
mult_mono_thms = @{thm mult_strict_left_mono} :: @{thm mult_left_mono} ::
@{lemma "a = b ==> c*a = c*b" by (rule arg_cong)} :: mult_mono_thms,
inj_thms = inj_thms,
lessD = lessD @ [@{thm "Suc_leI"}],
- neqE = [@{thm linorder_neqE_nat}, @{thm linorder_neqE_ordered_idom}],
+ neqE = [@{thm linorder_neqE_nat}, @{thm linorder_neqE_linordered_idom}],
simpset = HOL_basic_ss
addsimps @{thms ring_distribs}
addsimps [@{thm if_True}, @{thm if_False}]
addsimps
- [@{thm "monoid_add_class.add_0_left"},
- @{thm "monoid_add_class.add_0_right"},
+ [@{thm add_0_left},
+ @{thm add_0_right},
@{thm "Zero_not_Suc"}, @{thm "Suc_not_Zero"}, @{thm "le_0_eq"}, @{thm "One_nat_def"},
@{thm "order_less_irrefl"}, @{thm "zero_neq_one"}, @{thm "zero_less_one"},
@{thm "zero_le_one"}, @{thm "zero_neq_one"} RS not_sym, @{thm "not_one_le_zero"},
--- a/src/HOL/Tools/nat_arith.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/nat_arith.ML Tue Feb 09 17:06:05 2010 +0100
@@ -50,8 +50,8 @@
val mk_sum = mk_norm_sum;
val dest_sum = dest_sum;
val prove_conv = Arith_Data.prove_conv2;
- val norm_tac1 = Arith_Data.simp_all_tac [@{thm "add_Suc"}, @{thm "add_Suc_right"},
- @{thm "add_0"}, @{thm "add_0_right"}];
+ val norm_tac1 = Arith_Data.simp_all_tac [@{thm add_Suc}, @{thm add_Suc_right},
+ @{thm add_0}, @{thm Nat.add_0_right}];
val norm_tac2 = Arith_Data.simp_all_tac @{thms add_ac};
fun norm_tac ss = norm_tac1 ss THEN norm_tac2 ss;
fun gen_uncancel_tac rule = let val rule' = rule RS @{thm subst_equals}
--- a/src/HOL/Tools/nat_numeral_simprocs.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/nat_numeral_simprocs.ML Tue Feb 09 17:06:05 2010 +0100
@@ -124,7 +124,7 @@
(*Simplify 1*n and n*1 to n*)
-val add_0s = map rename_numerals [@{thm add_0}, @{thm add_0_right}];
+val add_0s = map rename_numerals [@{thm add_0}, @{thm Nat.add_0_right}];
val mult_1s = map rename_numerals [@{thm nat_mult_1}, @{thm nat_mult_1_right}];
(*Final simplification: cancel + and *; replace Numeral0 by 0 and Numeral1 by 1*)
@@ -136,7 +136,7 @@
val simplify_meta_eq =
Arith_Data.simplify_meta_eq
- ([@{thm nat_numeral_0_eq_0}, @{thm numeral_1_eq_Suc_0}, @{thm add_0}, @{thm add_0_right},
+ ([@{thm nat_numeral_0_eq_0}, @{thm numeral_1_eq_Suc_0}, @{thm add_0}, @{thm Nat.add_0_right},
@{thm mult_0}, @{thm mult_0_right}, @{thm mult_1}, @{thm mult_1_right}] @ contra_rules);
@@ -290,8 +290,8 @@
structure DvdCancelNumeralFactor = CancelNumeralFactorFun
(open CancelNumeralFactorCommon
val prove_conv = Arith_Data.prove_conv
- val mk_bal = HOLogic.mk_binrel @{const_name Ring_and_Field.dvd}
- val dest_bal = HOLogic.dest_bin @{const_name Ring_and_Field.dvd} HOLogic.natT
+ val mk_bal = HOLogic.mk_binrel @{const_name Rings.dvd}
+ val dest_bal = HOLogic.dest_bin @{const_name Rings.dvd} HOLogic.natT
val cancel = @{thm nat_mult_dvd_cancel1} RS trans
val neg_exchanges = false
)
@@ -411,8 +411,8 @@
structure DvdCancelFactor = ExtractCommonTermFun
(open CancelFactorCommon
val prove_conv = Arith_Data.prove_conv
- val mk_bal = HOLogic.mk_binrel @{const_name Ring_and_Field.dvd}
- val dest_bal = HOLogic.dest_bin @{const_name Ring_and_Field.dvd} HOLogic.natT
+ val mk_bal = HOLogic.mk_binrel @{const_name Rings.dvd}
+ val dest_bal = HOLogic.dest_bin @{const_name Rings.dvd} HOLogic.natT
fun simp_conv _ _ = SOME @{thm nat_mult_dvd_cancel_disj}
);
--- a/src/HOL/Tools/numeral_simprocs.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/numeral_simprocs.ML Tue Feb 09 17:06:05 2010 +0100
@@ -181,9 +181,8 @@
(*To let us treat division as multiplication*)
val divide_simps = [@{thm divide_inverse}, @{thm inverse_mult_distrib}, @{thm inverse_inverse_eq}];
-(*push the unary minus down: - x * y = x * - y *)
-val minus_mult_eq_1_to_2 =
- [@{thm mult_minus_left}, @{thm minus_mult_right}] MRS trans |> Drule.standard;
+(*push the unary minus down*)
+val minus_mult_eq_1_to_2 = @{lemma "- (a::'a::ring) * b = a * - b" by simp};
(*to extract again any uncancelled minuses*)
val minus_from_mult_simps =
@@ -256,20 +255,20 @@
"(l::'a::number_ring) = m * n"],
K EqCancelNumerals.proc),
("intless_cancel_numerals",
- ["(l::'a::{ordered_idom,number_ring}) + m < n",
- "(l::'a::{ordered_idom,number_ring}) < m + n",
- "(l::'a::{ordered_idom,number_ring}) - m < n",
- "(l::'a::{ordered_idom,number_ring}) < m - n",
- "(l::'a::{ordered_idom,number_ring}) * m < n",
- "(l::'a::{ordered_idom,number_ring}) < m * n"],
+ ["(l::'a::{linordered_idom,number_ring}) + m < n",
+ "(l::'a::{linordered_idom,number_ring}) < m + n",
+ "(l::'a::{linordered_idom,number_ring}) - m < n",
+ "(l::'a::{linordered_idom,number_ring}) < m - n",
+ "(l::'a::{linordered_idom,number_ring}) * m < n",
+ "(l::'a::{linordered_idom,number_ring}) < m * n"],
K LessCancelNumerals.proc),
("intle_cancel_numerals",
- ["(l::'a::{ordered_idom,number_ring}) + m <= n",
- "(l::'a::{ordered_idom,number_ring}) <= m + n",
- "(l::'a::{ordered_idom,number_ring}) - m <= n",
- "(l::'a::{ordered_idom,number_ring}) <= m - n",
- "(l::'a::{ordered_idom,number_ring}) * m <= n",
- "(l::'a::{ordered_idom,number_ring}) <= m * n"],
+ ["(l::'a::{linordered_idom,number_ring}) + m <= n",
+ "(l::'a::{linordered_idom,number_ring}) <= m + n",
+ "(l::'a::{linordered_idom,number_ring}) - m <= n",
+ "(l::'a::{linordered_idom,number_ring}) <= m - n",
+ "(l::'a::{linordered_idom,number_ring}) * m <= n",
+ "(l::'a::{linordered_idom,number_ring}) <= m * n"],
K LeCancelNumerals.proc)];
structure CombineNumeralsData =
@@ -374,7 +373,7 @@
[@{thm eq_number_of_eq}, @{thm less_number_of}, @{thm le_number_of}] @ simps
fun numeral_simp_tac ss = ALLGOALS (simp_tac (Simplifier.inherit_context ss numeral_simp_ss))
val simplify_meta_eq = Arith_Data.simplify_meta_eq
- [@{thm add_0}, @{thm add_0_right}, @{thm mult_zero_left},
+ [@{thm add_0}, @{thm Nat.add_0_right}, @{thm mult_zero_left},
@{thm mult_zero_right}, @{thm mult_Bit1}, @{thm mult_1_right}];
end
@@ -432,12 +431,12 @@
"(l::'a::{idom,number_ring}) = m * n"],
K EqCancelNumeralFactor.proc),
("ring_less_cancel_numeral_factor",
- ["(l::'a::{ordered_idom,number_ring}) * m < n",
- "(l::'a::{ordered_idom,number_ring}) < m * n"],
+ ["(l::'a::{linordered_idom,number_ring}) * m < n",
+ "(l::'a::{linordered_idom,number_ring}) < m * n"],
K LessCancelNumeralFactor.proc),
("ring_le_cancel_numeral_factor",
- ["(l::'a::{ordered_idom,number_ring}) * m <= n",
- "(l::'a::{ordered_idom,number_ring}) <= m * n"],
+ ["(l::'a::{linordered_idom,number_ring}) * m <= n",
+ "(l::'a::{linordered_idom,number_ring}) <= m * n"],
K LeCancelNumeralFactor.proc),
("int_div_cancel_numeral_factors",
["((l::'a::{semiring_div,number_ring}) * m) div n",
@@ -562,8 +561,8 @@
structure DvdCancelFactor = ExtractCommonTermFun
(open CancelFactorCommon
val prove_conv = Arith_Data.prove_conv
- val mk_bal = HOLogic.mk_binrel @{const_name Ring_and_Field.dvd}
- val dest_bal = HOLogic.dest_bin @{const_name Ring_and_Field.dvd} Term.dummyT
+ val mk_bal = HOLogic.mk_binrel @{const_name Rings.dvd}
+ val dest_bal = HOLogic.dest_bin @{const_name Rings.dvd} Term.dummyT
fun simp_conv _ _ = SOME @{thm dvd_mult_cancel_left}
);
@@ -582,13 +581,13 @@
["(l::'a::idom) * m = n",
"(l::'a::idom) = m * n"],
K EqCancelFactor.proc),
- ("ordered_ring_le_cancel_factor",
- ["(l::'a::ordered_ring) * m <= n",
- "(l::'a::ordered_ring) <= m * n"],
+ ("linordered_ring_le_cancel_factor",
+ ["(l::'a::linordered_ring) * m <= n",
+ "(l::'a::linordered_ring) <= m * n"],
K LeCancelFactor.proc),
- ("ordered_ring_less_cancel_factor",
- ["(l::'a::ordered_ring) * m < n",
- "(l::'a::ordered_ring) < m * n"],
+ ("linordered_ring_less_cancel_factor",
+ ["(l::'a::linordered_ring) * m < n",
+ "(l::'a::linordered_ring) < m * n"],
K LessCancelFactor.proc),
("int_div_cancel_factor",
["((l::'a::semiring_div) * m) div n", "(l::'a::semiring_div) div (m * n)"],
--- a/src/HOL/Tools/record.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/record.ML Tue Feb 09 17:06:05 2010 +0100
@@ -1014,7 +1014,7 @@
else if Goal.future_enabled () then
Goal.future_result (ProofContext.init thy) (Future.fork_pri ~1 prf) prop
else prf ()
- in Drule.standard thm end;
+ in Drule.export_without_context thm end;
fun prove_common immediate stndrd thy asms prop tac =
let
@@ -1023,7 +1023,7 @@
else if immediate orelse not (Goal.future_enabled ()) then Goal.prove
else Goal.prove_future;
val prf = prv (ProofContext.init thy) [] asms prop tac;
- in if stndrd then Drule.standard prf else prf end;
+ in if stndrd then Drule.export_without_context prf else prf end;
val prove_future_global = prove_common false;
val prove_global = prove_common true;
@@ -1072,7 +1072,7 @@
if is_sel_upd_pair thy acc upd
then o_eq_dest
else o_eq_id_dest;
- in Drule.standard (othm RS dest) end;
+ in Drule.export_without_context (othm RS dest) end;
in map get_simp upd_funs end;
fun get_updupd_simp thy defset u u' comp =
@@ -1092,7 +1092,7 @@
REPEAT_DETERM (Iso_Tuple_Support.iso_tuple_intros_tac 1) THEN
TRY (simp_tac (HOL_ss addsimps [id_apply]) 1));
val dest = if comp then o_eq_dest_lhs else o_eq_dest;
- in Drule.standard (othm RS dest) end;
+ in Drule.export_without_context (othm RS dest) end;
fun get_updupd_simps thy term defset =
let
@@ -1279,8 +1279,8 @@
val ss = get_sel_upd_defs thy;
val uathm = get_upd_acc_cong_thm upd acc thy ss;
in
- [Drule.standard (uathm RS updacc_noopE),
- Drule.standard (uathm RS updacc_noop_compE)]
+ [Drule.export_without_context (uathm RS updacc_noopE),
+ Drule.export_without_context (uathm RS updacc_noop_compE)]
end;
(*If f is constant then (f o g) = f. We know that K_skeleton
@@ -1721,8 +1721,8 @@
to prove other theorems. We haven't given names to the accessors
f, g etc yet however, so we generate an ext structure with
free variables as all arguments and allow the introduction tactic to
- operate on it as far as it can. We then use Drule.standard to convert
- the free variables into unifiable variables and unify them with
+ operate on it as far as it can. We then use Drule.export_without_context
+ to convert the free variables into unifiable variables and unify them with
(roughly) the definition of the accessor.*)
fun surject_prf () =
let
@@ -1733,7 +1733,7 @@
REPEAT_ALL_NEW Iso_Tuple_Support.iso_tuple_intros_tac 1;
val tactic2 = REPEAT (rtac surject_assistI 1 THEN rtac refl 1);
val [halfway] = Seq.list_of (tactic1 start);
- val [surject] = Seq.list_of (tactic2 (Drule.standard halfway));
+ val [surject] = Seq.list_of (tactic2 (Drule.export_without_context halfway));
in
surject
end;
@@ -2136,14 +2136,14 @@
fun sel_convs_prf () =
map (prove_simp false ss (sel_defs @ accessor_thms)) sel_conv_props;
val sel_convs = timeit_msg "record sel_convs proof:" sel_convs_prf;
- fun sel_convs_standard_prf () = map Drule.standard sel_convs
+ fun sel_convs_standard_prf () = map Drule.export_without_context sel_convs;
val sel_convs_standard =
timeit_msg "record sel_convs_standard proof:" sel_convs_standard_prf;
fun upd_convs_prf () =
map (prove_simp false ss (upd_defs @ updator_thms)) upd_conv_props;
val upd_convs = timeit_msg "record upd_convs proof:" upd_convs_prf;
- fun upd_convs_standard_prf () = map Drule.standard upd_convs
+ fun upd_convs_standard_prf () = map Drule.export_without_context upd_convs;
val upd_convs_standard =
timeit_msg "record upd_convs_standard proof:" upd_convs_standard_prf;
@@ -2151,7 +2151,7 @@
let
val symdefs = map symmetric (sel_defs @ upd_defs);
val fold_ss = HOL_basic_ss addsimps symdefs;
- val ua_congs = map (Drule.standard o simplify fold_ss) upd_acc_cong_assists;
+ val ua_congs = map (Drule.export_without_context o simplify fold_ss) upd_acc_cong_assists;
in (ua_congs RL [updacc_foldE], ua_congs RL [updacc_unfoldE]) end;
val (fold_congs, unfold_congs) =
timeit_msg "record upd fold/unfold congs:" get_upd_acc_congs;
@@ -2221,7 +2221,7 @@
rtac (prop_subst OF [surjective]),
REPEAT o etac meta_allE, atac]);
val split_meta = timeit_msg "record split_meta proof:" split_meta_prf;
- fun split_meta_standardise () = Drule.standard split_meta;
+ fun split_meta_standardise () = Drule.export_without_context split_meta;
val split_meta_standard =
timeit_msg "record split_meta standard:" split_meta_standardise;
--- a/src/HOL/Tools/sat_solver.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/sat_solver.ML Tue Feb 09 17:06:05 2010 +0100
@@ -282,7 +282,7 @@
(* string -> (PropLogic.prop_formula -> unit) -> (unit -> result) -> solver *)
fun make_external_solver cmd writefn readfn fm =
- (writefn fm; system cmd; readfn ());
+ (writefn fm; bash cmd; readfn ());
(* ------------------------------------------------------------------------- *)
(* read_dimacs_cnf_file: returns a propositional formula that corresponds to *)
@@ -586,7 +586,7 @@
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
val proofpath = File.tmp_path (Path.explode ("result" ^ serial_str ^ ".prf"))
- val cmd = (getenv "MINISAT_HOME") ^ "/minisat " ^ (Path.implode inpath) ^ " -r " ^ (Path.implode outpath) ^ " -t " ^ (Path.implode proofpath) ^ "> /dev/null"
+ val cmd = getenv "MINISAT_HOME" ^ "/minisat " ^ File.shell_path inpath ^ " -r " ^ File.shell_path outpath ^ " -t " ^ File.shell_path proofpath ^ "> /dev/null"
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath fm
fun readfn () = SatSolver.read_std_result_file outpath ("SAT", "", "UNSAT")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
@@ -767,11 +767,11 @@
let
fun minisat fm =
let
- val _ = if (getenv "MINISAT_HOME") = "" then raise SatSolver.NOT_CONFIGURED else ()
+ val _ = if getenv "MINISAT_HOME" = "" then raise SatSolver.NOT_CONFIGURED else ()
val serial_str = serial_string ()
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
- val cmd = (getenv "MINISAT_HOME") ^ "/minisat " ^ (Path.implode inpath) ^ " -r " ^ (Path.implode outpath) ^ " > /dev/null"
+ val cmd = getenv "MINISAT_HOME" ^ "/minisat " ^ File.shell_path inpath ^ " -r " ^ File.shell_path outpath ^ " > /dev/null"
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath (PropLogic.defcnf fm)
fun readfn () = SatSolver.read_std_result_file outpath ("SAT", "", "UNSAT")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
@@ -926,11 +926,11 @@
let
fun zchaff fm =
let
- val _ = if (getenv "ZCHAFF_HOME") = "" then raise SatSolver.NOT_CONFIGURED else ()
+ val _ = if getenv "ZCHAFF_HOME" = "" then raise SatSolver.NOT_CONFIGURED else ()
val serial_str = serial_string ()
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
- val cmd = (getenv "ZCHAFF_HOME") ^ "/zchaff " ^ (Path.implode inpath) ^ " > " ^ (Path.implode outpath)
+ val cmd = getenv "ZCHAFF_HOME" ^ "/zchaff " ^ File.shell_path inpath ^ " > " ^ File.shell_path outpath
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath (PropLogic.defcnf fm)
fun readfn () = SatSolver.read_std_result_file outpath ("Instance Satisfiable", "", "Instance Unsatisfiable")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
@@ -957,7 +957,7 @@
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
val exec = getenv "BERKMIN_EXE"
- val cmd = (getenv "BERKMIN_HOME") ^ "/" ^ (if exec = "" then "BerkMin561" else exec) ^ " " ^ (Path.implode inpath) ^ " > " ^ (Path.implode outpath)
+ val cmd = getenv "BERKMIN_HOME" ^ "/" ^ (if exec = "" then "BerkMin561" else exec) ^ " " ^ File.shell_path inpath ^ " > " ^ File.shell_path outpath
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath (PropLogic.defcnf fm)
fun readfn () = SatSolver.read_std_result_file outpath ("Satisfiable !!", "solution =", "UNSATISFIABLE !!")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
@@ -983,7 +983,7 @@
val serial_str = serial_string ()
val inpath = File.tmp_path (Path.explode ("isabelle" ^ serial_str ^ ".cnf"))
val outpath = File.tmp_path (Path.explode ("result" ^ serial_str))
- val cmd = (getenv "JERUSAT_HOME") ^ "/Jerusat1.3 " ^ (Path.implode inpath) ^ " > " ^ (Path.implode outpath)
+ val cmd = getenv "JERUSAT_HOME" ^ "/Jerusat1.3 " ^ File.shell_path inpath ^ " > " ^ File.shell_path outpath
fun writefn fm = SatSolver.write_dimacs_cnf_file inpath (PropLogic.defcnf fm)
fun readfn () = SatSolver.read_std_result_file outpath ("s SATISFIABLE", "v ", "s UNSATISFIABLE")
val _ = if File.exists inpath then warning ("overwriting existing file " ^ quote (Path.implode inpath)) else ()
--- a/src/HOL/Tools/split_rule.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/split_rule.ML Tue Feb 09 17:06:05 2010 +0100
@@ -100,13 +100,13 @@
| (t, ts) => fold collect_vars ts);
-val split_rule_var = (Drule.standard o remove_internal_split) oo split_rule_var';
+val split_rule_var = (Drule.export_without_context o remove_internal_split) oo split_rule_var';
(*curries ALL function variables occurring in a rule's conclusion*)
fun split_rule rl =
fold_rev split_rule_var' (OldTerm.term_vars (concl_of rl)) rl
|> remove_internal_split
- |> Drule.standard;
+ |> Drule.export_without_context;
(*curries ALL function variables*)
fun complete_split_rule rl =
@@ -117,7 +117,7 @@
in
fst (fold_rev complete_split_rule_var vars (rl, xs))
|> remove_internal_split
- |> Drule.standard
+ |> Drule.export_without_context
|> Rule_Cases.save rl
end;
@@ -137,7 +137,7 @@
rl
|> fold_index one_goal xss
|> Simplifier.full_simplify split_rule_ss
- |> Drule.standard
+ |> Drule.export_without_context
|> Rule_Cases.save rl
end;
--- a/src/HOL/Tools/typedef.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Tools/typedef.ML Tue Feb 09 17:06:05 2010 +0100
@@ -122,7 +122,7 @@
let
val cert = Thm.cterm_of (Thm.theory_of_thm def_eq);
val goal_eq = MetaSimplifier.rewrite true [def_eq] (cert goal');
- in Drule.standard (Drule.equal_elim_rule2 OF [goal_eq, th]) end;
+ in Drule.export_without_context (Drule.equal_elim_rule2 OF [goal_eq, th]) end;
fun typedef_result inhabited =
ObjectLogic.typedecl (t, vs, mx)
@@ -139,7 +139,7 @@
##> Theory.add_deps "" (dest_Const AbsC) typedef_deps
#-> (fn ([type_definition], set_def) => fn thy1 =>
let
- fun make th = Drule.standard (th OF [type_definition]);
+ fun make th = Drule.export_without_context (th OF [type_definition]);
val ([Rep, Rep_inverse, Abs_inverse, Rep_inject, Abs_inject,
Rep_cases, Abs_cases, Rep_induct, Abs_induct], thy2) =
thy1
--- a/src/HOL/Transcendental.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Transcendental.thy Tue Feb 09 17:06:05 2010 +0100
@@ -381,7 +381,7 @@
done
lemma real_setsum_nat_ivl_bounded2:
- fixes K :: "'a::ordered_semidom"
+ fixes K :: "'a::linordered_semidom"
assumes f: "\<And>p::nat. p < n \<Longrightarrow> f p \<le> K"
assumes K: "0 \<le> K"
shows "setsum f {0..<n-k} \<le> of_nat n * K"
@@ -2904,10 +2904,12 @@
next
case False hence "0 < \<bar>x\<bar>" and "- \<bar>x\<bar> < \<bar>x\<bar>" by auto
have "suminf (?c (-\<bar>x\<bar>)) - arctan (-\<bar>x\<bar>) = suminf (?c 0) - arctan 0"
- by (rule suminf_eq_arctan_bounded[where x="0" and a="-\<bar>x\<bar>" and b="\<bar>x\<bar>", symmetric], auto simp add: `\<bar>x\<bar> < r` `-\<bar>x\<bar> < \<bar>x\<bar>`)
+ by (rule suminf_eq_arctan_bounded[where x="0" and a="-\<bar>x\<bar>" and b="\<bar>x\<bar>", symmetric])
+ (simp_all only: `\<bar>x\<bar> < r` `-\<bar>x\<bar> < \<bar>x\<bar>` neg_less_iff_less)
moreover
have "suminf (?c x) - arctan x = suminf (?c (-\<bar>x\<bar>)) - arctan (-\<bar>x\<bar>)"
- by (rule suminf_eq_arctan_bounded[where x="x" and a="-\<bar>x\<bar>" and b="\<bar>x\<bar>"], auto simp add: `\<bar>x\<bar> < r` `-\<bar>x\<bar> < \<bar>x\<bar>`)
+ by (rule suminf_eq_arctan_bounded[where x="x" and a="-\<bar>x\<bar>" and b="\<bar>x\<bar>"])
+ (simp_all only: `\<bar>x\<bar> < r` `-\<bar>x\<bar> < \<bar>x\<bar>` neg_less_iff_less)
ultimately
show ?thesis using suminf_arctan_zero by auto
qed
--- a/src/HOL/UNITY/PPROD.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/UNITY/PPROD.thy Tue Feb 09 17:06:05 2010 +0100
@@ -11,17 +11,14 @@
theory PPROD imports Lift_prog begin
constdefs
-
PLam :: "[nat set, nat => ('b * ((nat=>'b) * 'c)) program]
=> ((nat=>'b) * 'c) program"
"PLam I F == \<Squnion>i \<in> I. lift i (F i)"
syntax
- "@PLam" :: "[pttrn, nat set, 'b set] => (nat => 'b) set"
- ("(3plam _:_./ _)" 10)
-
+ "_PLam" :: "[pttrn, nat set, 'b set] => (nat => 'b) set" ("(3plam _:_./ _)" 10)
translations
- "plam x : A. B" == "PLam A (%x. B)"
+ "plam x : A. B" == "CONST PLam A (%x. B)"
(*** Basic properties ***)
--- a/src/HOL/UNITY/Union.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/UNITY/Union.thy Tue Feb 09 17:06:05 2010 +0100
@@ -36,19 +36,19 @@
"safety_prop X == SKIP: X & (\<forall>G. Acts G \<subseteq> UNION X Acts --> G \<in> X)"
syntax
- "@JOIN1" :: "[pttrns, 'b set] => 'b set" ("(3JN _./ _)" 10)
- "@JOIN" :: "[pttrn, 'a set, 'b set] => 'b set" ("(3JN _:_./ _)" 10)
+ "_JOIN1" :: "[pttrns, 'b set] => 'b set" ("(3JN _./ _)" 10)
+ "_JOIN" :: "[pttrn, 'a set, 'b set] => 'b set" ("(3JN _:_./ _)" 10)
translations
- "JN x : A. B" == "JOIN A (%x. B)"
- "JN x y. B" == "JN x. JN y. B"
- "JN x. B" == "JOIN CONST UNIV (%x. B)"
+ "JN x: A. B" == "CONST JOIN A (%x. B)"
+ "JN x y. B" == "JN x. JN y. B"
+ "JN x. B" == "JOIN CONST UNIV (%x. B)"
syntax (xsymbols)
SKIP :: "'a program" ("\<bottom>")
Join :: "['a program, 'a program] => 'a program" (infixl "\<squnion>" 65)
- "@JOIN1" :: "[pttrns, 'b set] => 'b set" ("(3\<Squnion> _./ _)" 10)
- "@JOIN" :: "[pttrn, 'a set, 'b set] => 'b set" ("(3\<Squnion> _\<in>_./ _)" 10)
+ "_JOIN1" :: "[pttrns, 'b set] => 'b set" ("(3\<Squnion> _./ _)" 10)
+ "_JOIN" :: "[pttrn, 'a set, 'b set] => 'b set" ("(3\<Squnion> _\<in>_./ _)" 10)
subsection{*SKIP*}
--- a/src/HOL/Word/BinGeneral.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Word/BinGeneral.thy Tue Feb 09 17:06:05 2010 +0100
@@ -742,7 +742,7 @@
lemma sb_inc_lem':
"(a::int) < - (2^k) \<Longrightarrow> a + 2^k + 2^(Suc k) <= (a + 2^k) mod 2^(Suc k)"
- by (rule iffD1 [OF less_diff_eq, THEN sb_inc_lem, simplified OrderedGroup.diff_0])
+ by (rule sb_inc_lem) simp
lemma sbintrunc_inc:
"x < - (2^n) ==> x + 2^(Suc n) <= sbintrunc n x"
--- a/src/HOL/Word/Word.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/Word/Word.thy Tue Feb 09 17:06:05 2010 +0100
@@ -2,7 +2,7 @@
Author: Gerwin Klein, NICTA
*)
-header {* Word Library interafce *}
+header {* Word Library interface *}
theory Word
imports WordGenLib
--- a/src/HOL/ZF/Games.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/ZF/Games.thy Tue Feb 09 17:06:05 2010 +0100
@@ -922,7 +922,7 @@
apply (auto simp add: Pg_def quotient_def)
done
-instance Pg :: pordered_ab_group_add
+instance Pg :: ordered_ab_group_add
proof
fix a b c :: Pg
show "a - b = a + (- b)" by (simp add: Pg_diff_def)
--- a/src/HOL/ex/Numeral.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/ex/Numeral.thy Tue Feb 09 17:06:05 2010 +0100
@@ -442,12 +442,12 @@
end
subsubsection {*
- Comparisons: class @{text ordered_semidom}
+ Comparisons: class @{text linordered_semidom}
*}
text {* Could be perhaps more general than here. *}
-context ordered_semidom
+context linordered_semidom
begin
lemma of_num_pos [numeral]: "0 < of_num n"
@@ -490,7 +490,7 @@
end
-context ordered_idom
+context linordered_idom
begin
lemma minus_of_num_less_of_num_iff: "- of_num m < of_num n"
@@ -896,19 +896,19 @@
declare (in semiring_char_0) of_num_eq_one_iff [simp]
declare (in semiring_char_0) one_eq_of_num_iff [simp]
-declare (in ordered_semidom) of_num_pos [simp]
-declare (in ordered_semidom) of_num_less_eq_iff [simp]
-declare (in ordered_semidom) of_num_less_eq_one_iff [simp]
-declare (in ordered_semidom) one_less_eq_of_num_iff [simp]
-declare (in ordered_semidom) of_num_less_iff [simp]
-declare (in ordered_semidom) of_num_less_one_iff [simp]
-declare (in ordered_semidom) one_less_of_num_iff [simp]
-declare (in ordered_semidom) of_num_nonneg [simp]
-declare (in ordered_semidom) of_num_less_zero_iff [simp]
-declare (in ordered_semidom) of_num_le_zero_iff [simp]
+declare (in linordered_semidom) of_num_pos [simp]
+declare (in linordered_semidom) of_num_less_eq_iff [simp]
+declare (in linordered_semidom) of_num_less_eq_one_iff [simp]
+declare (in linordered_semidom) one_less_eq_of_num_iff [simp]
+declare (in linordered_semidom) of_num_less_iff [simp]
+declare (in linordered_semidom) of_num_less_one_iff [simp]
+declare (in linordered_semidom) one_less_of_num_iff [simp]
+declare (in linordered_semidom) of_num_nonneg [simp]
+declare (in linordered_semidom) of_num_less_zero_iff [simp]
+declare (in linordered_semidom) of_num_le_zero_iff [simp]
-declare (in ordered_idom) le_signed_numeral_special [simp]
-declare (in ordered_idom) less_signed_numeral_special [simp]
+declare (in linordered_idom) le_signed_numeral_special [simp]
+declare (in linordered_idom) less_signed_numeral_special [simp]
declare (in semiring_1_minus) Dig_of_num_minus_one [simp]
declare (in semiring_1_minus) Dig_one_minus_of_num [simp]
--- a/src/HOL/ex/RPred.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/ex/RPred.thy Tue Feb 09 17:06:05 2010 +0100
@@ -25,7 +25,7 @@
(* (infixl "\<squnion>" 80) *)
where
"supp RP1 RP2 = (\<lambda>s. let (P1, s') = RP1 s; (P2, s'') = RP2 s'
- in (upper_semilattice_class.sup P1 P2, s''))"
+ in (semilattice_sup_class.sup P1 P2, s''))"
definition if_rpred :: "bool \<Rightarrow> unit rpred"
where
--- a/src/HOL/ex/ReflectionEx.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/ex/ReflectionEx.thy Tue Feb 09 17:06:05 2010 +0100
@@ -385,7 +385,7 @@
(* An example for equations containing type variables *)
datatype prod = Zero | One | Var nat | Mul prod prod
| Pw prod nat | PNM nat nat prod
-consts Iprod :: " prod \<Rightarrow> ('a::{ordered_idom}) list \<Rightarrow>'a"
+consts Iprod :: " prod \<Rightarrow> ('a::{linordered_idom}) list \<Rightarrow>'a"
primrec
"Iprod Zero vs = 0"
"Iprod One vs = 1"
@@ -397,7 +397,7 @@
datatype sgn = Pos prod | Neg prod | ZeroEq prod | NZeroEq prod | Tr | F
| Or sgn sgn | And sgn sgn
-consts Isgn :: " sgn \<Rightarrow> ('a::{ordered_idom}) list \<Rightarrow>bool"
+consts Isgn :: " sgn \<Rightarrow> ('a::{linordered_idom}) list \<Rightarrow>bool"
primrec
"Isgn Tr vs = True"
"Isgn F vs = False"
@@ -410,7 +410,7 @@
lemmas eqs = Isgn.simps Iprod.simps
-lemma "(x::'a::{ordered_idom})^4 * y * z * y^2 * z^23 > 0"
+lemma "(x::'a::{linordered_idom})^4 * y * z * y^2 * z^23 > 0"
apply (reify eqs)
oops
--- a/src/HOL/ex/svc_funcs.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOL/ex/svc_funcs.ML Tue Feb 09 17:06:05 2010 +0100
@@ -62,11 +62,11 @@
val _ = if !trace then tracing ("Calling SVC:\n" ^ svc_input) else ()
val svc_input_file = File.tmp_path (Path.basic "SVM_in");
val svc_output_file = File.tmp_path (Path.basic "SVM_out");
- val _ = (File.write svc_input_file svc_input;
- #1 (system_out (check_valid ^ " -dump-result " ^
- File.shell_path svc_output_file ^
- " " ^ File.shell_path svc_input_file ^
- ">/dev/null 2>&1")))
+ val _ = File.write svc_input_file svc_input;
+ val _ =
+ bash_output (check_valid ^ " -dump-result " ^
+ File.shell_path svc_output_file ^ " " ^ File.shell_path svc_input_file ^
+ ">/dev/null 2>&1")
val svc_output =
(case try File.read svc_output_file of
SOME out => out
--- a/src/HOLCF/Fix.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOLCF/Fix.thy Tue Feb 09 17:06:05 2010 +0100
@@ -73,6 +73,10 @@
apply simp
done
+lemma iterate_below_fix: "iterate n\<cdot>f\<cdot>\<bottom> \<sqsubseteq> fix\<cdot>f"
+ unfolding fix_def2
+ using chain_iterate by (rule is_ub_thelub)
+
text {*
Kleene's fixed point theorems for continuous functions in pointed
omega cpo's
--- a/src/HOLCF/Tools/Domain/domain_isomorphism.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOLCF/Tools/Domain/domain_isomorphism.ML Tue Feb 09 17:06:05 2010 +0100
@@ -251,7 +251,7 @@
(* register unfold theorems *)
val (unfold_thms, thy) =
- (PureThy.add_thms o map (Thm.no_attributes o apsnd Drule.standard))
+ (PureThy.add_thms o map (Thm.no_attributes o apsnd Drule.export_without_context))
(mk_unfold_thms unfold_binds tuple_unfold_thm) thy;
in
((proj_thms, unfold_thms), thy)
@@ -446,7 +446,7 @@
(* prove isomorphism and isodefl rules *)
fun mk_iso_thms ((tbind, REP_eq), (rep_def, abs_def)) thy =
let
- fun make thm = Drule.standard (thm OF [REP_eq, abs_def, rep_def]);
+ fun make thm = Drule.export_without_context (thm OF [REP_eq, abs_def, rep_def]);
val rep_iso_thm = make @{thm domain_rep_iso};
val abs_iso_thm = make @{thm domain_abs_iso};
val isodefl_thm = make @{thm isodefl_abs_rep};
@@ -545,7 +545,7 @@
val thmR = thm RS @{thm conjunct2};
in (n, thmL):: conjuncts ns thmR end;
val (isodefl_thms, thy) = thy |>
- (PureThy.add_thms o map (Thm.no_attributes o apsnd Drule.standard))
+ (PureThy.add_thms o map (Thm.no_attributes o apsnd Drule.export_without_context))
(conjuncts isodefl_binds isodefl_thm);
val thy = IsodeflData.map (fold Thm.add_thm isodefl_thms) thy;
--- a/src/HOLCF/Tools/Domain/domain_theorems.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOLCF/Tools/Domain/domain_theorems.ML Tue Feb 09 17:06:05 2010 +0100
@@ -180,7 +180,7 @@
val rep_strict = ax_abs_iso RS (allI RS retraction_strict);
val abs_defin' = iso_locale RS iso_abs_defin';
val rep_defin' = iso_locale RS iso_rep_defin';
-val iso_rews = map Drule.standard [ax_abs_iso,ax_rep_iso,abs_strict,rep_strict];
+val iso_rews = map Drule.export_without_context [ax_abs_iso, ax_rep_iso, abs_strict, rep_strict];
(* ----- generating beta reduction rules from definitions-------------------- *)
@@ -263,7 +263,7 @@
val exhaust = pg con_appls (mk_trp exh) (K tacs);
val _ = trace " Proving casedist...";
val casedist =
- Drule.standard (rewrite_rule exh_casedists (exhaust RS exh_casedist0));
+ Drule.export_without_context (rewrite_rule exh_casedists (exhaust RS exh_casedist0));
end;
local
@@ -554,7 +554,7 @@
flat
(ListPair.map (distinct c) ((map #1 cs),leqs)) @
distincts cs;
- in map Drule.standard (distincts (cons ~~ distincts_le)) end;
+ in map Drule.export_without_context (distincts (cons ~~ distincts_le)) end;
local
fun pgterm rel con args =
@@ -593,7 +593,12 @@
val goal = mk_trp (strict (dc_copy `% "f"));
val rules = [abs_strict, rep_strict] @ @{thms domain_map_stricts};
val tacs = [asm_simp_tac (HOLCF_ss addsimps rules) 1];
- in pg [ax_copy_def] goal (K tacs) end;
+ in
+ SOME (pg [ax_copy_def] goal (K tacs))
+ handle
+ THM (s, _, _) => (trace s; NONE)
+ | ERROR s => (trace s; NONE)
+ end;
local
fun copy_app (con, args) =
@@ -605,6 +610,9 @@
(proj (%:"f") eqs) (dtyp_of arg) ` (%# arg)
else (%# arg);
val rhs = con_app2 con one_rhs args;
+ fun is_rec arg = Datatype_Aux.is_rec_type (dtyp_of arg);
+ fun is_nonlazy_rec arg = is_rec arg andalso not (is_lazy arg);
+ fun nonlazy_rec args = map vname (filter is_nonlazy_rec args);
val goal = lift_defined %: (nonlazy_rec args, mk_trp (lhs === rhs));
val args' = filter_out (fn a => is_rec a orelse is_lazy a) args;
val stricts = abs_strict :: rep_strict :: @{thms domain_map_stricts};
@@ -621,18 +629,23 @@
fun one_strict (con, args) =
let
val goal = mk_trp (dc_copy`UU`(con_app con args) === UU);
- val rews = copy_strict :: copy_apps @ con_rews;
+ val rews = the_list copy_strict @ copy_apps @ con_rews;
fun tacs ctxt = map (case_UU_tac ctxt rews 1) (nonlazy args) @
[asm_simp_tac (HOLCF_ss addsimps rews) 1];
- in pg [] goal tacs end;
+ in
+ SOME (pg [] goal tacs)
+ handle
+ THM (s, _, _) => (trace s; NONE)
+ | ERROR s => (trace s; NONE)
+ end;
fun has_nonlazy_rec (_, args) = exists is_nonlazy_rec args;
in
val _ = trace " Proving copy_stricts...";
- val copy_stricts = map one_strict (filter has_nonlazy_rec cons);
+ val copy_stricts = map_filter one_strict (filter has_nonlazy_rec cons);
end;
-val copy_rews = copy_strict :: copy_apps @ copy_stricts;
+val copy_rews = the_list copy_strict @ copy_apps @ copy_stricts;
in
thy
@@ -706,57 +719,48 @@
val copy_take_defs =
(if n_eqs = 1 then [] else [ax_copy2_def]) @ axs_take_def;
val _ = trace " Proving take_stricts...";
- val take_stricts =
+ fun one_take_strict ((dn, args), _) =
let
- fun one_eq ((dn, args), _) = strict (dc_take dn $ %:"n");
- val goal = mk_trp (foldr1 mk_conj (map one_eq eqs));
- fun tacs ctxt = [
- InductTacs.induct_tac ctxt [[SOME "n"]] 1,
- simp_tac iterate_Cprod_ss 1,
- asm_simp_tac (iterate_Cprod_ss addsimps copy_rews) 1];
- in pg copy_take_defs goal tacs end;
-
- val take_stricts' = rewrite_rule copy_take_defs take_stricts;
+ val goal = mk_trp (strict (dc_take dn $ %:"n"));
+ val rules = [
+ @{thm monofun_fst [THEN monofunE]},
+ @{thm monofun_snd [THEN monofunE]}];
+ val tacs = [
+ rtac @{thm UU_I} 1,
+ rtac @{thm below_eq_trans} 1,
+ resolve_tac axs_reach 2,
+ rtac @{thm monofun_cfun_fun} 1,
+ REPEAT (resolve_tac rules 1),
+ rtac @{thm iterate_below_fix} 1];
+ in pg axs_take_def goal (K tacs) end;
+ val take_stricts = map one_take_strict eqs;
fun take_0 n dn =
let
- val goal = mk_trp ((dc_take dn $ %%: @{const_name Algebras.zero}) `% x_name n === UU);
+ val goal = mk_trp ((dc_take dn $ @{term "0::nat"}) `% x_name n === UU);
in pg axs_take_def goal (K [simp_tac iterate_Cprod_ss 1]) end;
val take_0s = mapn take_0 1 dnames;
- fun c_UU_tac ctxt = case_UU_tac ctxt (take_stricts'::copy_con_rews) 1;
val _ = trace " Proving take_apps...";
- val take_apps =
+ fun one_take_app dn (con, args) =
let
- fun mk_eqn dn (con, args) =
- let
- fun mk_take n = dc_take (List.nth (dnames, n)) $ %:"n";
- fun one_rhs arg =
- if Datatype_Aux.is_rec_type (dtyp_of arg)
- then Domain_Axioms.copy_of_dtyp map_tab
- mk_take (dtyp_of arg) ` (%# arg)
- else (%# arg);
- val lhs = (dc_take dn $ (%%:"Suc" $ %:"n"))`(con_app con args);
- val rhs = con_app2 con one_rhs args;
- in Library.foldr mk_all (map vname args, lhs === rhs) end;
- fun mk_eqns ((dn, _), cons) = map (mk_eqn dn) cons;
- val goal = mk_trp (foldr1 mk_conj (maps mk_eqns eqs));
- val simps = filter (has_fewer_prems 1) copy_rews;
- fun con_tac ctxt (con, args) =
- if nonlazy_rec args = []
- then all_tac
- else EVERY (map (c_UU_tac ctxt) (nonlazy_rec args)) THEN
- asm_full_simp_tac (HOLCF_ss addsimps copy_rews) 1;
- fun eq_tacs ctxt ((dn, _), cons) = map (con_tac ctxt) cons;
- fun tacs ctxt =
- simp_tac iterate_Cprod_ss 1 ::
- InductTacs.induct_tac ctxt [[SOME "n"]] 1 ::
- simp_tac (iterate_Cprod_ss addsimps copy_con_rews) 1 ::
- asm_full_simp_tac (HOLCF_ss addsimps simps) 1 ::
- TRY (safe_tac HOL_cs) ::
- maps (eq_tacs ctxt) eqs;
- in pg copy_take_defs goal tacs end;
+ fun mk_take n = dc_take (List.nth (dnames, n)) $ %:"n";
+ fun one_rhs arg =
+ if Datatype_Aux.is_rec_type (dtyp_of arg)
+ then Domain_Axioms.copy_of_dtyp map_tab
+ mk_take (dtyp_of arg) ` (%# arg)
+ else (%# arg);
+ val lhs = (dc_take dn $ (%%:"Suc" $ %:"n"))`(con_app con args);
+ val rhs = con_app2 con one_rhs args;
+ fun is_rec arg = Datatype_Aux.is_rec_type (dtyp_of arg);
+ fun is_nonlazy_rec arg = is_rec arg andalso not (is_lazy arg);
+ fun nonlazy_rec args = map vname (filter is_nonlazy_rec args);
+ val goal = lift_defined %: (nonlazy_rec args, mk_trp (lhs === rhs));
+ val tacs = [asm_simp_tac (HOLCF_ss addsimps copy_con_rews) 1];
+ in pg copy_take_defs goal (K tacs) end;
+ fun one_take_apps ((dn, _), cons) = map (one_take_app dn) cons;
+ val take_apps = maps one_take_apps eqs;
in
- val take_rews = map Drule.standard
- (atomize global_ctxt take_stricts @ take_0s @ atomize global_ctxt take_apps);
+ val take_rews = map Drule.export_without_context
+ (take_stricts @ take_0s @ take_apps);
end; (* local *)
local
--- a/src/HOLCF/Tools/pcpodef.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOLCF/Tools/pcpodef.ML Tue Feb 09 17:06:05 2010 +0100
@@ -89,7 +89,7 @@
(Tactic.rtac (@{thm typedef_cpo} OF cpo_thms) 1);
(* transfer thms so that they will know about the new cpo instance *)
val cpo_thms' = map (Thm.transfer thy2) cpo_thms;
- fun make thm = Drule.standard (thm OF cpo_thms');
+ fun make thm = Drule.export_without_context (thm OF cpo_thms');
val ([adm, cont_Rep, cont_Abs, lub, thelub, compact], thy3) =
thy2
|> Sign.add_path (Binding.name_of name)
@@ -127,7 +127,7 @@
|> AxClass.prove_arity (full_tname, lhs_sorts, @{sort pcpo})
(Tactic.rtac (@{thm typedef_pcpo} OF pcpo_thms) 1);
val pcpo_thms' = map (Thm.transfer thy2) pcpo_thms;
- fun make thm = Drule.standard (thm OF pcpo_thms');
+ fun make thm = Drule.export_without_context (thm OF pcpo_thms');
val ([Rep_strict, Abs_strict, Rep_strict_iff, Abs_strict_iff,
Rep_defined, Abs_defined], thy3) =
thy2
--- a/src/HOLCF/Tools/repdef.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/HOLCF/Tools/repdef.ML Tue Feb 09 17:06:05 2010 +0100
@@ -139,7 +139,7 @@
|> Sign.add_path (Binding.name_of name)
|> PureThy.add_thms
[((Binding.prefix_name "REP_" name,
- Drule.standard (@{thm typedef_REP} OF typedef_thms')), [])]
+ Drule.export_without_context (@{thm typedef_REP} OF typedef_thms')), [])]
||> Sign.parent_path;
val rep_info =
--- a/src/Provers/hypsubst.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Provers/hypsubst.ML Tue Feb 09 17:06:05 2010 +0100
@@ -165,7 +165,7 @@
end;
-val ssubst = Drule.standard (Data.sym RS Data.subst);
+val ssubst = Drule.export_without_context (Data.sym RS Data.subst);
fun inst_subst_tac b rl = CSUBGOAL (fn (cBi, i) =>
case try (Logic.strip_assums_hyp #> hd #>
--- a/src/Provers/typedsimp.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Provers/typedsimp.ML Tue Feb 09 17:06:05 2010 +0100
@@ -43,11 +43,11 @@
(*For simplifying both sides of an equation:
[| a=c; b=c |] ==> b=a
Can use resolve_tac [split_eqn] to prepare an equation for simplification. *)
-val split_eqn = Drule.standard (sym RSN (2,trans) RS sym);
+val split_eqn = Drule.export_without_context (sym RSN (2,trans) RS sym);
(* [| a=b; b=c |] ==> reduce(a,c) *)
-val red_trans = Drule.standard (trans RS red_if_equal);
+val red_trans = Drule.export_without_context (trans RS red_if_equal);
(*For REWRITE rule: Make a reduction rule for simplification, e.g.
[| a: C(0); ... ; a=c: C(0) |] ==> rec(0,a,b) = c: C(0) *)
--- a/src/Pure/Concurrent/future.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Concurrent/future.ML Tue Feb 09 17:06:05 2010 +0100
@@ -88,22 +88,24 @@
(* datatype future *)
+type 'a result = 'a Exn.result Single_Assignment.var;
+
datatype 'a future = Future of
{promised: bool,
task: task,
group: group,
- result: 'a Exn.result option Synchronized.var};
+ result: 'a result};
fun task_of (Future {task, ...}) = task;
fun group_of (Future {group, ...}) = group;
fun result_of (Future {result, ...}) = result;
-fun peek x = Synchronized.value (result_of x);
+fun peek x = Single_Assignment.peek (result_of x);
fun is_finished x = is_some (peek x);
fun assign_result group result res =
let
- val _ = Synchronized.assign result (K (SOME res));
+ val _ = Single_Assignment.assign result res;
val ok =
(case res of
Exn.Exn exn => (Task_Queue.cancel_group group exn; false)
@@ -167,7 +169,7 @@
fun future_job group (e: unit -> 'a) =
let
- val result = Synchronized.var "future" (NONE: 'a Exn.result option);
+ val result = Single_Assignment.var "future" : 'a result;
fun job ok =
let
val res =
@@ -409,9 +411,6 @@
Exn.Exn (Exn.EXCEPTIONS (Exn.flatten_list (Task_Queue.group_status (group_of x))))
| SOME res => res);
-fun passive_wait x =
- Synchronized.readonly_access (result_of x) (fn NONE => NONE | SOME _ => SOME ());
-
fun join_next deps = (*requires SYNCHRONIZED*)
if null deps then NONE
else
@@ -438,7 +437,7 @@
else
(case worker_task () of
SOME task => join_depend task (map task_of xs)
- | NONE => List.app passive_wait xs;
+ | NONE => List.app (ignore o Single_Assignment.await o result_of) xs;
map get_result xs);
end;
@@ -452,7 +451,7 @@
fun value (x: 'a) =
let
val group = Task_Queue.new_group NONE;
- val result = Synchronized.var "value" NONE : 'a Exn.result option Synchronized.var;
+ val result = Single_Assignment.var "value" : 'a result;
val _ = assign_result group result (Exn.Result x);
in Future {promised = false, task = Task_Queue.dummy_task, group = group, result = result} end;
@@ -476,7 +475,7 @@
fun promise_group group : 'a future =
let
- val result = Synchronized.var "promise" (NONE: 'a Exn.result option);
+ val result = Single_Assignment.var "promise" : 'a result;
val task = SYNCHRONIZED "enqueue" (fn () =>
Unsynchronized.change_result queue (Task_Queue.enqueue_passive group));
in Future {promised = true, task = task, group = group, result = result} end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Concurrent/single_assignment.ML Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,57 @@
+(* Title: Pure/Concurrent/single_assignment.ML
+ Author: Makarius
+
+Single-assignment variables with locking/signalling.
+*)
+
+signature SINGLE_ASSIGNMENT =
+sig
+ type 'a var
+ val var: string -> 'a var
+ val peek: 'a var -> 'a option
+ val await: 'a var -> 'a
+ val assign: 'a var -> 'a -> unit
+end;
+
+structure Single_Assignment: SINGLE_ASSIGNMENT =
+struct
+
+abstype 'a var = Var of
+ {name: string,
+ lock: Mutex.mutex,
+ cond: ConditionVar.conditionVar,
+ var: 'a SingleAssignment.saref}
+with
+
+fun var name = Var
+ {name = name,
+ lock = Mutex.mutex (),
+ cond = ConditionVar.conditionVar (),
+ var = SingleAssignment.saref ()};
+
+fun peek (Var {var, ...}) = SingleAssignment.savalue var;
+
+fun await (v as Var {name, lock, cond, var}) =
+ SimpleThread.synchronized name lock (fn () =>
+ let
+ fun wait () =
+ (case peek v of
+ NONE =>
+ (case Multithreading.sync_wait NONE NONE cond lock of
+ Exn.Result _ => wait ()
+ | Exn.Exn exn => reraise exn)
+ | SOME x => x);
+ in wait () end);
+
+fun assign (v as Var {name, lock, cond, var}) x =
+ SimpleThread.synchronized name lock (fn () =>
+ (case peek v of
+ SOME _ => raise Fail ("Duplicate assignment to variable " ^ quote name)
+ | NONE =>
+ uninterruptible (fn _ => fn () =>
+ (SingleAssignment.saset (var, x); ConditionVar.broadcast cond)) ()));
+
+end;
+
+end;
+
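Aside, not part of this changeset: a minimal usage sketch of the new Single_Assignment interface, following the signature introduced above; the variable names are hypothetical.

  val v: int Single_Assignment.var = Single_Assignment.var "demo";
  val _ = Single_Assignment.peek v;       (* NONE while still unassigned *)
  val _ = Single_Assignment.assign v 42;  (* a second assign raises Fail *)
  val x = Single_Assignment.await v;      (* yields 42 once assigned *)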
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Concurrent/single_assignment_sequential.ML Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,30 @@
+(* Title: Pure/Concurrent/single_assignment_sequential.ML
+ Author: Makarius
+
+Single-assignment variables (sequential version).
+*)
+
+structure Single_Assignment: SINGLE_ASSIGNMENT =
+struct
+
+abstype 'a var = Var of 'a SingleAssignment.saref
+with
+
+fun var _ = Var (SingleAssignment.saref ());
+
+fun peek (Var var) = SingleAssignment.savalue var;
+
+fun await v =
+ (case peek v of
+ SOME x => x
+ | NONE => Thread.unavailable ());
+
+fun assign (v as Var var) x =
+ (case peek v of
+ SOME _ => raise Fail "Duplicate assignment to variable"
+ | NONE => SingleAssignment.saset (var, x));
+
+end;
+
+end;
+
--- a/src/Pure/Concurrent/synchronized.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Concurrent/synchronized.ML Tue Feb 09 17:06:05 2010 +0100
@@ -11,10 +11,8 @@
val value: 'a var -> 'a
val timed_access: 'a var -> ('a -> Time.time option) -> ('a -> ('b * 'a) option) -> 'b option
val guarded_access: 'a var -> ('a -> ('b * 'a) option) -> 'b
- val readonly_access: 'a var -> ('a -> 'b option) -> 'b
val change_result: 'a var -> ('a -> 'b * 'a) -> 'b
val change: 'a var -> ('a -> 'a) -> unit
- val assign: 'a var -> ('a -> 'a) -> unit
end;
structure Synchronized: SYNCHRONIZED =
@@ -22,11 +20,12 @@
(* state variables *)
-datatype 'a var = Var of
+abstype 'a var = Var of
{name: string,
lock: Mutex.mutex,
cond: ConditionVar.conditionVar,
- var: 'a Unsynchronized.ref};
+ var: 'a Unsynchronized.ref}
+with
fun var name x = Var
{name = name,
@@ -39,7 +38,7 @@
(* synchronized access *)
-fun access {time_limit, readonly, finish} (Var {name, lock, cond, var}) f =
+fun timed_access (Var {name, lock, cond, var}) time_limit f =
SimpleThread.synchronized name lock (fn () =>
let
fun try_change () =
@@ -51,36 +50,19 @@
| Exn.Result false => NONE
| Exn.Exn exn => reraise exn)
| SOME (y, x') =>
- if readonly then SOME y
- else
- let
- val _ = magic_immutability_test var
- andalso raise Fail ("Attempt to change finished variable " ^ quote name);
- val _ = var := x';
- val _ = if finish then magic_immutability_mark var else ();
- in SOME y end)
+ uninterruptible (fn _ => fn () =>
+ (var := x'; ConditionVar.broadcast cond; SOME y)) ())
end;
- val res = try_change ();
- val _ = ConditionVar.broadcast cond;
- in res end);
-
-fun timed_access var time_limit f =
- access {time_limit = time_limit, readonly = false, finish = false} var f;
+ in try_change () end);
fun guarded_access var f = the (timed_access var (K NONE) f);
-fun readonly_access var f =
- the (access {time_limit = K NONE, readonly = true, finish = false} var
- (fn x => (case f x of NONE => NONE | SOME y => SOME (y, x))));
-
(* unconditional change *)
fun change_result var f = guarded_access var (SOME o f);
fun change var f = change_result var (fn x => ((), f x));
-fun assign var f =
- the (access {time_limit = K NONE, readonly = false, finish = true} var
- (fn x => SOME ((), f x)));
+end;
end;
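Aside, not part of this changeset: with readonly_access and assign removed, the remaining Synchronized interface is value / timed_access / guarded_access / change / change_result; a minimal sketch with a hypothetical counter variable.

  val counter = Synchronized.var "counter" 0;
  val _ = Synchronized.change counter (fn n => n + 1);
  val old = Synchronized.change_result counter (fn n => (n, n + 1));  (* returns previous value *)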
--- a/src/Pure/Concurrent/synchronized_sequential.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Concurrent/synchronized_sequential.ML Tue Feb 09 17:06:05 2010 +0100
@@ -20,13 +20,8 @@
fun guarded_access var f = the (timed_access var (K NONE) f);
-fun readonly_access var f =
- guarded_access var (fn x => (case f x of NONE => NONE | SOME y => SOME (y, x)));
-
fun change_result var f = guarded_access var (SOME o f);
fun change var f = change_result var (fn x => ((), f x));
-val assign = change;
-
end;
end;
--- a/src/Pure/Concurrent/task_queue.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Concurrent/task_queue.ML Tue Feb 09 17:06:05 2010 +0100
@@ -127,7 +127,7 @@
val empty = make_queue Inttab.empty Task_Graph.empty;
fun all_passive (Queue {jobs, ...}) =
- Task_Graph.get_first NONE
+ Task_Graph.get_first
((fn Job _ => SOME () | Running _ => SOME () | Passive => NONE) o #2 o #1 o #2) jobs |> is_none;
@@ -204,7 +204,7 @@
if is_ready deps group then SOME (task, group, rev list) else NONE
| ready _ = NONE;
in
- (case Task_Graph.get_first NONE ready jobs of
+ (case Task_Graph.get_first ready jobs of
NONE => (NONE, queue)
| SOME (result as (task, _, _)) =>
let val jobs' = set_job task (Running thread) jobs
--- a/src/Pure/General/file.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/General/file.ML Tue Feb 09 17:06:05 2010 +0100
@@ -14,7 +14,6 @@
val full_path: Path.T -> Path.T
val tmp_path: Path.T -> Path.T
val isabelle_tool: string -> string -> int
- val system_command: string -> unit
eqtype ident
val rep_ident: ident -> string
val ident: Path.T -> ident option
@@ -75,11 +74,11 @@
then SOME path
else NONE
end handle OS.SysErr _ => NONE) of
- SOME path => system (shell_quote path ^ " " ^ args)
+ SOME path => bash (shell_quote path ^ " " ^ args)
| NONE => (writeln ("Unknown Isabelle tool: " ^ name); 2));
fun system_command cmd =
- if system cmd <> 0 then error ("System command failed: " ^ cmd)
+ if bash cmd <> 0 then error ("System command failed: " ^ cmd)
else ();
@@ -116,7 +115,7 @@
SOME id => id
| NONE =>
let val (id, rc) = (*potentially slow*)
- system_out ("\"$ISABELLE_HOME/lib/scripts/fileident\" " ^ shell_quote physical_path)
+ bash_output ("\"$ISABELLE_HOME/lib/scripts/fileident\" " ^ shell_quote physical_path)
in
if id <> "" andalso rc = 0 then (update_cache (physical_path, (mod_time, id)); id)
else error ("Failed to identify file " ^ quote (Path.implode path) ^ " by " ^ cmd)
--- a/src/Pure/General/graph.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/General/graph.ML Tue Feb 09 17:06:05 2010 +0100
@@ -15,8 +15,7 @@
val is_empty: 'a T -> bool
val keys: 'a T -> key list
val dest: 'a T -> (key * key list) list
- val get_first: key option -> (key * ('a * (key list * key list)) -> 'b option) ->
- 'a T -> 'b option
+ val get_first: (key * ('a * (key list * key list)) -> 'b option) -> 'a T -> 'b option
val fold: (key * ('a * (key list * key list)) -> 'b -> 'b) -> 'a T -> 'b -> 'b
val minimals: 'a T -> key list
val maximals: 'a T -> key list
@@ -89,7 +88,7 @@
fun keys (Graph tab) = Table.keys tab;
fun dest (Graph tab) = map (fn (x, (_, (_, succs))) => (x, succs)) (Table.dest tab);
-fun get_first b f (Graph tab) = Table.get_first b f tab;
+fun get_first f (Graph tab) = Table.get_first f tab;
fun fold_graph f (Graph tab) = Table.fold f tab;
fun minimals G = fold_graph (fn (m, (_, ([], _))) => cons m | _ => I) G [];
--- a/src/Pure/General/secure.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/General/secure.ML Tue Feb 09 17:06:05 2010 +0100
@@ -15,8 +15,8 @@
val toplevel_pp: string list -> string -> unit
val open_unsynchronized: unit -> unit
val commit: unit -> unit
- val system_out: string -> string * int
- val system: string -> int
+ val bash_output: string -> string * int
+ val bash: string -> int
end;
structure Secure: SECURE =
@@ -61,12 +61,12 @@
fun secure_shell () = deny_secure "Cannot execute shell commands in secure mode";
-val orig_system_out = system_out;
+val orig_bash_output = bash_output;
-fun system_out s = (secure_shell (); orig_system_out s);
+fun bash_output s = (secure_shell (); orig_bash_output s);
-fun system s =
- (case system_out s of
+fun bash s =
+ (case bash_output s of
("", rc) => rc
| (out, rc) => (writeln (perhaps (try (unsuffix "\n")) out); rc));
@@ -78,5 +78,5 @@
fun use s = Secure.use_file ML_Parse.global_context true s
handle ERROR msg => (writeln msg; error "ML error");
val toplevel_pp = Secure.toplevel_pp;
-val system_out = Secure.system_out;
-val system = Secure.system;
+val bash_output = Secure.bash_output;
+val bash = Secure.bash;
--- a/src/Pure/General/table.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/General/table.ML Tue Feb 09 17:06:05 2010 +0100
@@ -28,7 +28,7 @@
val keys: 'a table -> key list
val min_key: 'a table -> key option
val max_key: 'a table -> key option
- val get_first: key option -> (key * 'a -> 'b option) -> 'a table -> 'b option
+ val get_first: (key * 'a -> 'b option) -> 'a table -> 'b option
val exists: (key * 'a -> bool) -> 'a table -> bool
val forall: (key * 'a -> bool) -> 'a table -> bool
val lookup: 'a table -> key -> 'a option
@@ -131,40 +131,32 @@
(* get_first *)
-fun get_first boundary f tab =
+fun get_first f =
let
- val check =
- (case boundary of
- NONE => K true
- | SOME b => (fn k => Key.ord (b, k) = LESS));
- fun apply (k, x) = if check k then f (k, x) else NONE;
- fun get_bounded tb k = if check k then get tb else NONE
- and get Empty = NONE
+ fun get Empty = NONE
| get (Branch2 (left, (k, x), right)) =
- (case get_bounded left k of
+ (case get left of
NONE =>
- (case apply (k, x) of
+ (case f (k, x) of
NONE => get right
| some => some)
| some => some)
| get (Branch3 (left, (k1, x1), mid, (k2, x2), right)) =
- (case get_bounded left k1 of
+ (case get left of
NONE =>
- (case apply (k1, x1) of
+ (case f (k1, x1) of
NONE =>
- (case get_bounded mid k2 of
+ (case get mid of
NONE =>
- (case apply (k2, x2) of
+ (case f (k2, x2) of
NONE => get right
| some => some)
| some => some)
| some => some)
| some => some);
- in get tab end;
+ in get end;
-fun exists pred =
- is_some o get_first NONE (fn entry => if pred entry then SOME () else NONE);
-
+fun exists pred = is_some o get_first (fn entry => if pred entry then SOME () else NONE);
fun forall pred = not o exists (not o pred);
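Aside, not part of this changeset: callers of Table.get_first simply drop the former boundary argument; assuming a string-keyed instance such as Symtab and given some p: 'a -> bool and tab: 'a Symtab.table, a hypothetical update reads as follows.

  (* before: Symtab.get_first NONE (fn (k, x) => if p x then SOME k else NONE) tab *)
  val first_key = Symtab.get_first (fn (k, x) => if p x then SOME k else NONE) tab;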
--- a/src/Pure/IsaMakefile Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/IsaMakefile Tue Feb 09 17:06:05 2010 +0100
@@ -21,19 +21,19 @@
## Pure
-BOOTSTRAP_FILES = ML-Systems/compiler_polyml-5.0.ML \
+BOOTSTRAP_FILES = ML-Systems/bash.ML ML-Systems/compiler_polyml-5.0.ML \
ML-Systems/compiler_polyml-5.2.ML ML-Systems/compiler_polyml-5.3.ML \
- ML-Systems/ml_name_space.ML \
- ML-Systems/ml_pretty.ML ML-Systems/mosml.ML \
+ ML-Systems/ml_name_space.ML ML-Systems/ml_pretty.ML \
ML-Systems/multithreading.ML ML-Systems/multithreading_polyml.ML \
ML-Systems/overloading_smlnj.ML ML-Systems/polyml-5.0.ML \
ML-Systems/polyml-5.1.ML ML-Systems/polyml-5.2.ML \
ML-Systems/polyml-5.2.1.ML ML-Systems/polyml.ML \
ML-Systems/polyml_common.ML ML-Systems/pp_polyml.ML \
- ML-Systems/proper_int.ML ML-Systems/smlnj.ML \
- ML-Systems/system_shell.ML ML-Systems/thread_dummy.ML \
- ML-Systems/timing.ML ML-Systems/time_limit.ML \
- ML-Systems/universal.ML ML-Systems/unsynchronized.ML
+ ML-Systems/proper_int.ML ML-Systems/single_assignment.ML \
+ ML-Systems/single_assignment_polyml.ML ML-Systems/smlnj.ML \
+ ML-Systems/thread_dummy.ML ML-Systems/timing.ML \
+ ML-Systems/time_limit.ML ML-Systems/universal.ML \
+ ML-Systems/unsynchronized.ML
RAW: $(OUT)/RAW
@@ -47,17 +47,18 @@
Concurrent/future.ML Concurrent/lazy.ML \
Concurrent/lazy_sequential.ML Concurrent/mailbox.ML \
Concurrent/par_list.ML Concurrent/par_list_sequential.ML \
- Concurrent/simple_thread.ML Concurrent/synchronized.ML \
- Concurrent/synchronized_sequential.ML Concurrent/task_queue.ML \
- General/alist.ML General/antiquote.ML General/balanced_tree.ML \
- General/basics.ML General/binding.ML General/buffer.ML \
- General/exn.ML General/file.ML General/graph.ML General/heap.ML \
- General/integer.ML General/long_name.ML General/markup.ML \
- General/name_space.ML General/ord_list.ML General/output.ML \
- General/path.ML General/position.ML General/pretty.ML \
- General/print_mode.ML General/properties.ML General/queue.ML \
- General/same.ML General/scan.ML General/secure.ML General/seq.ML \
- General/source.ML General/stack.ML General/symbol.ML \
+ Concurrent/simple_thread.ML Concurrent/single_assignment.ML \
+ Concurrent/single_assignment_sequential.ML \
+ Concurrent/synchronized.ML Concurrent/synchronized_sequential.ML \
+ Concurrent/task_queue.ML General/alist.ML General/antiquote.ML \
+ General/balanced_tree.ML General/basics.ML General/binding.ML \
+ General/buffer.ML General/exn.ML General/file.ML General/graph.ML \
+ General/heap.ML General/integer.ML General/long_name.ML \
+ General/markup.ML General/name_space.ML General/ord_list.ML \
+ General/output.ML General/path.ML General/position.ML \
+ General/pretty.ML General/print_mode.ML General/properties.ML \
+ General/queue.ML General/same.ML General/scan.ML General/secure.ML \
+ General/seq.ML General/source.ML General/stack.ML General/symbol.ML \
General/symbol_pos.ML General/table.ML General/url.ML General/xml.ML \
General/yxml.ML Isar/args.ML Isar/attrib.ML Isar/auto_bind.ML \
Isar/calculation.ML Isar/class.ML Isar/class_target.ML Isar/code.ML \
--- a/src/Pure/Isar/args.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/args.ML Tue Feb 09 17:06:05 2010 +0100
@@ -1,7 +1,7 @@
(* Title: Pure/Isar/args.ML
Author: Markus Wenzel, TU Muenchen
-Parsing with implicit value assigment. Concrete argument syntax of
+Parsing with implicit value assignment. Concrete argument syntax of
attributes, methods etc.
*)
--- a/src/Pure/Isar/attrib.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/attrib.ML Tue Feb 09 17:06:05 2010 +0100
@@ -298,7 +298,7 @@
setup (Binding.name "params")
(Scan.lift (P.and_list1 (Scan.repeat Args.name)) >> Rule_Cases.params)
"named rule parameters" #>
- setup (Binding.name "standard") (Scan.succeed (Thm.rule_attribute (K Drule.standard)))
+ setup (Binding.name "standard") (Scan.succeed (Thm.rule_attribute (K Drule.export_without_context)))
"result put into standard form (legacy)" #>
setup (Binding.name "rule_format") rule_format "result put into canonical rule format" #>
setup (Binding.name "elim_format") (Scan.succeed elim_format)
--- a/src/Pure/Isar/class.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/class.ML Tue Feb 09 17:06:05 2010 +0100
@@ -86,7 +86,7 @@
| SOME prop => Logic.mk_implies (Morphism.term const_morph
((map_types o map_atyps) (K aT) prop), of_class_prop_concl);
val sup_of_classes = map (snd o rules thy) sups;
- val loc_axiom_intros = map Drule.standard' (Locale.axioms_of thy class);
+ val loc_axiom_intros = map Drule.export_without_context_open (Locale.axioms_of thy class);
val axclass_intro = #intro (AxClass.get_info thy class);
val base_sort_trivs = Thm.of_sort (Thm.ctyp_of thy aT, base_sort);
val tac = REPEAT (SOMEGOAL
--- a/src/Pure/Isar/class_target.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/class_target.ML Tue Feb 09 17:06:05 2010 +0100
@@ -233,7 +233,7 @@
fun register_subclass (sub, sup) some_dep_morph some_wit export thy =
let
val intros = (snd o rules thy) sup :: map_filter I
- [Option.map (Drule.standard' o Element.conclude_witness) some_wit,
+ [Option.map (Drule.export_without_context_open o Element.conclude_witness) some_wit,
(fst o rules thy) sub];
val tac = EVERY (map (TRYALL o Tactic.rtac) intros);
val classrel = Skip_Proof.prove_global thy [] [] (Logic.mk_classrel (sub, sup))
--- a/src/Pure/Isar/expression.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/expression.ML Tue Feb 09 17:06:05 2010 +0100
@@ -699,7 +699,7 @@
|> PureThy.note_thmss ""
[((Binding.conceal (Binding.name introN), []), [([intro], [Locale.intro_add])]),
((Binding.conceal (Binding.name axiomsN), []),
- [(map (Drule.standard o Element.conclude_witness) axioms, [])])]
+ [(map (Drule.export_without_context o Element.conclude_witness) axioms, [])])]
||> Sign.restore_naming thy''';
in (SOME statement, SOME intro, axioms, thy'''') end;
in ((a_pred, a_intro, a_axioms), (b_pred, b_intro, b_axioms), thy'''') end;
--- a/src/Pure/Isar/skip_proof.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Isar/skip_proof.ML Tue Feb 09 17:06:05 2010 +0100
@@ -39,6 +39,6 @@
else tac args st);
fun prove_global thy xs asms prop tac =
- Drule.standard (prove (ProofContext.init thy) xs asms prop tac);
+ Drule.export_without_context (prove (ProofContext.init thy) xs asms prop tac);
end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/ML-Systems/bash.ML Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,43 @@
+(* Title: Pure/ML-Systems/bash.ML
+ Author: Makarius
+
+Generic GNU bash processes (no provisions to propagate interrupts, but
+could work via the controlling tty).
+*)
+
+local
+
+fun read_file name =
+ let val is = TextIO.openIn name
+ in Exn.release (Exn.capture TextIO.inputAll is before TextIO.closeIn is) end;
+
+fun write_file name txt =
+ let val os = TextIO.openOut name
+ in Exn.release (Exn.capture TextIO.output (os, txt) before TextIO.closeOut os) end;
+
+in
+
+fun bash_output script =
+ let
+ val script_name = OS.FileSys.tmpName ();
+ val _ = write_file script_name script;
+
+ val output_name = OS.FileSys.tmpName ();
+
+ val status =
+ OS.Process.system ("exec \"$ISABELLE_HOME/lib/scripts/bash\" nogroup " ^
+ script_name ^ " /dev/null " ^ output_name);
+ val rc =
+ (case Posix.Process.fromStatus status of
+ Posix.Process.W_EXITED => 0
+ | Posix.Process.W_EXITSTATUS w => Word8.toInt w
+ | Posix.Process.W_SIGNALED s => 256 + LargeWord.toInt (Posix.Signal.toWord s)
+ | Posix.Process.W_STOPPED s => 512 + LargeWord.toInt (Posix.Signal.toWord s));
+
+ val output = read_file output_name handle IO.Io _ => "";
+ val _ = OS.FileSys.remove script_name handle OS.SysErr _ => ();
+ val _ = OS.FileSys.remove output_name handle OS.SysErr _ => ();
+ in (output, rc) end;
+
+end;
+
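Aside, not part of this changeset: bash_output returns the captured stdout together with the process return code, so a hypothetical caller would look like this.

  val (output, rc) = bash_output "echo \"$ISABELLE_HOME\"";
  val _ = if rc = 0 then output else raise Fail ("bash returned " ^ Int.toString rc);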
--- a/src/Pure/ML-Systems/mosml.ML Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,259 +0,0 @@
-(* Title: Pure/ML-Systems/mosml.ML
- Author: Lawrence C Paulson, Cambridge University Computer Laboratory
- Author: Makarius
-
-Compatibility file for Moscow ML 2.01
-
-NOTE: saving images does not work; may run it interactively as follows:
-
-$ cd Isabelle/src/Pure
-$ isabelle-process RAW_ML_SYSTEM
-> val ml_system = "mosml";
-> use "ML-Systems/mosml.ML";
-> use "ROOT.ML";
-> Session.finish ();
-> cd "../FOL";
-> use "ROOT.ML";
-> Session.finish ();
-> cd "../ZF";
-> use "ROOT.ML";
-*)
-
-(** ML system related **)
-
-val ml_system_fix_ints = false;
-
-fun forget_structure _ = ();
-
-load "Obj";
-load "Word";
-load "Bool";
-load "Int";
-load "Real";
-load "ListPair";
-load "OS";
-load "Process";
-load "FileSys";
-load "IO";
-load "CharVector";
-
-exception Interrupt;
-fun reraise exn = raise exn;
-
-use "ML-Systems/unsynchronized.ML";
-use "General/exn.ML";
-use "ML-Systems/universal.ML";
-use "ML-Systems/thread_dummy.ML";
-use "ML-Systems/multithreading.ML";
-use "ML-Systems/time_limit.ML";
-use "ML-Systems/ml_name_space.ML";
-use "ML-Systems/ml_pretty.ML";
-use "ML-Systems/use_context.ML";
-
-
-(*low-level pointer equality*)
-local val cast : 'a -> int = Obj.magic
-in fun pointer_eq (x:'a, y:'a) = (cast x = cast y) end;
-
-(*proper implementation available?*)
-structure IntInf =
-struct
- fun divMod (x, y) = (x div y, x mod y);
-
- local
- fun log 1 a = a
- | log n a = log (n div 2) (a + 1);
- in
- fun log2 n = if n > 0 then log n 0 else raise Domain;
- end;
-
- open Int;
-end;
-
-structure Substring =
-struct
- open Substring;
- val full = all;
-end;
-
-structure Real =
-struct
- open Real;
- val realFloor = real o floor;
-end;
-
-structure String =
-struct
- fun isSuffix s1 s2 =
- let val n1 = size s1 and n2 = size s2
- in if n1 = n2 then s1 = s2 else n1 <= n2 andalso String.substring (s2, n2 - n1, n1) = s1 end;
- open String;
-end;
-
-structure Time =
-struct
- open Time;
- fun toString t = Time.toString t
- handle Overflow => Real.toString (Time.toReal t); (*workaround Y2004 bug*)
-end;
-
-structure OS =
-struct
- open OS
- structure Process =
- struct
- open Process
- fun sleep _ = raise Fail "OS.Process.sleep undefined"
- end;
- structure FileSys = FileSys
-end;
-
-exception Option = Option.Option;
-
-
-(*limit the printing depth*)
-local
- val depth = ref 10;
-in
- fun get_print_depth () = ! depth;
- fun print_depth n =
- (depth := n;
- Meta.printDepth := n div 2;
- Meta.printLength := n);
-end;
-
-(*dummy implementation*)
-fun toplevel_pp _ _ _ = ();
-
-(*dummy implementation*)
-fun ml_prompts p1 p2 = ();
-
-(*dummy implementation*)
-fun profile (n: int) f x = f x;
-
-(*dummy implementation*)
-fun exception_trace f = f ();
-
-
-
-(** Compiler-independent timing functions **)
-
-load "Timer";
-
-fun start_timing () =
- let
- val timer = Timer.startCPUTimer ();
- val time = Timer.checkCPUTimer timer;
- in (timer, time) end;
-
-fun end_timing (timer, {gc, sys, usr}) =
- let
- open Time; (*...for Time.toString, Time.+ and Time.- *)
- val {gc = gc2, sys = sys2, usr = usr2} = Timer.checkCPUTimer timer;
- val user = usr2 - usr + gc2 - gc;
- val all = user + sys2 - sys;
- val message = "User " ^ toString user ^ " All "^ toString all ^ " secs" handle Time => "";
- in {message = message, user = user, all = all} end;
-
-
-(* SML basis library fixes *)
-
-structure TextIO =
-struct
- fun canInput _ = raise IO.Io {cause = Fail "undefined", function = "canInput", name = ""};
- open TextIO;
- fun inputLine is =
- let val s = TextIO.inputLine is
- in if s = "" then NONE else SOME s end;
- fun getOutstream _ = ();
- structure StreamIO =
- struct
- fun setBufferMode _ = ();
- end
-end;
-
-structure IO =
-struct
- open IO;
- val BLOCK_BUF = ();
-end;
-
-
-(* ML command execution *)
-
-(*Can one redirect 'use' directly to an instream?*)
-fun use_text ({tune_source, ...}: use_context) _ _ txt =
- let
- val tmp_name = FileSys.tmpName ();
- val tmp_file = TextIO.openOut tmp_name;
- in
- TextIO.output (tmp_file, tune_source txt);
- TextIO.closeOut tmp_file;
- use tmp_name;
- FileSys.remove tmp_name
- end;
-
-fun use_file _ _ name = use name;
-
-
-
-(** interrupts **) (*Note: may get into race conditions*)
-
-(* FIXME proper implementation available? *)
-
-fun interruptible f x = f x;
-fun uninterruptible f x = f (fn (g: 'c -> 'd) => g) x;
-
-
-
-(** OS related **)
-
-(*dummy implementation*)
-structure Posix =
-struct
- structure ProcEnv =
- struct
- fun getpid () = 0;
- end;
-end;
-
-local
-
-fun read_file name =
- let val is = TextIO.openIn name
- in Exn.release (Exn.capture TextIO.inputAll is before TextIO.closeIn is) end;
-
-fun write_file name txt =
- let val os = TextIO.openOut name
- in Exn.release (Exn.capture TextIO.output (os, txt) before TextIO.closeOut os) end;
-
-in
-
-fun system_out script =
- let
- val script_name = OS.FileSys.tmpName ();
- val _ = write_file script_name script;
-
- val output_name = OS.FileSys.tmpName ();
-
- val status =
- OS.Process.system ("perl -w \"$ISABELLE_HOME/lib/scripts/system.pl\" nogroup " ^
- script_name ^ " /dev/null " ^ output_name);
- val rc = if status = OS.Process.success then 0 else 1;
-
- val output = read_file output_name handle IO.Io _ => "";
- val _ = OS.FileSys.remove script_name handle OS.SysErr _ => ();
- val _ = OS.FileSys.remove output_name handle OS.SysErr _ => ();
- in (output, rc) end;
-
-end;
-
-val cd = OS.FileSys.chDir;
-val pwd = OS.FileSys.getDir;
-
-val process_id = Int.toString o Posix.ProcEnv.getpid;
-
-fun getenv var =
- (case Process.getEnv var of
- NONE => ""
- | SOME txt => txt);
--- a/src/Pure/ML-Systems/multithreading_polyml.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML-Systems/multithreading_polyml.ML Tue Feb 09 17:06:05 2010 +0100
@@ -8,7 +8,7 @@
sig
val interruptible: ('a -> 'b) -> 'a -> 'b
val uninterruptible: ((('c -> 'd) -> 'c -> 'd) -> 'a -> 'b) -> 'a -> 'b
- val system_out: string -> string * int
+ val bash_output: string -> string * int
structure TimeLimit: TIME_LIMIT
end;
@@ -156,9 +156,9 @@
end;
-(* system shell processes, with propagation of interrupts *)
+(* GNU bash processes, with propagation of interrupts *)
-fun system_out script = with_attributes no_interrupts (fn orig_atts =>
+fun bash_output script = with_attributes no_interrupts (fn orig_atts =>
let
val script_name = OS.FileSys.tmpName ();
val _ = write_file script_name script;
@@ -180,7 +180,7 @@
val system_thread = Thread.fork (fn () =>
let
val status =
- OS.Process.system ("perl -w \"$ISABELLE_HOME/lib/scripts/system.pl\" group " ^
+ OS.Process.system ("exec \"$ISABELLE_HOME/lib/scripts/bash\" group " ^
script_name ^ " " ^ pid_name ^ " " ^ output_name);
val res =
(case Posix.Process.fromStatus status of
--- a/src/Pure/ML-Systems/polyml_common.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML-Systems/polyml_common.ML Tue Feb 09 17:06:05 2010 +0100
@@ -6,10 +6,11 @@
exception Interrupt = SML90.Interrupt;
use "General/exn.ML";
+use "ML-Systems/single_assignment_polyml.ML";
use "ML-Systems/multithreading.ML";
use "ML-Systems/time_limit.ML";
use "ML-Systems/timing.ML";
-use "ML-Systems/system_shell.ML";
+use "ML-Systems/bash.ML";
use "ML-Systems/ml_pretty.ML";
use "ML-Systems/use_context.ML";
@@ -131,12 +132,3 @@
val _ = RunCall.run_call1 RuntimeCalls.POLY_SYS_profiler 0;
in Exn.release res end;
-
-(* magic immutability -- for internal use only! *)
-
-fun magic_immutability_mark (r: 'a Unsynchronized.ref) =
- ignore (RunCall.run_call1 RuntimeCalls.POLY_SYS_lockseg r);
-
-fun magic_immutability_test (r: 'a Unsynchronized.ref) =
- Word8.andb (0wx40, RunCall.run_call1 RuntimeCalls.POLY_SYS_get_flags r) = 0w0;
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/ML-Systems/single_assignment.ML Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,33 @@
+(* Title: Pure/ML-Systems/single_assignment.ML
+ Author: Makarius
+
+References with single assignment. Unsynchronized!
+*)
+
+signature SINGLE_ASSIGNMENT =
+sig
+ type 'a saref
+ exception Locked
+ val saref: unit -> 'a saref
+ val savalue: 'a saref -> 'a option
+ val saset: 'a saref * 'a -> unit
+end;
+
+structure SingleAssignment: SINGLE_ASSIGNMENT =
+struct
+
+exception Locked;
+
+abstype 'a saref = SARef of 'a option ref
+with
+
+fun saref () = SARef (ref NONE);
+
+fun savalue (SARef r) = ! r;
+
+fun saset (SARef (r as ref NONE), x) = r := SOME x
+ | saset _ = raise Locked;
+
+end;
+
+end;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/ML-Systems/single_assignment_polyml.ML Tue Feb 09 17:06:05 2010 +0100
@@ -0,0 +1,35 @@
+(* Title: Pure/ML-Systems/single_assignment_polyml.ML
+ Author: Makarius
+
+References with single assignment. Unsynchronized! Emulates
+structure SingleAssignment from Poly/ML 5.4.
+*)
+
+signature SINGLE_ASSIGNMENT =
+sig
+ type 'a saref
+ exception Locked
+ val saref: unit -> 'a saref
+ val savalue: 'a saref -> 'a option
+ val saset: 'a saref * 'a -> unit
+end;
+
+structure SingleAssignment: SINGLE_ASSIGNMENT =
+struct
+
+exception Locked;
+
+abstype 'a saref = SARef of 'a option ref
+with
+
+fun saref () = SARef (ref NONE);
+
+fun savalue (SARef r) = ! r;
+
+fun saset (SARef (r as ref NONE), x) =
+ (r := SOME x; RunCall.run_call1 RuntimeCalls.POLY_SYS_lockseg r)
+ | saset _ = raise Locked;
+
+end;
+
+end;
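Aside, not part of this changeset: the emulated SingleAssignment structure is used exactly like the Poly/ML 5.4 original; a minimal sketch with a hypothetical reference.

  val r: int SingleAssignment.saref = SingleAssignment.saref ();
  val _ = SingleAssignment.savalue r;     (* NONE while unset *)
  val _ = SingleAssignment.saset (r, 1);  (* a second saset raises Locked *)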
--- a/src/Pure/ML-Systems/smlnj.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML-Systems/smlnj.ML Tue Feb 09 17:06:05 2010 +0100
@@ -10,11 +10,12 @@
use "ML-Systems/unsynchronized.ML";
use "ML-Systems/overloading_smlnj.ML";
use "General/exn.ML";
+use "ML-Systems/single_assignment.ML";
use "ML-Systems/universal.ML";
use "ML-Systems/thread_dummy.ML";
use "ML-Systems/multithreading.ML";
use "ML-Systems/timing.ML";
-use "ML-Systems/system_shell.ML";
+use "ML-Systems/bash.ML";
use "ML-Systems/ml_name_space.ML";
use "ML-Systems/ml_pretty.ML";
use "ML-Systems/use_context.ML";
@@ -66,10 +67,6 @@
(Control.primaryPrompt := p1; Control.secondaryPrompt := p2);
(*dummy implementation*)
-fun magic_immutability_test _ = false;
-fun magic_immutability_mark _ = ();
-
-(*dummy implementation*)
fun profile (n: int) f x = f x;
(*dummy implementation*)
@@ -179,7 +176,7 @@
(* system command execution *)
-val system_out = (fn (output, rc) => (output, mk_int rc)) o system_out;
+val bash_output = (fn (output, rc) => (output, mk_int rc)) o bash_output;
fun process_id pid =
Word.fmt StringCvt.DEC (Word.fromLargeWord (Posix.Process.pidToWord (Posix.ProcEnv.getpid ())));
--- a/src/Pure/ML-Systems/system_shell.ML Tue Feb 09 16:07:51 2010 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-(* Title: Pure/ML-Systems/system_shell.ML
- Author: Makarius
-
-Generic system shell processes (no provisions to propagate interrupts;
-might still work via the controlling tty).
-*)
-
-local
-
-fun read_file name =
- let val is = TextIO.openIn name
- in Exn.release (Exn.capture TextIO.inputAll is before TextIO.closeIn is) end;
-
-fun write_file name txt =
- let val os = TextIO.openOut name
- in Exn.release (Exn.capture TextIO.output (os, txt) before TextIO.closeOut os) end;
-
-in
-
-fun system_out script =
- let
- val script_name = OS.FileSys.tmpName ();
- val _ = write_file script_name script;
-
- val output_name = OS.FileSys.tmpName ();
-
- val status =
- OS.Process.system ("perl -w \"$ISABELLE_HOME/lib/scripts/system.pl\" nogroup " ^
- script_name ^ " /dev/null " ^ output_name);
- val rc =
- (case Posix.Process.fromStatus status of
- Posix.Process.W_EXITED => 0
- | Posix.Process.W_EXITSTATUS w => Word8.toInt w
- | Posix.Process.W_SIGNALED s => 256 + LargeWord.toInt (Posix.Signal.toWord s)
- | Posix.Process.W_STOPPED s => 512 + LargeWord.toInt (Posix.Signal.toWord s));
-
- val output = read_file output_name handle IO.Io _ => "";
- val _ = OS.FileSys.remove script_name handle OS.SysErr _ => ();
- val _ = OS.FileSys.remove output_name handle OS.SysErr _ => ();
- in (output, rc) end;
-
-end;
-
--- a/src/Pure/ML/ml_antiquote.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML/ml_antiquote.ML Tue Feb 09 17:06:05 2010 +0100
@@ -38,17 +38,17 @@
fun macro name scan = ML_Context.add_antiq name
(fn _ => scan :|-- (fn ctxt => Scan.depend (fn _ => Scan.succeed
- (Context.Proof ctxt, fn {background, ...} => (K ("", ""), background)))));
+ (Context.Proof ctxt, fn background => (K ("", ""), background)))));
fun inline name scan = ML_Context.add_antiq name
- (fn _ => scan >> (fn s => fn {background, ...} => (K ("", s), background)));
+ (fn _ => scan >> (fn s => fn background => (K ("", s), background)));
fun declaration kind name scan = ML_Context.add_antiq name
- (fn _ => scan >> (fn s => fn {struct_name, background} =>
+ (fn _ => scan >> (fn s => fn background =>
let
val (a, background') = variant name background;
val env = kind ^ " " ^ a ^ " = " ^ s ^ ";\n";
- val body = struct_name ^ "." ^ a;
+ val body = "Isabelle." ^ a;
in (K (env, body), background') end));
val value = declaration "val";
--- a/src/Pure/ML/ml_context.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML/ml_context.ML Tue Feb 09 17:06:05 2010 +0100
@@ -22,9 +22,7 @@
val stored_thms: thm list Unsynchronized.ref
val ml_store_thm: string * thm -> unit
val ml_store_thms: string * thm list -> unit
- type antiq =
- {struct_name: string, background: Proof.context} ->
- (Proof.context -> string * string) * Proof.context
+ type antiq = Proof.context -> (Proof.context -> string * string) * Proof.context
val add_antiq: string -> (Position.T -> antiq context_parser) -> unit
val trace: bool Unsynchronized.ref
val eval_antiquotes: ML_Lex.token Antiquote.antiquote list * Position.T ->
@@ -72,8 +70,8 @@
val ml_store_thms = ml_store "";
fun ml_store_thm (name, th) = ml_store "hd" (name, [th]);
-fun bind_thm (name, thm) = ml_store_thm (name, Drule.standard thm);
-fun bind_thms (name, thms) = ml_store_thms (name, map Drule.standard thms);
+fun bind_thm (name, thm) = ml_store_thm (name, Drule.export_without_context thm);
+fun bind_thms (name, thms) = ml_store_thms (name, map Drule.export_without_context thms);
fun thm name = ProofContext.get_thm (the_local_context ()) name;
fun thms name = ProofContext.get_thms (the_local_context ()) name;
@@ -84,9 +82,7 @@
(* antiquotation commands *)
-type antiq =
- {struct_name: string, background: Proof.context} ->
- (Proof.context -> string * string) * Proof.context;
+type antiq = Proof.context -> (Proof.context -> string * string) * Proof.context;
local
@@ -123,8 +119,7 @@
P.!!! (P.position P.xname -- Args.parse --| Scan.ahead P.eof)
>> (fn ((x, pos), y) => Args.src ((x, y), pos));
-val struct_name = "Isabelle";
-val begin_env = ML_Lex.tokenize ("structure " ^ struct_name ^ " =\nstruct\n");
+val begin_env = ML_Lex.tokenize "structure Isabelle =\nstruct\n";
val end_env = ML_Lex.tokenize "end;";
val reset_env = ML_Lex.tokenize "structure Isabelle = struct end";
@@ -151,7 +146,7 @@
let
val context = Stack.top scope;
val (f, context') = antiquotation (T.read_antiq lex antiq (ss, #1 range)) context;
- val (decl, background') = f {background = background, struct_name = struct_name};
+ val (decl, background') = f background;
val decl' = decl #> pairself (ML_Lex.tokenize #> map (ML_Lex.set_range range));
in (decl', (Stack.map_top (K context') scope, background')) end
| expand (Antiquote.Open _) (scope, background) =
--- a/src/Pure/ML/ml_thms.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ML/ml_thms.ML Tue Feb 09 17:06:05 2010 +0100
@@ -34,12 +34,12 @@
(* fact references *)
fun thm_antiq kind scan = ML_Context.add_antiq kind
- (fn _ => scan >> (fn ths => fn {struct_name, background} =>
+ (fn _ => scan >> (fn ths => fn background =>
let
val i = serial ();
val (a, background') = background
|> ML_Antiquote.variant kind ||> put_thms (i, ths);
- val ml = (thm_bind kind a i, struct_name ^ "." ^ a);
+ val ml = (thm_bind kind a i, "Isabelle." ^ a);
in (K ml, background') end));
val _ = thm_antiq "thm" (Attrib.thm >> single);
@@ -56,7 +56,7 @@
(fn _ => Args.context -- Args.mode "open" --
Scan.lift (OuterParse.and_list1 (Scan.repeat1 goal) --
(by |-- Method.parse -- Scan.option Method.parse)) >>
- (fn ((ctxt, is_open), (raw_propss, methods)) => fn {struct_name, background} =>
+ (fn ((ctxt, is_open), (raw_propss, methods)) => fn background =>
let
val propss = burrow (map (rpair []) o Syntax.read_props ctxt) raw_propss;
val i = serial ();
@@ -69,8 +69,7 @@
val (a, background') = background
|> ML_Antiquote.variant "lemma" ||> put_thms (i, the_thms ctxt' i);
val ml =
- (thm_bind (if length (flat propss) = 1 then "thm" else "thms") a i,
- struct_name ^ "." ^ a);
+ (thm_bind (if length (flat propss) = 1 then "thm" else "thms") a i, "Isabelle." ^ a);
in (K ml, background') end));
end;
--- a/src/Pure/ROOT.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/ROOT.ML Tue Feb 09 17:06:05 2010 +0100
@@ -57,6 +57,10 @@
use "Concurrent/simple_thread.ML";
+use "Concurrent/single_assignment.ML";
+if Multithreading.available then ()
+else use "Concurrent/single_assignment_sequential.ML";
+
use "Concurrent/synchronized.ML";
if Multithreading.available then ()
else use "Concurrent/synchronized_sequential.ML";
--- a/src/Pure/System/isabelle_system.scala Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/System/isabelle_system.scala Tue Feb 09 17:06:05 2010 +0100
@@ -162,7 +162,7 @@
/** system tools **/
- def system_out(script: String): (String, Int) =
+ def bash_output(script: String): (String, Int) =
{
Standard_System.with_tmp_file("isabelle_script") { script_file =>
Standard_System.with_tmp_file("isabelle_pid") { pid_file =>
@@ -170,8 +170,7 @@
Standard_System.write_file(script_file, script)
- val proc = execute(true, "perl", "-w",
- expand_path("$ISABELLE_HOME/lib/scripts/system.pl"), "group",
+ val proc = execute(true, expand_path("$ISABELLE_HOME/lib/scripts/bash"), "group",
script_file.getPath, pid_file.getPath, output_file.getPath)
def kill(strict: Boolean) =
@@ -310,20 +309,29 @@
val font_family = "IsabelleText"
- private def check_font(): Boolean =
- new Font(font_family, Font.PLAIN, 1).getFamily == font_family
-
- private def create_font(name: String) =
- Font.createFont(Font.TRUETYPE_FONT, platform_file(name))
+ def get_font(bold: Boolean): Font =
+ new Font(font_family, if (bold) Font.BOLD else Font.PLAIN, 1)
def install_fonts()
{
+ def create_font(bold: Boolean): Font =
+ {
+ val name =
+ if (bold) "$ISABELLE_HOME/lib/fonts/IsabelleTextBold.ttf"
+ else "$ISABELLE_HOME/lib/fonts/IsabelleText.ttf"
+ Font.createFont(Font.TRUETYPE_FONT, platform_file(name))
+ }
+ def check_font() = get_font(false).getFamily == font_family
+
if (!check_font()) {
+ val font = create_font(false)
+ val bold_font = create_font(true)
+
val ge = GraphicsEnvironment.getLocalGraphicsEnvironment()
- ge.registerFont(create_font("$ISABELLE_HOME/lib/fonts/IsabelleText.ttf"))
- ge.registerFont(create_font("$ISABELLE_HOME/lib/fonts/IsabelleTextBold.ttf"))
- if (!check_font())
- error("Failed to install IsabelleText fonts")
+ ge.registerFont(font)
+ // workaround strange problem with Apple's Java 1.6 font manager
+ if (bold_font.getFamily == font_family) ge.registerFont(bold_font)
+ if (!check_font()) error("Failed to install IsabelleText fonts")
}
}
}
--- a/src/Pure/System/platform.scala Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/System/platform.scala Tue Feb 09 17:06:05 2010 +0100
@@ -55,15 +55,15 @@
/* Swing look-and-feel */
+ private def find_laf(name: String): Option[String] =
+ UIManager.getInstalledLookAndFeels().find(_.getName == name).map(_.getClassName)
+
def look_and_feel(): String =
{
if (is_windows || is_macos) UIManager.getSystemLookAndFeelClassName()
- else {
- UIManager.getInstalledLookAndFeels().find(laf => laf.getName == "Nimbus") match {
- case None => UIManager.getCrossPlatformLookAndFeelClassName()
- case Some(laf) => laf.getClassName
- }
- }
+ else
+ find_laf("Nimbus") orElse find_laf("GTK+") getOrElse
+ UIManager.getCrossPlatformLookAndFeelClassName()
}
}
--- a/src/Pure/System/session.scala Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/System/session.scala Tue Feb 09 17:06:05 2010 +0100
@@ -122,14 +122,13 @@
// global status message
result.body match {
- // document state assigment
+ // document state assignment
case List(XML.Elem(Markup.ASSIGN, _, edits)) if target_id.isDefined =>
documents.get(target_id.get) match {
case Some(doc) =>
val states =
for {
- XML.Elem(Markup.EDIT, (Markup.ID, cmd_id) :: (Markup.STATE, state_id) :: _, _)
- <- edits
+ XML.Elem(Markup.EDIT, (Markup.ID, cmd_id) :: (Markup.STATE, state_id) :: _, _) <- edits
cmd <- lookup_command(cmd_id)
} yield {
val st = cmd.assign_state(state_id)
--- a/src/Pure/Thy/completion.scala Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Thy/completion.scala Tue Feb 09 17:06:05 2010 +0100
@@ -88,12 +88,11 @@
abbrevs_lex.parse(abbrevs_lex.keyword, new Library.Reverse(line)) match {
case abbrevs_lex.Success(rev_a, _) =>
val (word, c) = abbrevs_map(rev_a)
- if (word == c) None
- else Some(word, List(c))
+ Some(word, List(c))
case _ =>
Completion.Parse.read(line) match {
case Some(word) =>
- words_lex.completions(word).map(words_map(_)).filter(_ != word) match {
+ words_lex.completions(word).map(words_map(_)) match {
case Nil => None
case cs => Some(word, cs.sort(_ < _))
}
--- a/src/Pure/Thy/present.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/Thy/present.ML Tue Feb 09 17:06:05 2010 +0100
@@ -328,7 +328,7 @@
\-n '" ^ name ^ "' -t '" ^ tags ^ "' " ^ File.shell_path path ^ " 2>&1";
val doc_path = Path.append result_path (Path.ext format (Path.basic name));
val _ = if verbose then writeln s else ();
- val (out, rc) = system_out s;
+ val (out, rc) = bash_output s;
val _ =
if not (File.exists doc_path) orelse rc <> 0 then
cat_error out ("Failed to build document " ^ quote (show_path doc_path))
--- a/src/Pure/axclass.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/axclass.ML Tue Feb 09 17:06:05 2010 +0100
@@ -484,10 +484,10 @@
def_thy
|> Sign.mandatory_path (Binding.name_of bconst)
|> PureThy.note_thmss ""
- [((Binding.name introN, []), [([Drule.standard raw_intro], [])]),
- ((Binding.name superN, []), [(map Drule.standard raw_classrel, [])]),
+ [((Binding.name introN, []), [([Drule.export_without_context raw_intro], [])]),
+ ((Binding.name superN, []), [(map Drule.export_without_context raw_classrel, [])]),
((Binding.name axiomsN, []),
- [(map (fn th => Drule.standard (class_triv RS th)) raw_axioms, [])])]
+ [(map (fn th => Drule.export_without_context (class_triv RS th)) raw_axioms, [])])]
||> Sign.restore_naming def_thy;
--- a/src/Pure/drule.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/drule.ML Tue Feb 09 17:06:05 2010 +0100
@@ -75,8 +75,8 @@
val beta_conv: cterm -> cterm -> cterm
val types_sorts: thm -> (indexname-> typ option) * (indexname-> sort option)
val flexflex_unique: thm -> thm
- val standard: thm -> thm
- val standard': thm -> thm
+ val export_without_context: thm -> thm
+ val export_without_context_open: thm -> thm
val get_def: theory -> xstring -> thm
val store_thm: binding -> thm -> thm
val store_standard_thm: binding -> thm -> thm
@@ -303,9 +303,9 @@
| _ => raise THM("flexflex_unique: multiple unifiers", 0, [th]);
-(* legacy standard operations *)
+(* old-style export without context *)
-val standard' =
+val export_without_context_open =
implies_intr_hyps
#> forall_intr_frees
#> `Thm.maxidx_of
@@ -315,9 +315,9 @@
#> zero_var_indexes
#> Thm.varifyT);
-val standard =
+val export_without_context =
flexflex_unique
- #> standard'
+ #> export_without_context_open
#> Thm.close_derivation;
@@ -459,8 +459,8 @@
fun store_thm_open name th =
Context.>>> (Context.map_theory_result (PureThy.store_thm_open (name, th)));
-fun store_standard_thm name th = store_thm name (standard th);
-fun store_standard_thm_open name thm = store_thm_open name (standard' thm);
+fun store_standard_thm name th = store_thm name (export_without_context th);
+fun store_standard_thm_open name thm = store_thm_open name (export_without_context_open thm);
val reflexive_thm =
let val cx = certify (Var(("x",0),TVar(("'a",0),[])))
@@ -708,12 +708,12 @@
val protect = Thm.capply (certify Logic.protectC);
val protectI =
- store_thm (Binding.conceal (Binding.name "protectI"))
- (standard (Thm.equal_elim (Thm.symmetric prop_def) (Thm.assume A)));
+ store_standard_thm (Binding.conceal (Binding.name "protectI"))
+ (Thm.equal_elim (Thm.symmetric prop_def) (Thm.assume A));
val protectD =
- store_thm (Binding.conceal (Binding.name "protectD"))
- (standard (Thm.equal_elim prop_def (Thm.assume (protect A))));
+ store_standard_thm (Binding.conceal (Binding.name "protectD"))
+ (Thm.equal_elim prop_def (Thm.assume (protect A)));
val protect_cong =
store_standard_thm_open (Binding.name "protect_cong") (Thm.reflexive (protect A));
@@ -730,8 +730,8 @@
(* term *)
val termI =
- store_thm (Binding.conceal (Binding.name "termI"))
- (standard (Thm.equal_elim (Thm.symmetric term_def) (Thm.forall_intr A (Thm.trivial A))));
+ store_standard_thm (Binding.conceal (Binding.name "termI"))
+ (Thm.equal_elim (Thm.symmetric term_def) (Thm.forall_intr A (Thm.trivial A)));
fun mk_term ct =
let
@@ -759,15 +759,14 @@
(* sort_constraint *)
val sort_constraintI =
- store_thm (Binding.conceal (Binding.name "sort_constraintI"))
- (standard (Thm.equal_elim (Thm.symmetric sort_constraint_def) (mk_term T)));
+ store_standard_thm (Binding.conceal (Binding.name "sort_constraintI"))
+ (Thm.equal_elim (Thm.symmetric sort_constraint_def) (mk_term T));
val sort_constraint_eq =
- store_thm (Binding.conceal (Binding.name "sort_constraint_eq"))
- (standard
- (Thm.equal_intr
- (Thm.implies_intr CA (Thm.implies_elim (Thm.assume CA) (Thm.unvarify sort_constraintI)))
- (implies_intr_list [A, C] (Thm.assume A))));
+ store_standard_thm (Binding.conceal (Binding.name "sort_constraint_eq"))
+ (Thm.equal_intr
+ (Thm.implies_intr CA (Thm.implies_elim (Thm.assume CA) (Thm.unvarify sort_constraintI)))
+ (implies_intr_list [A, C] (Thm.assume A)));
end;
@@ -983,5 +982,5 @@
end;
-structure BasicDrule: BASIC_DRULE = Drule;
-open BasicDrule;
+structure Basic_Drule: BASIC_DRULE = Drule;
+open Basic_Drule;
--- a/src/Pure/goal.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/goal.ML Tue Feb 09 17:06:05 2010 +0100
@@ -210,7 +210,7 @@
fun prove ctxt xs asms prop tac = hd (prove_common true ctxt xs asms [prop] tac);
fun prove_global thy xs asms prop tac =
- Drule.standard (prove (ProofContext.init thy) xs asms prop tac);
+ Drule.export_without_context (prove (ProofContext.init thy) xs asms prop tac);
--- a/src/Pure/old_goals.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/old_goals.ML Tue Feb 09 17:06:05 2010 +0100
@@ -305,7 +305,7 @@
val th = Goal.conclude ath
val thy' = Thm.theory_of_thm th
val {hyps, prop, ...} = Thm.rep_thm th
- val final_th = Drule.standard th
+ val final_th = Drule.export_without_context th
in if not check then final_th
else if not (Theory.eq_thy(thy,thy')) then !result_error_fn state
("Theory of proof state has changed!" ^
@@ -512,7 +512,7 @@
0 => thm
| i => !result_error_fn thm (string_of_int i ^ " unsolved goals!"))
in
- Drule.standard (implies_intr_list As
+ Drule.export_without_context (implies_intr_list As
(check (Seq.pull (EVERY (f asms) (trivial B)))))
end;
--- a/src/Pure/term.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/term.ML Tue Feb 09 17:06:05 2010 +0100
@@ -12,10 +12,10 @@
signature BASIC_TERM =
sig
- eqtype indexname
- eqtype class
- eqtype sort
- eqtype arity
+ type indexname = string * int
+ type class = string
+ type sort = class list
+ type arity = string * sort list * sort
datatype typ =
Type of string * typ list |
TFree of string * sort |
--- a/src/Pure/type_infer.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Pure/type_infer.ML Tue Feb 09 17:06:05 2010 +0100
@@ -284,7 +284,7 @@
| meets _ tye_idx = tye_idx;
- (* occurs check and assigment *)
+ (* occurs check and assignment *)
fun occurs_check tye i (Param (i', S)) =
if i = i' then raise NO_UNIFIER ("Occurs check!", tye)
--- a/src/Sequents/ILL_predlog.thy Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Sequents/ILL_predlog.thy Tue Feb 09 17:06:05 2010 +0100
@@ -22,8 +22,8 @@
"[* A & B *]" == "[*A*] && [*B*]"
"[* A | B *]" == "![*A*] ++ ![*B*]"
- "[* - A *]" == "[*A > ff*]"
- "[* ff *]" == "0"
+ "[* - A *]" == "[*A > CONST ff*]"
+ "[* XCONST ff *]" == "0"
"[* A = B *]" => "[* (A > B) & (B > A) *]"
"[* A > B *]" == "![*A*] -o [*B*]"
--- a/src/Sequents/simpdata.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Sequents/simpdata.ML Tue Feb 09 17:06:05 2010 +0100
@@ -49,9 +49,9 @@
(*Congruence rules for = or <-> (instead of ==)*)
fun mk_meta_cong rl =
- Drule.standard(mk_meta_eq (mk_meta_prems rl))
- handle THM _ =>
- error("Premises and conclusion of congruence rules must use =-equality or <->");
+ Drule.export_without_context(mk_meta_eq (mk_meta_prems rl))
+ handle THM _ =>
+ error("Premises and conclusion of congruence rules must use =-equality or <->");
(*** Standard simpsets ***)
--- a/src/Tools/Code/code_eval.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Tools/Code/code_eval.ML Tue Feb 09 17:06:05 2010 +0100
@@ -107,25 +107,25 @@
val _ = map2 check_base constrs constrs'';
in "datatype " ^ tyco'' ^ " = datatype " ^ Long_Name.append all_struct_name tyco'' end;
-fun print_code struct_name is_first print_it ctxt =
+fun print_code is_first print_it ctxt =
let
val (_, (_, (struct_code_name, acc_code))) = CodeAntiqData.get ctxt;
val (ml_code, (tycos_map, consts_map)) = Lazy.force acc_code;
val ml_code = if is_first then ml_code
else "";
- val all_struct_name = Long_Name.append struct_name struct_code_name;
+ val all_struct_name = "Isabelle." ^ struct_code_name;
in (ml_code, print_it all_struct_name tycos_map consts_map) end;
in
-fun ml_code_antiq raw_const {struct_name, background} =
+fun ml_code_antiq raw_const background =
let
val const = Code.check_const (ProofContext.theory_of background) raw_const;
val is_first = is_first_occ background;
val background' = register_const const background;
- in (print_code struct_name is_first (print_const const), background') end;
+ in (print_code is_first (print_const const), background') end;
-fun ml_code_datatype_antiq (raw_tyco, raw_constrs) {struct_name, background} =
+fun ml_code_datatype_antiq (raw_tyco, raw_constrs) background =
let
val thy = ProofContext.theory_of background;
val tyco = Sign.intern_type thy raw_tyco;
@@ -135,7 +135,7 @@
else error ("Type " ^ quote tyco ^ ": given constructors diverge from real constructors")
val is_first = is_first_occ background;
val background' = register_datatype tyco constrs background;
- in (print_code struct_name is_first (print_datatype tyco constrs), background') end;
+ in (print_code is_first (print_datatype tyco constrs), background') end;
end; (*local*)
--- a/src/Tools/Compute_Oracle/am_ghc.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Tools/Compute_Oracle/am_ghc.ML Tue Feb 09 17:06:05 2010 +0100
@@ -228,7 +228,7 @@
val module_file = tmp_file (module^".hs")
val object_file = tmp_file (module^".o")
val _ = writeTextFile module_file source
- val _ = system ((!ghc)^" -c "^module_file)
+ val _ = bash ((!ghc)^" -c "^module_file)
val _ = if not (fileExists object_file) then raise Compile ("Failure compiling haskell code (GHC_PATH = '"^(!ghc)^"')") else ()
in
(guid, module_file, arity)
@@ -309,7 +309,7 @@
val term = print_term arity_of 0 t
val call_source = "module "^call^" where\n\nimport "^module^"\n\ncall = "^module^".calc \""^result_file^"\" ("^term^")"
val _ = writeTextFile call_file call_source
- val _ = system ((!ghc)^" -e \""^call^".call\" "^module_file^" "^call_file)
+ val _ = bash ((!ghc)^" -e \""^call^".call\" "^module_file^" "^call_file)
val result = readResultFile result_file handle IO.Io _ => raise Run ("Failure running haskell compiler (GHC_PATH = '"^(!ghc)^"')")
val t' = parse_result arity_of result
val _ = OS.FileSys.remove call_file
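
Both hunks above replace the generic system call with Isabelle's bash wrapper when invoking GHC. A minimal sketch of the underlying idea, running a whole command line through bash, written in Scala rather than Isabelle/ML and assuming bash and ghc are available on the PATH:

  object BashRunSketch {
    import scala.sys.process._

    // hand the complete command line to bash, as the patched code now does via Isabelle's wrapper
    def bashRun(cmdline: String): Int = Seq("bash", "-c", cmdline).!

    def main(args: Array[String]): Unit = {
      val ghc = "ghc"  // stand-in for the configurable !ghc path
      println(bashRun(ghc + " --version"))
    }
  }
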
--- a/src/Tools/jEdit/README_BUILD Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Tools/jEdit/README_BUILD Tue Feb 09 17:06:05 2010 +0100
@@ -2,10 +2,10 @@
Requirements to build from sources
==================================
-* Proper Java JRE/JDK from Sun, e.g. 1.6.0_17
+* Proper Java JRE/JDK from Sun, e.g. 1.6.0_18
http://java.sun.com/javase/downloads/index.jsp
-* Netbeans 6.7
+* Netbeans 6.7.1
http://www.netbeans.org/downloads/index.html
* Scala for Netbeans: version 6.7v1 for NB 6.7
@@ -13,7 +13,7 @@
http://blogtrader.net/dcaoyuan/category/NetBeans
http://wiki.netbeans.org/Scala
-* jEdit 4.3 (final)
+* jEdit 4.3.1 (final)
http://www.jedit.org/
Netbeans Project "jEdit": install official sources as ./contrib/jEdit/.
--- a/src/Tools/jEdit/src/jedit/isabelle_sidekick.scala Tue Feb 09 16:07:51 2010 +0100
+++ b/src/Tools/jEdit/src/jedit/isabelle_sidekick.scala Tue Feb 09 17:06:05 2010 +0100
@@ -88,11 +88,11 @@
case None => null
case Some((word, cs)) =>
val ds =
- if (Isabelle_Encoding.is_active(buffer))
+ (if (Isabelle_Encoding.is_active(buffer))
cs.map(Isabelle.system.symbols.decode(_)).sort(_ < _)
- else cs
- new SideKickCompletion(pane.getView, word, ds.toArray.asInstanceOf[Array[Object]]) { }
+ else cs).filter(_ != word)
+ if (ds.isEmpty) null
+ else new SideKickCompletion(pane.getView, word, ds.toArray.asInstanceOf[Array[Object]]) { }
}
}
-
}
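
The jEdit change above drops the word being completed from the candidate list and suppresses the completion popup when nothing else remains. A self-contained Scala sketch of just that filtering step, with invented helper names (the real code builds a SideKickCompletion from jEdit's API):

  object CompletionFilterSketch {
    // decode stands in for Isabelle.system.symbols.decode; identity suffices for plain ASCII
    def filterCandidates(word: String, cs: List[String], decode: String => String): Option[List[String]] =
      cs.map(decode).sortWith(_ < _).filter(_ != word) match {
        case Nil => None       // no popup at all
        case ds  => Some(ds)   // candidates to offer
      }

    def main(args: Array[String]): Unit = {
      println(filterCandidates("foo", List("foo", "foobar", "fold"), identity))  // Some(List(fold, foobar))
      println(filterCandidates("foo", List("foo"), identity))                    // None
    }
  }
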
--- a/src/ZF/Tools/datatype_package.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/ZF/Tools/datatype_package.ML Tue Feb 09 17:06:05 2010 +0100
@@ -292,7 +292,7 @@
rtac case_trans 1,
REPEAT (resolve_tac [refl, split_trans, Su.case_inl RS trans, Su.case_inr RS trans] 1)]);
- val free_iffs = map Drule.standard (con_defs RL [@{thm def_swap_iff}]);
+ val free_iffs = map Drule.export_without_context (con_defs RL [@{thm def_swap_iff}]);
val case_eqns = map prove_case_eqn (flat con_ty_lists ~~ case_args ~~ tl con_defs);
@@ -338,7 +338,7 @@
val constructors =
map (head_of o #1 o Logic.dest_equals o #prop o rep_thm) (tl con_defs);
- val free_SEs = map Drule.standard (Ind_Syntax.mk_free_SEs free_iffs);
+ val free_SEs = map Drule.export_without_context (Ind_Syntax.mk_free_SEs free_iffs);
val {intrs, elim, induct, mutual_induct, ...} = ind_result
--- a/src/ZF/Tools/inductive_package.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/ZF/Tools/inductive_package.ML Tue Feb 09 17:06:05 2010 +0100
@@ -193,9 +193,9 @@
[rtac (@{thm Collect_subset} RS @{thm bnd_monoI}) 1,
REPEAT (ares_tac (@{thms basic_monos} @ monos) 1)]);
- val dom_subset = Drule.standard (big_rec_def RS Fp.subs);
+ val dom_subset = Drule.export_without_context (big_rec_def RS Fp.subs);
- val unfold = Drule.standard ([big_rec_def, bnd_mono] MRS Fp.Tarski);
+ val unfold = Drule.export_without_context ([big_rec_def, bnd_mono] MRS Fp.Tarski);
(********)
val dummy = writeln " Proving the introduction rules...";
@@ -205,7 +205,7 @@
val Part_trans =
case rec_names of
[_] => asm_rl
- | _ => Drule.standard (@{thm Part_subset} RS @{thm subset_trans});
+ | _ => Drule.export_without_context (@{thm Part_subset} RS @{thm subset_trans});
(*To type-check recursive occurrences of the inductive sets, possibly
enclosed in some monotonic operator M.*)
@@ -272,7 +272,7 @@
rule_by_tactic
(basic_elim_tac THEN ALLGOALS (asm_full_simp_tac ss) THEN basic_elim_tac)
(Thm.assume A RS elim)
- |> Drule.standard';
+ |> Drule.export_without_context_open;
fun induction_rules raw_induct thy =
let
@@ -503,7 +503,7 @@
val Const (@{const_name Trueprop}, _) $ (pred_var $ _) = concl_of induct0
val induct = CP.split_rule_var(pred_var, elem_type-->FOLogic.oT, induct0)
- |> Drule.standard
+ |> Drule.export_without_context
and mutual_induct = CP.remove_split mutual_induct_fsplit
val ([induct', mutual_induct'], thy') =
@@ -514,7 +514,7 @@
in ((thy', induct'), mutual_induct')
end; (*of induction_rules*)
- val raw_induct = Drule.standard ([big_rec_def, bnd_mono] MRS Fp.induct)
+ val raw_induct = Drule.export_without_context ([big_rec_def, bnd_mono] MRS Fp.induct)
val ((thy2, induct), mutual_induct) =
if not coind then induction_rules raw_induct thy1
--- a/src/ZF/ind_syntax.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/ZF/ind_syntax.ML Tue Feb 09 17:06:05 2010 +0100
@@ -114,7 +114,7 @@
| tryres (th, []) = raise THM("tryres", 0, [th]);
fun gen_make_elim elim_rls rl =
- Drule.standard (tryres (rl, elim_rls @ [revcut_rl]));
+ Drule.export_without_context (tryres (rl, elim_rls @ [revcut_rl]));
(*Turns iff rules into safe elimination rules*)
fun mk_free_SEs iffs = map (gen_make_elim [@{thm conjE}, @{thm FalseE}]) (iffs RL [@{thm iffD1}]);
--- a/src/ZF/int_arith.ML Tue Feb 09 16:07:51 2010 +0100
+++ b/src/ZF/int_arith.ML Tue Feb 09 17:06:05 2010 +0100
@@ -110,9 +110,8 @@
(*To let us treat subtraction as addition*)
val diff_simps = [@{thm zdiff_def}, @{thm zminus_zadd_distrib}, @{thm zminus_zminus}];
-(*push the unary minus down: - x * y = x * - y *)
-val int_minus_mult_eq_1_to_2 =
- [@{thm zmult_zminus}, @{thm zmult_zminus_right} RS sym] MRS trans |> Drule.standard;
+(*push the unary minus down*)
+val int_minus_mult_eq_1_to_2 = @{lemma "$- w $* z = w $* $- z" by simp};
(*to extract again any uncancelled minuses*)
val int_minus_from_mult_simps =