--- a/Admin/isatest/isatest-stats Thu Feb 26 10:13:43 2009 +0100
+++ b/Admin/isatest/isatest-stats Fri Feb 27 18:50:35 2009 +0100
@@ -16,6 +16,7 @@
HOL-Algebra \
HOL-Auth \
HOL-Bali \
+ HOL-Decision_Procs \
HOL-Extraction \
HOL-Hoare \
HOL-HoareParallel \
--- a/Admin/isatest/settings/sun-poly Thu Feb 26 10:13:43 2009 +0100
+++ b/Admin/isatest/settings/sun-poly Fri Feb 27 18:50:35 2009 +0100
@@ -4,7 +4,7 @@
ML_SYSTEM="polyml-5.1"
ML_PLATFORM="sparc-solaris"
ML_HOME="$POLYML_HOME/$ML_PLATFORM"
- ML_OPTIONS="-H 1500"
+ ML_OPTIONS="-H 800"
ISABELLE_HOME_USER=/tmp/isabelle-sun-poly
--- a/Admin/makedist Thu Feb 26 10:13:43 2009 +0100
+++ b/Admin/makedist Fri Feb 27 18:50:35 2009 +0100
@@ -4,7 +4,7 @@
## global settings
-REPOS="https://isabelle.in.tum.de/repos/isabelle"
+REPOS="http://isabelle.in.tum.de/repos/isabelle"
DISTPREFIX=${DISTPREFIX:-~/tmp/isadist}
@@ -156,7 +156,7 @@
rm doc/codegen_process.pdf
rm -rf doc-src
-mkdir contrib
+mkdir -p contrib
cp doc/isabelle*.eps lib/logo
--- a/NEWS Thu Feb 26 10:13:43 2009 +0100
+++ b/NEWS Fri Feb 27 18:50:35 2009 +0100
@@ -6,6 +6,10 @@
*** General ***
+* The main reference manuals (isar-ref, implementation, system) have
+been updated and extended. Formally checked references are now
+uniformly available as hyperlinks.
+
* Simplified main Isabelle executables, with fewer surprises on
case-insensitive file-systems (such as Mac OS).
@@ -47,9 +51,6 @@
regular 4-core machine, if the initial heap space is made reasonably
large (cf. Poly/ML option -H). [Poly/ML 5.2.1 or later]
-* The Isabelle System Manual (system) has been updated, with formally
-checked references as hyperlinks.
-
* Generalized Isar history, with support for linear undo, direct state
addressing etc.
@@ -111,30 +112,32 @@
unify_trace_bound = 50 (formerly 25)
unify_search_bound = 60 (formerly 30)
-* Different bookkeeping for code equations:
- a) On theory merge, the last set of code equations for a particular constant
- is taken (in accordance with the policy applied by other parts of the
- code generator framework).
- b) Code equations stemming from explicit declarations (e.g. code attribute)
- gain priority over default code equations stemming from definition, primrec,
- fun etc.
- INCOMPATIBILITY.
-
-* Global versions of theorems stemming from classes do not carry
-a parameter prefix any longer. INCOMPATIBILITY.
+* Different bookkeeping for code equations (INCOMPATIBILITY):
+
+ a) On theory merge, the last set of code equations for a particular
+ constant is taken (in accordance with the policy applied by other
+ parts of the code generator framework).
+
+ b) Code equations stemming from explicit declarations (e.g. code
+ attribute) gain priority over default code equations stemming
+ from definition, primrec, fun etc.
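+
+  For illustration (the constant "double" and the lemma name are
+  hypothetical): given
+
+    definition double :: "nat => nat" where "double n = 2 * n"
+
+  an explicit declaration such as
+
+    lemma double_code [code]: "double n = n + n"
+      by (simp add: double_def)
+
+  now takes priority over the default code equation stemming from
+  the definition.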
+
+* Global versions of theorems stemming from classes do not carry a
+parameter prefix any longer. INCOMPATIBILITY.
* Dropped locale element "includes". This is a major INCOMPATIBILITY.
In existing theorem specifications replace the includes element by the
-respective context elements of the included locale, omitting those that
-are already present in the theorem specification. Multiple assume
-elements of a locale should be replaced by a single one involving the
-locale predicate. In the proof body, declarations (most notably
-theorems) may be regained by interpreting the respective locales in the
-proof context as required (command "interpret").
+respective context elements of the included locale, omitting those
+that are already present in the theorem specification. Multiple
+assume elements of a locale should be replaced by a single one
+involving the locale predicate. In the proof body, declarations (most
+notably theorems) may be regained by interpreting the respective
+locales in the proof context as required (command "interpret").
+
If using "includes" in place of a target solely because the
parameter types in the theorem are not as general as in the target,
-consider declaring a new locale with additional type constraints on the
-parameters (context element "constrains").
+consider declaring a new locale with additional type constraints on
+the parameters (context element "constrains").
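+
+  A rough sketch with a hypothetical locale and type constraint
+  (see the isar-ref manual for precise syntax):
+
+    locale nat_semigroup = semigroup +
+      constrains f :: "nat => nat => nat"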
* Dropped "locale (open)". INCOMPATIBILITY.
@@ -145,9 +148,9 @@
* Interpretation commands no longer accept interpretation attributes.
INCOMPATIBILITY.
-* Complete re-implementation of locales. INCOMPATIBILITY.
-The most important changes are listed below. See documentation
-(forthcoming) and tutorial (also forthcoming) for details.
+* Complete re-implementation of locales. INCOMPATIBILITY. The most
+important changes are listed below. See documentation (forthcoming)
+and tutorial (also forthcoming) for details.
- In locale expressions, instantiation replaces renaming. Parameters
must be declared in a for clause. To aid compatibility with previous
@@ -161,15 +164,15 @@
- More flexible mechanisms to qualify names generated by locale
expressions. Qualifiers (prefixes) may be specified in locale
-expressions. Available are normal qualifiers (syntax "name:") and strict
-qualifiers (syntax "name!:"). The latter must occur in name references
-and are useful to avoid accidental hiding of names, the former are
-optional. Qualifiers derived from the parameter names of a locale are no
-longer generated.
-
-- "sublocale l < e" replaces "interpretation l < e". The instantiation
-clause in "interpretation" and "interpret" (square brackets) is no
-longer available. Use locale expressions.
+expressions. Available are normal qualifiers (syntax "name:") and
+strict qualifiers (syntax "name!:"). The latter must occur in name
+references and are useful to avoid accidental hiding of names, the
+former are optional. Qualifiers derived from the parameter names of a
+locale are no longer generated.
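+
+  A hypothetical illustration (the locale "order" and its instance
+  are made up; proof obligations are omitted):
+
+    interpretation int: order "op <= :: int => int => bool"
+    interpretation int!: order "op <= :: int => int => bool"
+
+  Here theorems from the first interpretation may be referenced
+  with or without the prefix "int", those from the second only as
+  "int.<name>".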
+
+- "sublocale l < e" replaces "interpretation l < e". The
+instantiation clause in "interpretation" and "interpret" (square
+brackets) is no longer available. Use locale expressions.
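+
+  Schematically (locale names are hypothetical):
+
+    (* formerly: *) interpretation lattice < partial_order
+    (* now: *)      sublocale lattice < partial_order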
- When converting proof scripts, be sure to replace qualifiers in
"interpretation" and "interpret" by strict qualifiers. Qualifiers in
@@ -183,8 +186,8 @@
* The 'axiomatization' command now only works within a global theory
context. INCOMPATIBILITY.
-* New find_theorems criterion "solves" matching theorems that
-directly solve the current goal. Try "find_theorems solves".
+* New find_theorems criterion "solves" matching theorems that directly
+solve the current goal. Try "find_theorems solves".
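+
+  For example (an illustrative sketch; this should report, among
+  others, the library fact rev_rev_ident):
+
+    lemma "rev (rev xs) = xs"
+      find_theorems solves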
* Added an auto solve option, which can be enabled through the
ProofGeneral Isabelle settings menu (disabled by default).
@@ -193,14 +196,15 @@
stated. Any theorems that could solve the lemma directly are listed
underneath the goal.
-* New command find_consts searches for constants based on type and name
-patterns, e.g.
+* New command find_consts searches for constants based on type and
+name patterns, e.g.
find_consts "_ => bool"
-By default, matching is against subtypes, but it may be restricted to the
-whole type. Searching by name is possible. Multiple queries are conjunctive
-and queries may be negated by prefixing them with a hyphen:
+By default, matching is against subtypes, but it may be restricted to
+the whole type. Searching by name is possible. Multiple queries are
+conjunctive and queries may be negated by prefixing them with a
+hyphen:
find_consts strict: "_ => bool" name: "Int" -"int => int"
@@ -312,7 +316,7 @@
process. New thread-based implementation also works on non-Unix
platforms (Cygwin). Provers are no longer hardwired, but defined
within the theory via plain ML wrapper functions. Basic Sledgehammer
-commands are covered in the isar-ref manual
+commands are covered in the isar-ref manual.
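+
+  For instance (a schematic illustration), issuing
+
+    sledgehammer
+
+  at some proof state invokes the configured provers on the current
+  goal.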
* Wrapper scripts for the remote SystemOnTPTP service allow using
sledgehammer without a local ATP installation (Vampire etc.). See also
@@ -383,6 +387,7 @@
zdvd_triv_left -> dvd_triv_left
zdvd_triv_right -> dvd_triv_right
zdvd_zmult_cancel_disj -> dvd_mult_cancel_left
+zmod_eq0_zdvd_iff -> dvd_eq_mod_eq_0[symmetric]
zmod_zadd_left_eq -> mod_add_left_eq
zmod_zadd_right_eq -> mod_add_right_eq
zmod_zadd_self1 -> mod_add_self1
@@ -495,9 +500,8 @@
*** HOL-Algebra ***
* New locales for orders and lattices where the equivalence relation
- is not restricted to equality. INCOMPATIBILITY: all order and
- lattice locales use a record structure with field eq for the
- equivalence.
+is not restricted to equality. INCOMPATIBILITY: all order and lattice
+locales use a record structure with field eq for the equivalence.
* New theory of factorial domains.
--- a/doc-src/AxClass/Group/Group.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,322 +0,0 @@
-
-header {* Basic group theory *}
-
-theory Group imports Main begin
-
-text {*
- \medskip\noindent The meta-level type system of Isabelle supports
- \emph{intersections} and \emph{inclusions} of type classes. These
- directly correspond to intersections and inclusions of type
- predicates in a purely set theoretic sense. This is sufficient as a
- means to describe simple hierarchies of structures. As an
- illustration, we use the well-known example of semigroups, monoids,
- general groups and Abelian groups.
-*}
-
-subsection {* Monoids and Groups *}
-
-text {*
- First we declare some polymorphic constants required later for the
- signature parts of our structures.
-*}
-
-consts
- times :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<odot>" 70)
- invers :: "'a \<Rightarrow> 'a" ("(_\<inv>)" [1000] 999)
- one :: 'a ("\<one>")
-
-text {*
- \noindent Next we define class @{text monoid} of monoids with
- operations @{text \<odot>} and @{text \<one>}. Note that multiple class
- axioms are allowed for user convenience --- they simply represent
- the conjunction of their respective universal closures.
-*}
-
-axclass monoid \<subseteq> type
- assoc: "(x \<odot> y) \<odot> z = x \<odot> (y \<odot> z)"
- left_unit: "\<one> \<odot> x = x"
- right_unit: "x \<odot> \<one> = x"
-
-text {*
- \noindent So class @{text monoid} contains exactly those types
- @{text \<tau>} where @{text "\<odot> \<Colon> \<tau> \<Rightarrow> \<tau> \<Rightarrow> \<tau>"} and @{text "\<one> \<Colon> \<tau>"}
- are specified appropriately, such that @{text \<odot>} is associative and
- @{text \<one>} is a left and right unit element for the @{text \<odot>}
- operation.
-*}
-
-text {*
- \medskip Independently of @{text monoid}, we now define a linear
- hierarchy of semigroups, general groups and Abelian groups. Note
- that the names of class axioms are automatically qualified with each
- class name, so we may re-use common names such as @{text assoc}.
-*}
-
-axclass semigroup \<subseteq> type
- assoc: "(x \<odot> y) \<odot> z = x \<odot> (y \<odot> z)"
-
-axclass group \<subseteq> semigroup
- left_unit: "\<one> \<odot> x = x"
- left_inverse: "x\<inv> \<odot> x = \<one>"
-
-axclass agroup \<subseteq> group
- commute: "x \<odot> y = y \<odot> x"
-
-text {*
- \noindent Class @{text group} inherits associativity of @{text \<odot>}
- from @{text semigroup} and adds two further group axioms. Similarly,
- @{text agroup} is defined as the subset of @{text group} such that
- for all of its elements @{text \<tau>}, the operation @{text "\<odot> \<Colon> \<tau> \<Rightarrow> \<tau> \<Rightarrow>
- \<tau>"} is even commutative.
-*}
-
-
-subsection {* Abstract reasoning *}
-
-text {*
- In a sense, axiomatic type classes may be viewed as \emph{abstract
-  theories}. The above class definitions give rise to abstract axioms
- @{text assoc}, @{text left_unit}, @{text left_inverse}, @{text
- commute}, where any of these contain a type variable @{text "'a \<Colon>
- c"} that is restricted to types of the corresponding class @{text
- c}. \emph{Sort constraints} like this express a logical
- precondition for the whole formula. For example, @{text assoc}
- states that for all @{text \<tau>}, provided that @{text "\<tau> \<Colon>
- semigroup"}, the operation @{text "\<odot> \<Colon> \<tau> \<Rightarrow> \<tau> \<Rightarrow> \<tau>"} is associative.
-
- \medskip From a technical point of view, abstract axioms are just
- ordinary Isabelle theorems, which may be used in proofs without
- special treatment. Such ``abstract proofs'' usually yield new
- ``abstract theorems''. For example, we may now derive the following
- well-known laws of general groups.
-*}
-
-theorem group_right_inverse: "x \<odot> x\<inv> = (\<one>\<Colon>'a\<Colon>group)"
-proof -
- have "x \<odot> x\<inv> = \<one> \<odot> (x \<odot> x\<inv>)"
- by (simp only: group_class.left_unit)
- also have "... = \<one> \<odot> x \<odot> x\<inv>"
- by (simp only: semigroup_class.assoc)
- also have "... = (x\<inv>)\<inv> \<odot> x\<inv> \<odot> x \<odot> x\<inv>"
- by (simp only: group_class.left_inverse)
- also have "... = (x\<inv>)\<inv> \<odot> (x\<inv> \<odot> x) \<odot> x\<inv>"
- by (simp only: semigroup_class.assoc)
- also have "... = (x\<inv>)\<inv> \<odot> \<one> \<odot> x\<inv>"
- by (simp only: group_class.left_inverse)
- also have "... = (x\<inv>)\<inv> \<odot> (\<one> \<odot> x\<inv>)"
- by (simp only: semigroup_class.assoc)
- also have "... = (x\<inv>)\<inv> \<odot> x\<inv>"
- by (simp only: group_class.left_unit)
- also have "... = \<one>"
- by (simp only: group_class.left_inverse)
- finally show ?thesis .
-qed
-
-text {*
- \noindent With @{text group_right_inverse} already available, @{text
- group_right_unit}\label{thm:group-right-unit} is now established
-  much more easily.
-*}
-
-theorem group_right_unit: "x \<odot> \<one> = (x\<Colon>'a\<Colon>group)"
-proof -
- have "x \<odot> \<one> = x \<odot> (x\<inv> \<odot> x)"
- by (simp only: group_class.left_inverse)
- also have "... = x \<odot> x\<inv> \<odot> x"
- by (simp only: semigroup_class.assoc)
- also have "... = \<one> \<odot> x"
- by (simp only: group_right_inverse)
- also have "... = x"
- by (simp only: group_class.left_unit)
- finally show ?thesis .
-qed
-
-text {*
- \medskip Abstract theorems may be instantiated to only those types
- @{text \<tau>} where the appropriate class membership @{text "\<tau> \<Colon> c"} is
- known at Isabelle's type signature level. Since we have @{text
- "agroup \<subseteq> group \<subseteq> semigroup"} by definition, all theorems of @{text
- semigroup} and @{text group} are automatically inherited by @{text
- group} and @{text agroup}.
-*}
-
-
-subsection {* Abstract instantiation *}
-
-text {*
- From the definition, the @{text monoid} and @{text group} classes
- have been independent. Note that for monoids, @{text right_unit}
- had to be included as an axiom, but for groups both @{text
- right_unit} and @{text right_inverse} are derivable from the other
- axioms. With @{text group_right_unit} derived as a theorem of group
- theory (see page~\pageref{thm:group-right-unit}), we may now
- instantiate @{text "monoid \<subseteq> semigroup"} and @{text "group \<subseteq>
- monoid"} properly as follows (cf.\ \figref{fig:monoid-group}).
-
- \begin{figure}[htbp]
- \begin{center}
- \small
- \unitlength 0.6mm
- \begin{picture}(65,90)(0,-10)
- \put(15,10){\line(0,1){10}} \put(15,30){\line(0,1){10}}
- \put(15,50){\line(1,1){10}} \put(35,60){\line(1,-1){10}}
- \put(15,5){\makebox(0,0){@{text agroup}}}
- \put(15,25){\makebox(0,0){@{text group}}}
- \put(15,45){\makebox(0,0){@{text semigroup}}}
- \put(30,65){\makebox(0,0){@{text type}}} \put(50,45){\makebox(0,0){@{text monoid}}}
- \end{picture}
- \hspace{4em}
- \begin{picture}(30,90)(0,0)
- \put(15,10){\line(0,1){10}} \put(15,30){\line(0,1){10}}
- \put(15,50){\line(0,1){10}} \put(15,70){\line(0,1){10}}
- \put(15,5){\makebox(0,0){@{text agroup}}}
- \put(15,25){\makebox(0,0){@{text group}}}
- \put(15,45){\makebox(0,0){@{text monoid}}}
- \put(15,65){\makebox(0,0){@{text semigroup}}}
- \put(15,85){\makebox(0,0){@{text type}}}
- \end{picture}
- \caption{Monoids and groups: according to definition, and by proof}
- \label{fig:monoid-group}
- \end{center}
- \end{figure}
-*}
-
-instance monoid \<subseteq> semigroup
-proof
- fix x y z :: "'a\<Colon>monoid"
- show "x \<odot> y \<odot> z = x \<odot> (y \<odot> z)"
- by (rule monoid_class.assoc)
-qed
-
-instance group \<subseteq> monoid
-proof
- fix x y z :: "'a\<Colon>group"
- show "x \<odot> y \<odot> z = x \<odot> (y \<odot> z)"
- by (rule semigroup_class.assoc)
- show "\<one> \<odot> x = x"
- by (rule group_class.left_unit)
- show "x \<odot> \<one> = x"
- by (rule group_right_unit)
-qed
-
-text {*
- \medskip The \isakeyword{instance} command sets up an appropriate
- goal that represents the class inclusion (or type arity, see
- \secref{sec:inst-arity}) to be proven (see also
- \cite{isabelle-isar-ref}). The initial proof step causes
- back-chaining of class membership statements wrt.\ the hierarchy of
- any classes defined in the current theory; the effect is to reduce
-  the initial statement to a number of goals that directly
- correspond to any class axioms encountered on the path upwards
- through the class hierarchy.
-*}
-
-
-subsection {* Concrete instantiation \label{sec:inst-arity} *}
-
-text {*
- So far we have covered the case of the form
- \isakeyword{instance}~@{text "c\<^sub>1 \<subseteq> c\<^sub>2"}, namely
-  \emph{abstract instantiation} --- @{text "c\<^sub>1"} is more special
-  than @{text "c\<^sub>2"} and thus an instance of @{text "c\<^sub>2"}. Even more
- interesting for practical applications are \emph{concrete
- instantiations} of axiomatic type classes. That is, certain simple
- schemes @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Colon> c"} of class
- membership may be established at the logical level and then
- transferred to Isabelle's type signature level.
-
- \medskip As a typical example, we show that type @{typ bool} with
- exclusive-or as @{text \<odot>} operation, identity as @{text \<inv>}, and
- @{term False} as @{text \<one>} forms an Abelian group.
-*}
-
-defs (overloaded)
- times_bool_def: "x \<odot> y \<equiv> x \<noteq> (y\<Colon>bool)"
- inverse_bool_def: "x\<inv> \<equiv> x\<Colon>bool"
- unit_bool_def: "\<one> \<equiv> False"
-
-text {*
-  \medskip It is important to note that the above \isakeyword{defs} are
-  just overloaded meta-level constant definitions, where type classes
-  are not yet involved at all. This form of constant definition with
-  overloading (and optional recursion over the syntactic structure of
-  simple types) is admissible as a definitional extension of plain HOL
- \cite{Wenzel:1997:TPHOL}. The Haskell-style type system is not
- required for overloading. Nevertheless, overloaded definitions are
- best applied in the context of type classes.
-
-  \medskip Since we have chosen the above \isakeyword{defs} of the generic
-  group operations on type @{typ bool} appropriately, the class
-  membership @{text "bool \<Colon> agroup"} may now be derived as follows.
-*}
-
-instance bool :: agroup
-proof (intro_classes,
- unfold times_bool_def inverse_bool_def unit_bool_def)
- fix x y z
- show "((x \<noteq> y) \<noteq> z) = (x \<noteq> (y \<noteq> z))" by blast
- show "(False \<noteq> x) = x" by blast
- show "(x \<noteq> x) = False" by blast
- show "(x \<noteq> y) = (y \<noteq> x)" by blast
-qed
-
-text {*
- The result of an \isakeyword{instance} statement is both expressed
- as a theorem of Isabelle's meta-logic, and as a type arity of the
-  type signature. The latter enables the type-inference system to take
- care of this new instance automatically.
-
- \medskip We could now also instantiate our group theory classes to
- many other concrete types. For example, @{text "int \<Colon> agroup"}
- (e.g.\ by defining @{text \<odot>} as addition, @{text \<inv>} as negation
- and @{text \<one>} as zero) or @{text "list \<Colon> (type) semigroup"}
- (e.g.\ if @{text \<odot>} is defined as list append). Thus, the
- characteristic constants @{text \<odot>}, @{text \<inv>}, @{text \<one>}
- really become overloaded, i.e.\ have different meanings on different
- types.
-*}
-
-
-subsection {* Lifting and Functors *}
-
-text {*
- As already mentioned above, overloading in the simply-typed HOL
- systems may include recursion over the syntactic structure of types.
- That is, definitional equations @{text "c\<^sup>\<tau> \<equiv> t"} may also
- contain constants of name @{text c} on the right-hand side --- if
- these have types that are structurally simpler than @{text \<tau>}.
-
- This feature enables us to \emph{lift operations}, say to Cartesian
- products, direct sums or function spaces. Subsequently we lift
- @{text \<odot>} component-wise to binary products @{typ "'a \<times> 'b"}.
-*}
-
-defs (overloaded)
- times_prod_def: "p \<odot> q \<equiv> (fst p \<odot> fst q, snd p \<odot> snd q)"
-
-text {*
- It is very easy to see that associativity of @{text \<odot>} on @{typ 'a}
- and @{text \<odot>} on @{typ 'b} transfers to @{text \<odot>} on @{typ "'a \<times>
- 'b"}. Hence the binary type constructor @{text \<odot>} maps semigroups
- to semigroups. This may be established formally as follows.
-*}
-
-instance * :: (semigroup, semigroup) semigroup
-proof (intro_classes, unfold times_prod_def)
- fix p q r :: "'a\<Colon>semigroup \<times> 'b\<Colon>semigroup"
- show
- "(fst (fst p \<odot> fst q, snd p \<odot> snd q) \<odot> fst r,
- snd (fst p \<odot> fst q, snd p \<odot> snd q) \<odot> snd r) =
- (fst p \<odot> fst (fst q \<odot> fst r, snd q \<odot> snd r),
- snd p \<odot> snd (fst q \<odot> fst r, snd q \<odot> snd r))"
- by (simp add: semigroup_class.assoc)
-qed
-
-text {*
- Thus, if we view class instances as ``structures'', then overloaded
- constant definitions with recursion over types indirectly provide
- some kind of ``functors'' --- i.e.\ mappings between abstract
- theories.
-*}
-
-end
--- a/doc-src/AxClass/Group/Product.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-
-header {* Syntactic classes *}
-
-theory Product imports Main begin
-
-text {*
- \medskip\noindent There is still a feature of Isabelle's type system
- left that we have not yet discussed. When declaring polymorphic
- constants @{text "c \<Colon> \<sigma>"}, the type variables occurring in @{text \<sigma>}
- may be constrained by type classes (or even general sorts) in an
- arbitrary way. Note that by default, in Isabelle/HOL the
- declaration @{text "\<odot> \<Colon> 'a \<Rightarrow> 'a \<Rightarrow> 'a"} is actually an abbreviation
-  for @{text "\<odot> \<Colon> 'a\<Colon>type \<Rightarrow> 'a \<Rightarrow> 'a"}. Since class @{text type} is the
- universal class of HOL, this is not really a constraint at all.
-
- The @{text product} class below provides a less degenerate example of
- syntactic type classes.
-*}
-
-axclass
- product \<subseteq> type
-consts
- product :: "'a\<Colon>product \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<odot>" 70)
-
-text {*
- Here class @{text product} is defined as subclass of @{text type}
-  without any additional axioms. This results in logical equivalence
- of @{text product} and @{text type}, as is reflected by the trivial
- introduction rule generated for this definition.
-
- \medskip So what is the difference of declaring @{text "\<odot> \<Colon>
- 'a\<Colon>product \<Rightarrow> 'a \<Rightarrow> 'a"} vs.\ declaring @{text "\<odot> \<Colon> 'a\<Colon>type \<Rightarrow> 'a \<Rightarrow>
- 'a"} anyway? In this particular case where @{text "product \<equiv>
- type"}, it should be obvious that both declarations are the same
- from the logic's point of view. It even makes the most sense to
- remove sort constraints from constant declarations, as far as the
- purely logical meaning is concerned \cite{Wenzel:1997:TPHOL}.
-
- On the other hand there are syntactic differences, of course.
- Constants @{text \<odot>} on some type @{text \<tau>} are rejected by the
- type-checker, unless the arity @{text "\<tau> \<Colon> product"} is part of the
-  type signature. In our example, this arity may always be added when
- required by means of an \isakeyword{instance} with the default proof
- (double-dot).
-
- \medskip Thus, we may observe the following discipline of using
- syntactic classes. Overloaded polymorphic constants have their type
- arguments restricted to an associated (logically trivial) class
- @{text c}. Only immediately before \emph{specifying} these
- constants on a certain type @{text \<tau>} do we instantiate @{text "\<tau> \<Colon>
- c"}.
-
- This is done for class @{text product} and type @{typ bool} as
- follows.
-*}
-
-instance bool :: product ..
-defs (overloaded)
- product_bool_def: "x \<odot> y \<equiv> x \<and> y"
-
-text {*
- The definition @{text prod_bool_def} becomes syntactically
- well-formed only after the arity @{text "bool \<Colon> product"} is made
- known to the type checker.
-
-  \medskip It is very important to see that the above \isakeyword{defs} are
- not directly connected with \isakeyword{instance} at all! We were
- just following our convention to specify @{text \<odot>} on @{typ bool}
- after having instantiated @{text "bool \<Colon> product"}. Isabelle does
- not require these definitions, which is in contrast to programming
- languages like Haskell \cite{haskell-report}.
-
- \medskip While Isabelle type classes and those of Haskell are almost
- the same as far as type-checking and type inference are concerned,
- there are important semantic differences. Haskell classes require
- their instances to \emph{provide operations} of certain \emph{names}.
- Therefore, its \texttt{instance} has a \texttt{where} part that tells
- the system what these ``member functions'' should be.
-
- This style of \texttt{instance} would not make much sense in
- Isabelle's meta-logic, because there is no internal notion of
- ``providing operations'' or even ``names of functions''.
-*}
-
-end
--- a/doc-src/AxClass/Group/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-
-use_thy "Semigroups";
-use_thy "Group";
-use_thy "Product";
--- a/doc-src/AxClass/Group/Semigroups.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-
-header {* Semigroups *}
-
-theory Semigroups imports Main begin
-
-text {*
- \medskip\noindent An axiomatic type class is simply a class of types
- that all meet certain properties, which are also called \emph{class
-  axioms}. Thus, type classes may also be understood as type
- predicates --- i.e.\ abstractions over a single type argument @{typ
- 'a}. Class axioms typically contain polymorphic constants that
- depend on this type @{typ 'a}. These \emph{characteristic
- constants} behave like operations associated with the ``carrier''
- type @{typ 'a}.
-
- We illustrate these basic concepts by the following formulation of
- semigroups.
-*}
-
-consts
- times :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<odot>" 70)
-axclass semigroup \<subseteq> type
- assoc: "(x \<odot> y) \<odot> z = x \<odot> (y \<odot> z)"
-
-text {*
- \noindent Above we have first declared a polymorphic constant @{text
- "\<odot> \<Colon> 'a \<Rightarrow> 'a \<Rightarrow> 'a"} and then defined the class @{text semigroup} of
- all types @{text \<tau>} such that @{text "\<odot> \<Colon> \<tau> \<Rightarrow> \<tau> \<Rightarrow> \<tau>"} is indeed an
- associative operator. The @{text assoc} axiom contains exactly one
- type variable, which is invisible in the above presentation, though.
- Also note that free term variables (like @{term x}, @{term y},
- @{term z}) are allowed for user convenience --- conceptually all of
- these are bound by outermost universal quantifiers.
-
- \medskip In general, type classes may be used to describe
- \emph{structures} with exactly one carrier @{typ 'a} and a fixed
- \emph{signature}. Different signatures require different classes.
- Below, class @{text plus_semigroup} represents semigroups @{text
- "(\<tau>, \<oplus>\<^sup>\<tau>)"}, while the original @{text semigroup} would
- correspond to semigroups of the form @{text "(\<tau>, \<odot>\<^sup>\<tau>)"}.
-*}
-
-consts
- plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<oplus>" 70)
-axclass plus_semigroup \<subseteq> type
- assoc: "(x \<oplus> y) \<oplus> z = x \<oplus> (y \<oplus> z)"
-
-text {*
- \noindent Even if classes @{text plus_semigroup} and @{text
- semigroup} both represent semigroups in a sense, they are certainly
- not quite the same.
-*}
-
-end
--- a/doc-src/AxClass/Group/document/Group.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,512 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{Group}%
-%
-\isamarkupheader{Basic group theory%
-}
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ Group\ \isakeyword{imports}\ Main\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\begin{isamarkuptext}%
-\medskip\noindent The meta-level type system of Isabelle supports
- \emph{intersections} and \emph{inclusions} of type classes. These
- directly correspond to intersections and inclusions of type
- predicates in a purely set theoretic sense. This is sufficient as a
- means to describe simple hierarchies of structures. As an
- illustration, we use the well-known example of semigroups, monoids,
- general groups and Abelian groups.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Monoids and Groups%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-First we declare some polymorphic constants required later for the
- signature parts of our structures.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{consts}\isamarkupfalse%
-\isanewline
-\ \ times\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymodot}{\isachardoublequoteclose}\ {\isadigit{7}}{\isadigit{0}}{\isacharparenright}\isanewline
-\ \ invers\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}{\isachardoublequoteopen}{\isacharparenleft}{\isacharunderscore}{\isasyminv}{\isacharparenright}{\isachardoublequoteclose}\ {\isacharbrackleft}{\isadigit{1}}{\isadigit{0}}{\isadigit{0}}{\isadigit{0}}{\isacharbrackright}\ {\isadigit{9}}{\isadigit{9}}{\isadigit{9}}{\isacharparenright}\isanewline
-\ \ one\ {\isacharcolon}{\isacharcolon}\ {\isacharprime}a\ \ \ \ {\isacharparenleft}{\isachardoublequoteopen}{\isasymone}{\isachardoublequoteclose}{\isacharparenright}%
-\begin{isamarkuptext}%
-\noindent Next we define class \isa{monoid} of monoids with
- operations \isa{{\isasymodot}} and \isa{{\isasymone}}. Note that multiple class
- axioms are allowed for user convenience --- they simply represent
- the conjunction of their respective universal closures.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{axclass}\isamarkupfalse%
-\ monoid\ {\isasymsubseteq}\ type\isanewline
-\ \ assoc{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymodot}\ y{\isacharparenright}\ {\isasymodot}\ z\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}y\ {\isasymodot}\ z{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ left{\isacharunderscore}unit{\isacharcolon}\ {\isachardoublequoteopen}{\isasymone}\ {\isasymodot}\ x\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
-\ \ right{\isacharunderscore}unit{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ {\isasymone}\ {\isacharequal}\ x{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-\noindent So class \isa{monoid} contains exactly those types
- \isa{{\isasymtau}} where \isa{{\isasymodot}\ {\isasymColon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}} and \isa{{\isasymone}\ {\isasymColon}\ {\isasymtau}}
- are specified appropriately, such that \isa{{\isasymodot}} is associative and
- \isa{{\isasymone}} is a left and right unit element for the \isa{{\isasymodot}}
- operation.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\medskip Independently of \isa{monoid}, we now define a linear
- hierarchy of semigroups, general groups and Abelian groups. Note
- that the names of class axioms are automatically qualified with each
- class name, so we may re-use common names such as \isa{assoc}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{axclass}\isamarkupfalse%
-\ semigroup\ {\isasymsubseteq}\ type\isanewline
-\ \ assoc{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymodot}\ y{\isacharparenright}\ {\isasymodot}\ z\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}y\ {\isasymodot}\ z{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\isanewline
-\isacommand{axclass}\isamarkupfalse%
-\ group\ {\isasymsubseteq}\ semigroup\isanewline
-\ \ left{\isacharunderscore}unit{\isacharcolon}\ {\isachardoublequoteopen}{\isasymone}\ {\isasymodot}\ x\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
-\ \ left{\isacharunderscore}inverse{\isacharcolon}\ {\isachardoublequoteopen}x{\isasyminv}\ {\isasymodot}\ x\ {\isacharequal}\ {\isasymone}{\isachardoublequoteclose}\isanewline
-\isanewline
-\isacommand{axclass}\isamarkupfalse%
-\ agroup\ {\isasymsubseteq}\ group\isanewline
-\ \ commute{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ y\ {\isacharequal}\ y\ {\isasymodot}\ x{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-\noindent Class \isa{group} inherits associativity of \isa{{\isasymodot}}
- from \isa{semigroup} and adds two further group axioms. Similarly,
- \isa{agroup} is defined as the subset of \isa{group} such that
- for all of its elements \isa{{\isasymtau}}, the operation \isa{{\isasymodot}\ {\isasymColon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}} is even commutative.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Abstract reasoning%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-In a sense, axiomatic type classes may be viewed as \emph{abstract
- theories}. The above class definitions give rise to abstract axioms
- \isa{assoc}, \isa{left{\isacharunderscore}unit}, \isa{left{\isacharunderscore}inverse}, \isa{commute}, where any of these contain a type variable \isa{{\isacharprime}a\ {\isasymColon}\ c} that is restricted to types of the corresponding class \isa{c}. \emph{Sort constraints} like this express a logical
- precondition for the whole formula. For example, \isa{assoc}
- states that for all \isa{{\isasymtau}}, provided that \isa{{\isasymtau}\ {\isasymColon}\ semigroup}, the operation \isa{{\isasymodot}\ {\isasymColon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}} is associative.
-
- \medskip From a technical point of view, abstract axioms are just
- ordinary Isabelle theorems, which may be used in proofs without
- special treatment. Such ``abstract proofs'' usually yield new
- ``abstract theorems''. For example, we may now derive the following
- well-known laws of general groups.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{theorem}\isamarkupfalse%
-\ group{\isacharunderscore}right{\isacharunderscore}inverse{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ x{\isasyminv}\ {\isacharequal}\ {\isacharparenleft}{\isasymone}{\isasymColon}{\isacharprime}a{\isasymColon}group{\isacharparenright}{\isachardoublequoteclose}\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\ {\isacharminus}\isanewline
-\ \ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}x\ {\isasymodot}\ x{\isasyminv}\ {\isacharequal}\ {\isasymone}\ {\isasymodot}\ {\isacharparenleft}x\ {\isasymodot}\ x{\isasyminv}{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}unit{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isasymone}\ {\isasymodot}\ x\ {\isasymodot}\ x{\isasyminv}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminv}{\isacharparenright}{\isasyminv}\ {\isasymodot}\ x{\isasyminv}\ {\isasymodot}\ x\ {\isasymodot}\ x{\isasyminv}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}inverse{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminv}{\isacharparenright}{\isasyminv}\ {\isasymodot}\ {\isacharparenleft}x{\isasyminv}\ {\isasymodot}\ x{\isacharparenright}\ {\isasymodot}\ x{\isasyminv}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminv}{\isacharparenright}{\isasyminv}\ {\isasymodot}\ {\isasymone}\ {\isasymodot}\ x{\isasyminv}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}inverse{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminv}{\isacharparenright}{\isasyminv}\ {\isasymodot}\ {\isacharparenleft}{\isasymone}\ {\isasymodot}\ x{\isasyminv}{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminv}{\isacharparenright}{\isasyminv}\ {\isasymodot}\ x{\isasyminv}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}unit{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isasymone}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}inverse{\isacharparenright}\isanewline
-\ \ \isacommand{finally}\isamarkupfalse%
-\ \isacommand{show}\isamarkupfalse%
-\ {\isacharquery}thesis\ \isacommand{{\isachardot}}\isamarkupfalse%
-\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\begin{isamarkuptext}%
-\noindent With \isa{group{\isacharunderscore}right{\isacharunderscore}inverse} already available, \isa{group{\isacharunderscore}right{\isacharunderscore}unit}\label{thm:group-right-unit} is now established
- much more easily.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{theorem}\isamarkupfalse%
-\ group{\isacharunderscore}right{\isacharunderscore}unit{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ {\isasymone}\ {\isacharequal}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isasymColon}group{\isacharparenright}{\isachardoublequoteclose}\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\ {\isacharminus}\isanewline
-\ \ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}x\ {\isasymodot}\ {\isasymone}\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}x{\isasyminv}\ {\isasymodot}\ x{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}inverse{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ x\ {\isasymodot}\ x{\isasyminv}\ {\isasymodot}\ x{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ {\isasymone}\ {\isasymodot}\ x{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}right{\isacharunderscore}inverse{\isacharparenright}\isanewline
-\ \ \isacommand{also}\isamarkupfalse%
-\ \isacommand{have}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isachardot}\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ only{\isacharcolon}\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}unit{\isacharparenright}\isanewline
-\ \ \isacommand{finally}\isamarkupfalse%
-\ \isacommand{show}\isamarkupfalse%
-\ {\isacharquery}thesis\ \isacommand{{\isachardot}}\isamarkupfalse%
-\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\begin{isamarkuptext}%
-\medskip Abstract theorems may be instantiated to only those types
- \isa{{\isasymtau}} where the appropriate class membership \isa{{\isasymtau}\ {\isasymColon}\ c} is
- known at Isabelle's type signature level. Since we have \isa{agroup\ {\isasymsubseteq}\ group\ {\isasymsubseteq}\ semigroup} by definition, all theorems of \isa{semigroup} and \isa{group} are automatically inherited by \isa{group} and \isa{agroup}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Abstract instantiation%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-From the definition, the \isa{monoid} and \isa{group} classes
- have been independent. Note that for monoids, \isa{right{\isacharunderscore}unit}
- had to be included as an axiom, but for groups both \isa{right{\isacharunderscore}unit} and \isa{right{\isacharunderscore}inverse} are derivable from the other
- axioms. With \isa{group{\isacharunderscore}right{\isacharunderscore}unit} derived as a theorem of group
- theory (see page~\pageref{thm:group-right-unit}), we may now
- instantiate \isa{monoid\ {\isasymsubseteq}\ semigroup} and \isa{group\ {\isasymsubseteq}\ monoid} properly as follows (cf.\ \figref{fig:monoid-group}).
-
- \begin{figure}[htbp]
- \begin{center}
- \small
- \unitlength 0.6mm
- \begin{picture}(65,90)(0,-10)
- \put(15,10){\line(0,1){10}} \put(15,30){\line(0,1){10}}
- \put(15,50){\line(1,1){10}} \put(35,60){\line(1,-1){10}}
- \put(15,5){\makebox(0,0){\isa{agroup}}}
- \put(15,25){\makebox(0,0){\isa{group}}}
- \put(15,45){\makebox(0,0){\isa{semigroup}}}
- \put(30,65){\makebox(0,0){\isa{type}}} \put(50,45){\makebox(0,0){\isa{monoid}}}
- \end{picture}
- \hspace{4em}
- \begin{picture}(30,90)(0,0)
- \put(15,10){\line(0,1){10}} \put(15,30){\line(0,1){10}}
- \put(15,50){\line(0,1){10}} \put(15,70){\line(0,1){10}}
- \put(15,5){\makebox(0,0){\isa{agroup}}}
- \put(15,25){\makebox(0,0){\isa{group}}}
- \put(15,45){\makebox(0,0){\isa{monoid}}}
- \put(15,65){\makebox(0,0){\isa{semigroup}}}
- \put(15,85){\makebox(0,0){\isa{type}}}
- \end{picture}
- \caption{Monoids and groups: according to definition, and by proof}
- \label{fig:monoid-group}
- \end{center}
- \end{figure}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{instance}\isamarkupfalse%
-\ monoid\ {\isasymsubseteq}\ semigroup\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\isanewline
-\ \ \isacommand{fix}\isamarkupfalse%
-\ x\ y\ z\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a{\isasymColon}monoid{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}x\ {\isasymodot}\ y\ {\isasymodot}\ z\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}y\ {\isasymodot}\ z{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}rule\ monoid{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-\isanewline
-%
-\endisadelimproof
-\isanewline
-\isacommand{instance}\isamarkupfalse%
-\ group\ {\isasymsubseteq}\ monoid\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\isanewline
-\ \ \isacommand{fix}\isamarkupfalse%
-\ x\ y\ z\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a{\isasymColon}group{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}x\ {\isasymodot}\ y\ {\isasymodot}\ z\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}y\ {\isasymodot}\ z{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}rule\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isasymone}\ {\isasymodot}\ x\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}rule\ group{\isacharunderscore}class{\isachardot}left{\isacharunderscore}unit{\isacharparenright}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}x\ {\isasymodot}\ {\isasymone}\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}rule\ group{\isacharunderscore}right{\isacharunderscore}unit{\isacharparenright}\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\begin{isamarkuptext}%
-\medskip The \isakeyword{instance} command sets up an appropriate
- goal that represents the class inclusion (or type arity, see
- \secref{sec:inst-arity}) to be proven (see also
- \cite{isabelle-isar-ref}). The initial proof step causes
- back-chaining of class membership statements wrt.\ the hierarchy of
- any classes defined in the current theory; the effect is to reduce
- the initial statement to a number of goals that directly
- correspond to any class axioms encountered on the path upwards
- through the class hierarchy.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Concrete instantiation \label{sec:inst-arity}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-So far we have covered the case of the form
- \isakeyword{instance}~\isa{c\isactrlsub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlsub {\isadigit{2}}}, namely
- \emph{abstract instantiation} --- \isa{c\isactrlsub {\isadigit{1}}} is more special than \isa{c\isactrlsub {\isadigit{2}}} and thus an instance of \isa{c\isactrlsub {\isadigit{2}}}. Even more
- interesting for practical applications are \emph{concrete
- instantiations} of axiomatic type classes. That is, certain simple
- schemes \isa{{\isacharparenleft}{\isasymalpha}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlsub n{\isacharparenright}\ t\ {\isasymColon}\ c} of class
- membership may be established at the logical level and then
- transferred to Isabelle's type signature level.
-
- \medskip As a typical example, we show that type \isa{bool} with
- exclusive-or as \isa{{\isasymodot}} operation, identity as \isa{{\isasyminv}}, and
- \isa{False} as \isa{{\isasymone}} forms an Abelian group.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{defs}\isamarkupfalse%
-\ {\isacharparenleft}\isakeyword{overloaded}{\isacharparenright}\isanewline
-\ \ times{\isacharunderscore}bool{\isacharunderscore}def{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ y\ {\isasymequiv}\ x\ {\isasymnoteq}\ {\isacharparenleft}y{\isasymColon}bool{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ inverse{\isacharunderscore}bool{\isacharunderscore}def{\isacharcolon}\ {\isachardoublequoteopen}x{\isasyminv}\ {\isasymequiv}\ x{\isasymColon}bool{\isachardoublequoteclose}\isanewline
-\ \ unit{\isacharunderscore}bool{\isacharunderscore}def{\isacharcolon}\ {\isachardoublequoteopen}{\isasymone}\ {\isasymequiv}\ False{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-\medskip It is important to note that the above \isakeyword{defs} are
- just overloaded meta-level constant definitions, where type classes
- are not yet involved at all. This form of constant definition with
- overloading (and optional recursion over the syntactic structure of
- simple types) is admissible as a definitional extension of plain HOL
- \cite{Wenzel:1997:TPHOL}. The Haskell-style type system is not
- required for overloading. Nevertheless, overloaded definitions are
- best applied in the context of type classes.
-
- \medskip Since we have chosen the above \isakeyword{defs} of the generic
- group operations on type \isa{bool} appropriately, the class
- membership \isa{bool\ {\isasymColon}\ agroup} may now be derived as follows.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{instance}\isamarkupfalse%
-\ bool\ {\isacharcolon}{\isacharcolon}\ agroup\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\ {\isacharparenleft}intro{\isacharunderscore}classes{\isacharcomma}\isanewline
-\ \ \ \ unfold\ times{\isacharunderscore}bool{\isacharunderscore}def\ inverse{\isacharunderscore}bool{\isacharunderscore}def\ unit{\isacharunderscore}bool{\isacharunderscore}def{\isacharparenright}\isanewline
-\ \ \isacommand{fix}\isamarkupfalse%
-\ x\ y\ z\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isacharparenleft}{\isacharparenleft}x\ {\isasymnoteq}\ y{\isacharparenright}\ {\isasymnoteq}\ z{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}x\ {\isasymnoteq}\ {\isacharparenleft}y\ {\isasymnoteq}\ z{\isacharparenright}{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ blast\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isacharparenleft}False\ {\isasymnoteq}\ x{\isacharparenright}\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ blast\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymnoteq}\ x{\isacharparenright}\ {\isacharequal}\ False{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ blast\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymnoteq}\ y{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}y\ {\isasymnoteq}\ x{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
-\ blast\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\begin{isamarkuptext}%
-The result of an \isakeyword{instance} statement is both expressed
- as a theorem of Isabelle's meta-logic, and as a type arity of the
- type signature. The latter enables the type-inference system to take
- care of this new instance automatically.
-
- \medskip We could now also instantiate our group theory classes to
- many other concrete types. For example, \isa{int\ {\isasymColon}\ agroup}
- (e.g.\ by defining \isa{{\isasymodot}} as addition, \isa{{\isasyminv}} as negation
- and \isa{{\isasymone}} as zero) or \isa{list\ {\isasymColon}\ {\isacharparenleft}type{\isacharparenright}\ semigroup}
- (e.g.\ if \isa{{\isasymodot}} is defined as list append). Thus, the
- characteristic constants \isa{{\isasymodot}}, \isa{{\isasyminv}}, \isa{{\isasymone}}
- really become overloaded, i.e.\ have different meanings on different
- types.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Lifting and Functors%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-As already mentioned above, overloading in the simply-typed HOL
- systems may include recursion over the syntactic structure of types.
- That is, definitional equations \isa{c\isactrlsup {\isasymtau}\ {\isasymequiv}\ t} may also
- contain constants of name \isa{c} on the right-hand side --- if
- these have types that are structurally simpler than \isa{{\isasymtau}}.
-
- This feature enables us to \emph{lift operations}, say to Cartesian
- products, direct sums or function spaces. Subsequently we lift
- \isa{{\isasymodot}} component-wise to binary products \isa{{\isacharprime}a\ {\isasymtimes}\ {\isacharprime}b}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{defs}\isamarkupfalse%
-\ {\isacharparenleft}\isakeyword{overloaded}{\isacharparenright}\isanewline
-\ \ times{\isacharunderscore}prod{\isacharunderscore}def{\isacharcolon}\ {\isachardoublequoteopen}p\ {\isasymodot}\ q\ {\isasymequiv}\ {\isacharparenleft}fst\ p\ {\isasymodot}\ fst\ q{\isacharcomma}\ snd\ p\ {\isasymodot}\ snd\ q{\isacharparenright}{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-It is very easy to see that associativity of \isa{{\isasymodot}} on \isa{{\isacharprime}a}
- and \isa{{\isasymodot}} on \isa{{\isacharprime}b} transfers to \isa{{\isasymodot}} on \isa{{\isacharprime}a\ {\isasymtimes}\ {\isacharprime}b}. Hence the binary type constructor \isa{{\isasymodot}} maps semigroups
- to semigroups. This may be established formally as follows.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{instance}\isamarkupfalse%
-\ {\isacharasterisk}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}semigroup{\isacharcomma}\ semigroup{\isacharparenright}\ semigroup\isanewline
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-\isacommand{proof}\isamarkupfalse%
-\ {\isacharparenleft}intro{\isacharunderscore}classes{\isacharcomma}\ unfold\ times{\isacharunderscore}prod{\isacharunderscore}def{\isacharparenright}\isanewline
-\ \ \isacommand{fix}\isamarkupfalse%
-\ p\ q\ r\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a{\isasymColon}semigroup\ {\isasymtimes}\ {\isacharprime}b{\isasymColon}semigroup{\isachardoublequoteclose}\isanewline
-\ \ \isacommand{show}\isamarkupfalse%
-\isanewline
-\ \ \ \ {\isachardoublequoteopen}{\isacharparenleft}fst\ {\isacharparenleft}fst\ p\ {\isasymodot}\ fst\ q{\isacharcomma}\ snd\ p\ {\isasymodot}\ snd\ q{\isacharparenright}\ {\isasymodot}\ fst\ r{\isacharcomma}\isanewline
-\ \ \ \ \ \ snd\ {\isacharparenleft}fst\ p\ {\isasymodot}\ fst\ q{\isacharcomma}\ snd\ p\ {\isasymodot}\ snd\ q{\isacharparenright}\ {\isasymodot}\ snd\ r{\isacharparenright}\ {\isacharequal}\isanewline
-\ \ \ \ \ \ \ {\isacharparenleft}fst\ p\ {\isasymodot}\ fst\ {\isacharparenleft}fst\ q\ {\isasymodot}\ fst\ r{\isacharcomma}\ snd\ q\ {\isasymodot}\ snd\ r{\isacharparenright}{\isacharcomma}\isanewline
-\ \ \ \ \ \ \ \ snd\ p\ {\isasymodot}\ snd\ {\isacharparenleft}fst\ q\ {\isasymodot}\ fst\ r{\isacharcomma}\ snd\ q\ {\isasymodot}\ snd\ r{\isacharparenright}{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ \ \ \isacommand{by}\isamarkupfalse%
-\ {\isacharparenleft}simp\ add{\isacharcolon}\ semigroup{\isacharunderscore}class{\isachardot}assoc{\isacharparenright}\isanewline
-\isacommand{qed}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\begin{isamarkuptext}%
-Thus, if we view class instances as ``structures'', then overloaded
- constant definitions with recursion over types indirectly provide
- some kind of ``functors'' --- i.e.\ mappings between abstract
- theories.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/AxClass/Group/document/Product.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{Product}%
-%
-\isamarkupheader{Syntactic classes%
-}
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ Product\ \isakeyword{imports}\ Main\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\begin{isamarkuptext}%
-\medskip\noindent There is still a feature of Isabelle's type system
- left that we have not yet discussed. When declaring polymorphic
- constants \isa{c\ {\isasymColon}\ {\isasymsigma}}, the type variables occurring in \isa{{\isasymsigma}}
- may be constrained by type classes (or even general sorts) in an
- arbitrary way. Note that by default, in Isabelle/HOL the
- declaration \isa{{\isasymodot}\ {\isasymColon}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a} is actually an abbreviation
- for \isa{{\isasymodot}\ {\isasymColon}\ {\isacharprime}a{\isasymColon}type\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a}. Since class \isa{type} is the
- universal class of HOL, this is not really a constraint at all.
-
- The \isa{product} class below provides a less degenerate example of
- syntactic type classes.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{axclass}\isamarkupfalse%
-\isanewline
-\ \ product\ {\isasymsubseteq}\ type\isanewline
-\isacommand{consts}\isamarkupfalse%
-\isanewline
-\ \ product\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a{\isasymColon}product\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymodot}{\isachardoublequoteclose}\ {\isadigit{7}}{\isadigit{0}}{\isacharparenright}%
-\begin{isamarkuptext}%
-Here class \isa{product} is defined as subclass of \isa{type}
- without any additional axioms. This results in logical equivalence
- of \isa{product} and \isa{type}, as is reflected by the trivial
- introduction rule generated for this definition.
-
- \medskip So what is the difference of declaring \isa{{\isasymodot}\ {\isasymColon}\ {\isacharprime}a{\isasymColon}product\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a} vs.\ declaring \isa{{\isasymodot}\ {\isasymColon}\ {\isacharprime}a{\isasymColon}type\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a} anyway? In this particular case where \isa{product\ {\isasymequiv}\ type}, it should be obvious that both declarations are the same
- from the logic's point of view. It even makes the most sense to
- remove sort constraints from constant declarations, as far as the
- purely logical meaning is concerned \cite{Wenzel:1997:TPHOL}.
-
- On the other hand there are syntactic differences, of course.
- Constants \isa{{\isasymodot}} on some type \isa{{\isasymtau}} are rejected by the
- type-checker, unless the arity \isa{{\isasymtau}\ {\isasymColon}\ product} is part of the
- type signature. In our example, this arity may be always added when
- required by means of an \isakeyword{instance} with the default proof
- (double-dot).
-
- \medskip Thus, we may observe the following discipline of using
- syntactic classes. Overloaded polymorphic constants have their type
- arguments restricted to an associated (logically trivial) class
- \isa{c}. Only immediately before \emph{specifying} these
- constants on a certain type \isa{{\isasymtau}} do we instantiate \isa{{\isasymtau}\ {\isasymColon}\ c}.
-
- This is done for class \isa{product} and type \isa{bool} as
- follows.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{instance}\isamarkupfalse%
-\ bool\ {\isacharcolon}{\isacharcolon}\ product%
-\isadelimproof
-\ %
-\endisadelimproof
-%
-\isatagproof
-\isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-\isanewline
-\isacommand{defs}\isamarkupfalse%
-\ {\isacharparenleft}\isakeyword{overloaded}{\isacharparenright}\isanewline
-\ \ product{\isacharunderscore}bool{\isacharunderscore}def{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymodot}\ y\ {\isasymequiv}\ x\ {\isasymand}\ y{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-The definition \isa{product{\isacharunderscore}bool{\isacharunderscore}def} becomes syntactically
- well-formed only after the arity \isa{bool\ {\isasymColon}\ product} is made
- known to the type checker.
-
- \medskip It is very important to see that above \isakeyword{defs} are
- not directly connected with \isakeyword{instance} at all! We were
- just following our convention to specify \isa{{\isasymodot}} on \isa{bool}
- after having instantiated \isa{bool\ {\isasymColon}\ product}. Isabelle does
- not require these definitions, which is in contrast to programming
- languages like Haskell \cite{haskell-report}.
-
- \medskip While Isabelle type classes and those of Haskell are almost
- the same as far as type-checking and type inference are concerned,
- there are important semantic differences. Haskell classes require
- their instances to \emph{provide operations} of certain \emph{names}.
- Therefore, its \texttt{instance} has a \texttt{where} part that tells
- the system what these ``member functions'' should be.
-
- This style of \texttt{instance} would not make much sense in
- Isabelle's meta-logic, because there is no internal notion of
- ``providing operations'' or even ``names of functions''.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/AxClass/Group/document/Semigroups.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{Semigroups}%
-%
-\isamarkupheader{Semigroups%
-}
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ Semigroups\ \isakeyword{imports}\ Main\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\begin{isamarkuptext}%
-\medskip\noindent An axiomatic type class is simply a class of types
- that all meet certain properties, which are also called \emph{class
- axioms}. Thus, type classes may be also understood as type
- predicates --- i.e.\ abstractions over a single type argument \isa{{\isacharprime}a}. Class axioms typically contain polymorphic constants that
- depend on this type \isa{{\isacharprime}a}. These \emph{characteristic
- constants} behave like operations associated with the ``carrier''
- type \isa{{\isacharprime}a}.
-
- We illustrate these basic concepts by the following formulation of
- semigroups.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{consts}\isamarkupfalse%
-\isanewline
-\ \ times\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymodot}{\isachardoublequoteclose}\ {\isadigit{7}}{\isadigit{0}}{\isacharparenright}\isanewline
-\isacommand{axclass}\isamarkupfalse%
-\ semigroup\ {\isasymsubseteq}\ type\isanewline
-\ \ assoc{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymodot}\ y{\isacharparenright}\ {\isasymodot}\ z\ {\isacharequal}\ x\ {\isasymodot}\ {\isacharparenleft}y\ {\isasymodot}\ z{\isacharparenright}{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-\noindent Above we have first declared a polymorphic constant \isa{{\isasymodot}\ {\isasymColon}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a} and then defined the class \isa{semigroup} of
- all types \isa{{\isasymtau}} such that \isa{{\isasymodot}\ {\isasymColon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymtau}} is indeed an
- associative operator. The \isa{assoc} axiom contains exactly one
- type variable, which is invisible in the above presentation, though.
- Also note that free term variables (like \isa{x}, \isa{y},
- \isa{z}) are allowed for user convenience --- conceptually all of
- these are bound by outermost universal quantifiers.
-
- \medskip In general, type classes may be used to describe
- \emph{structures} with exactly one carrier \isa{{\isacharprime}a} and a fixed
- \emph{signature}. Different signatures require different classes.
- Below, class \isa{plus{\isacharunderscore}semigroup} represents semigroups \isa{{\isacharparenleft}{\isasymtau}{\isacharcomma}\ {\isasymoplus}\isactrlsup {\isasymtau}{\isacharparenright}}, while the original \isa{semigroup} would
- correspond to semigroups of the form \isa{{\isacharparenleft}{\isasymtau}{\isacharcomma}\ {\isasymodot}\isactrlsup {\isasymtau}{\isacharparenright}}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{consts}\isamarkupfalse%
-\isanewline
-\ \ plus\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isasymoplus}{\isachardoublequoteclose}\ {\isadigit{7}}{\isadigit{0}}{\isacharparenright}\isanewline
-\isacommand{axclass}\isamarkupfalse%
-\ plus{\isacharunderscore}semigroup\ {\isasymsubseteq}\ type\isanewline
-\ \ assoc{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymoplus}\ y{\isacharparenright}\ {\isasymoplus}\ z\ {\isacharequal}\ x\ {\isasymoplus}\ {\isacharparenleft}y\ {\isasymoplus}\ z{\isacharparenright}{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-\noindent Even if classes \isa{plus{\isacharunderscore}semigroup} and \isa{semigroup} both represent semigroups in a sense, they are certainly
- not quite the same.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/AxClass/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-
-## targets
-
-default: Group Nat
-images:
-test: Group Nat
-
-all: images test
-
-
-## global settings
-
-SRC = $(ISABELLE_HOME)/src
-OUT = $(ISABELLE_OUTPUT)
-LOG = $(OUT)/log
-USEDIR = $(ISABELLE_TOOL) usedir -d false -D document
-
-
-## Group
-
-Group: HOL $(LOG)/HOL-Group.gz
-
-HOL:
- @cd $(SRC)/HOL; $(ISABELLE_TOOL) make HOL
-
-$(LOG)/HOL-Group.gz: $(OUT)/HOL Group/ROOT.ML Group/Group.thy \
- Group/Product.thy Group/Semigroups.thy
- @$(USEDIR) $(OUT)/HOL Group
- @rm -f Group/document/pdfsetup.sty Group/document/session.tex
-
-
-## Nat
-
-Nat: FOL $(LOG)/FOL-Nat.gz
-
-FOL:
- @cd $(SRC)/FOL; $(ISABELLE_TOOL) make FOL
-
-$(LOG)/FOL-Nat.gz: $(OUT)/FOL Nat/ROOT.ML Nat/NatClass.thy
- @$(USEDIR) $(OUT)/FOL Nat
- @rm -f Nat/document/*.sty Nat/document/session.tex
-
-
-## clean
-
-clean:
- @rm -f $(LOG)/HOL-Group.gz $(LOG)/FOL-Nat.gz
--- a/doc-src/AxClass/Makefile Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-#
-# $Id$
-#
-
-## targets
-
-default: dvi
-
-
-## dependencies
-
-include ../Makefile.in
-
-NAME = axclass
-
-FILES = axclass.tex body.tex ../iman.sty ../extra.sty ../isar.sty \
- ../isabelle.sty ../isabellesym.sty ../pdfsetup.sty \
- Group/document/Group.tex Nat/document/NatClass.tex \
- Group/document/Product.tex Group/document/Semigroups.tex
-
-dvi: $(NAME).dvi
-
-$(NAME).dvi: $(FILES) isabelle_isar.eps
- $(LATEX) $(NAME)
- $(BIBTEX) $(NAME)
- $(LATEX) $(NAME)
- $(LATEX) $(NAME)
-
-pdf: $(NAME).pdf
-
-$(NAME).pdf: $(FILES) isabelle_isar.pdf
- $(PDFLATEX) $(NAME)
- $(FIXBOOKMARKS) $(NAME).out
- $(BIBTEX) $(NAME)
- $(PDFLATEX) $(NAME)
- $(PDFLATEX) $(NAME)
--- a/doc-src/AxClass/Nat/NatClass.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,117 +0,0 @@
-
-header {* Defining natural numbers in FOL \label{sec:ex-natclass} *}
-
-theory NatClass imports FOL begin
-
-text {*
- \medskip\noindent Axiomatic type classes abstract over exactly one
- type argument. Thus, any \emph{axiomatic} theory extension where each
- axiom refers to at most one type variable, may be trivially turned
- into a \emph{definitional} one.
-
- We illustrate this with the natural numbers in
- Isabelle/FOL.\footnote{See also
- \url{http://isabelle.in.tum.de/library/FOL/ex/NatClass.html}}
-*}
-
-consts
- zero :: 'a ("\<zero>")
- Suc :: "'a \<Rightarrow> 'a"
- rec :: "'a \<Rightarrow> 'a \<Rightarrow> ('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a"
-
-axclass nat \<subseteq> "term"
- induct: "P(\<zero>) \<Longrightarrow> (\<And>x. P(x) \<Longrightarrow> P(Suc(x))) \<Longrightarrow> P(n)"
- Suc_inject: "Suc(m) = Suc(n) \<Longrightarrow> m = n"
- Suc_neq_0: "Suc(m) = \<zero> \<Longrightarrow> R"
- rec_0: "rec(\<zero>, a, f) = a"
- rec_Suc: "rec(Suc(m), a, f) = f(m, rec(m, a, f))"
-
-constdefs
- add :: "'a::nat \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "+" 60)
- "m + n \<equiv> rec(m, n, \<lambda>x y. Suc(y))"
-
-text {*
- This is an abstract version of the plain @{text Nat} theory in
- FOL.\footnote{See
- \url{http://isabelle.in.tum.de/library/FOL/ex/Nat.html}} Basically,
- we have just replaced all occurrences of type @{text nat} by @{typ
- 'a} and used the natural number axioms to define class @{text nat}.
- There is only a minor snag, that the original recursion operator
- @{term rec} had to be made monomorphic.
-
- Thus class @{text nat} contains exactly those types @{text \<tau>} that
- are isomorphic to ``the'' natural numbers (with signature @{term
- \<zero>}, @{term Suc}, @{term rec}).
-
- \medskip What we have done here can be also viewed as \emph{type
- specification}. Of course, it still remains open if there is some
- type at all that meets the class axioms. Now a very nice property of
- axiomatic type classes is that abstract reasoning is always possible
- --- independent of satisfiability. The meta-logic won't break, even
- if some classes (or general sorts) turn out to be empty later ---
- ``inconsistent'' class definitions may be useless, but do not cause
- any harm.
-
- Theorems of the abstract natural numbers may be derived in the same
- way as for the concrete version. The original proof scripts may be
- re-used with some trivial changes only (mostly adding some type
- constraints).
-*}
-
-(*<*)
-lemma Suc_n_not_n: "Suc(k) ~= (k::'a::nat)"
-apply (rule_tac n = k in induct)
-apply (rule notI)
-apply (erule Suc_neq_0)
-apply (rule notI)
-apply (erule notE)
-apply (erule Suc_inject)
-done
-
-lemma "(k+m)+n = k+(m+n)"
-apply (rule induct)
-back
-back
-back
-back
-back
-back
-oops
-
-lemma add_0 [simp]: "\<zero>+n = n"
-apply (unfold add_def)
-apply (rule rec_0)
-done
-
-lemma add_Suc [simp]: "Suc(m)+n = Suc(m+n)"
-apply (unfold add_def)
-apply (rule rec_Suc)
-done
-
-lemma add_assoc: "(k+m)+n = k+(m+n)"
-apply (rule_tac n = k in induct)
-apply simp
-apply simp
-done
-
-lemma add_0_right: "m+\<zero> = m"
-apply (rule_tac n = m in induct)
-apply simp
-apply simp
-done
-
-lemma add_Suc_right: "m+Suc(n) = Suc(m+n)"
-apply (rule_tac n = m in induct)
-apply simp_all
-done
-
-lemma
- assumes prem: "!!n. f(Suc(n)) = Suc(f(n))"
- shows "f(i+j) = i+f(j)"
-apply (rule_tac n = i in induct)
-apply simp
-apply (simp add: prem)
-done
-(*>*)
-
-end
\ No newline at end of file
--- a/doc-src/AxClass/Nat/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-
-use_thy "NatClass";
--- a/doc-src/AxClass/Nat/document/NatClass.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,201 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{NatClass}%
-%
-\isamarkupheader{Defining natural numbers in FOL \label{sec:ex-natclass}%
-}
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ NatClass\ \isakeyword{imports}\ FOL\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\begin{isamarkuptext}%
-\medskip\noindent Axiomatic type classes abstract over exactly one
- type argument. Thus, any \emph{axiomatic} theory extension where each
- axiom refers to at most one type variable, may be trivially turned
- into a \emph{definitional} one.
-
- We illustrate this with the natural numbers in
- Isabelle/FOL.\footnote{See also
- \url{http://isabelle.in.tum.de/library/FOL/ex/NatClass.html}}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-\isacommand{consts}\isamarkupfalse%
-\isanewline
-\ \ zero\ {\isacharcolon}{\isacharcolon}\ {\isacharprime}a\ \ \ \ {\isacharparenleft}{\isachardoublequoteopen}{\isasymzero}{\isachardoublequoteclose}{\isacharparenright}\isanewline
-\ \ Suc\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\isanewline
-\ \ rec\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharparenleft}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\isanewline
-\isanewline
-\isacommand{axclass}\isamarkupfalse%
-\ nat\ {\isasymsubseteq}\ {\isachardoublequoteopen}term{\isachardoublequoteclose}\isanewline
-\ \ induct{\isacharcolon}\ {\isachardoublequoteopen}P{\isacharparenleft}{\isasymzero}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ P{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ P{\isacharparenleft}Suc{\isacharparenleft}x{\isacharparenright}{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ P{\isacharparenleft}n{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\ \ Suc{\isacharunderscore}inject{\isacharcolon}\ {\isachardoublequoteopen}Suc{\isacharparenleft}m{\isacharparenright}\ {\isacharequal}\ Suc{\isacharparenleft}n{\isacharparenright}\ {\isasymLongrightarrow}\ m\ {\isacharequal}\ n{\isachardoublequoteclose}\isanewline
-\ \ Suc{\isacharunderscore}neq{\isacharunderscore}{\isadigit{0}}{\isacharcolon}\ {\isachardoublequoteopen}Suc{\isacharparenleft}m{\isacharparenright}\ {\isacharequal}\ {\isasymzero}\ {\isasymLongrightarrow}\ R{\isachardoublequoteclose}\isanewline
-\ \ rec{\isacharunderscore}{\isadigit{0}}{\isacharcolon}\ {\isachardoublequoteopen}rec{\isacharparenleft}{\isasymzero}{\isacharcomma}\ a{\isacharcomma}\ f{\isacharparenright}\ {\isacharequal}\ a{\isachardoublequoteclose}\isanewline
-\ \ rec{\isacharunderscore}Suc{\isacharcolon}\ {\isachardoublequoteopen}rec{\isacharparenleft}Suc{\isacharparenleft}m{\isacharparenright}{\isacharcomma}\ a{\isacharcomma}\ f{\isacharparenright}\ {\isacharequal}\ f{\isacharparenleft}m{\isacharcomma}\ rec{\isacharparenleft}m{\isacharcomma}\ a{\isacharcomma}\ f{\isacharparenright}{\isacharparenright}{\isachardoublequoteclose}\isanewline
-\isanewline
-\isacommand{constdefs}\isamarkupfalse%
-\isanewline
-\ \ add\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharprime}a{\isacharcolon}{\isacharcolon}nat\ {\isasymRightarrow}\ {\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}\isakeyword{infixl}\ {\isachardoublequoteopen}{\isacharplus}{\isachardoublequoteclose}\ {\isadigit{6}}{\isadigit{0}}{\isacharparenright}\isanewline
-\ \ {\isachardoublequoteopen}m\ {\isacharplus}\ n\ {\isasymequiv}\ rec{\isacharparenleft}m{\isacharcomma}\ n{\isacharcomma}\ {\isasymlambda}x\ y{\isachardot}\ Suc{\isacharparenleft}y{\isacharparenright}{\isacharparenright}{\isachardoublequoteclose}%
-\begin{isamarkuptext}%
-This is an abstract version of the plain \isa{Nat} theory in
- FOL.\footnote{See
- \url{http://isabelle.in.tum.de/library/FOL/ex/Nat.html}} Basically,
- we have just replaced all occurrences of type \isa{nat} by \isa{{\isacharprime}a} and used the natural number axioms to define class \isa{nat}.
- There is only a minor snag, that the original recursion operator
- \isa{rec} had to be made monomorphic.
-
- Thus class \isa{nat} contains exactly those types \isa{{\isasymtau}} that
- are isomorphic to ``the'' natural numbers (with signature \isa{{\isasymzero}}, \isa{Suc}, \isa{rec}).
-
- \medskip What we have done here can be also viewed as \emph{type
- specification}. Of course, it still remains open if there is some
- type at all that meets the class axioms. Now a very nice property of
- axiomatic type classes is that abstract reasoning is always possible
- --- independent of satisfiability. The meta-logic won't break, even
- if some classes (or general sorts) turn out to be empty later ---
- ``inconsistent'' class definitions may be useless, but do not cause
- any harm.
-
- Theorems of the abstract natural numbers may be derived in the same
- way as for the concrete version. The original proof scripts may be
- re-used with some trivial changes only (mostly adding some type
- constraints).%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isadelimproof
-%
-\endisadelimproof
-%
-\isatagproof
-%
-\endisatagproof
-{\isafoldproof}%
-%
-\isadelimproof
-\isanewline
-%
-\endisadelimproof
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/AxClass/axclass.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-
-\documentclass[12pt,a4paper,fleqn]{report}
-\usepackage{graphicx,../iman,../extra,../isar}
-\usepackage{../isabelle,../isabellesym}
-\usepackage{../pdfsetup} % last one!
-
-\isabellestyle{it}
-\newcommand{\isasyminv}{\isamath{{}^{-1}}}
-\renewcommand{\isasymzero}{\isamath{0}}
-\renewcommand{\isasymone}{\isamath{1}}
-
-\newcommand{\secref}[1]{\S\ref{#1}}
-\newcommand{\figref}[1]{figure~\ref{#1}}
-
-\hyphenation{Isabelle}
-\hyphenation{Isar}
-\hyphenation{Haskell}
-
-\title{\includegraphics[scale=0.5]{isabelle_isar}
- \\[4ex] Using Axiomatic Type Classes in Isabelle}
-\author{\emph{Markus Wenzel} \\ TU M\"unchen}
-
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
-
-\pagestyle{headings}
-\sloppy
-\binperiod %%%treat . like a binary operator
-
-
-\begin{document}
-
-\underscoreoff
-
-\maketitle
-
-\begin{abstract}
- Isabelle offers order-sorted type classes on top of the simple types of
- plain Higher-Order Logic. The resulting type system is similar to that of
- the programming language Haskell. Its interpretation within the logic
- enables further application, though, apart from restricting polymorphism
- syntactically. In particular, the concept of \emph{Axiomatic Type Classes}
- provides a useful light-weight mechanism for hierarchically-structured
- abstract theories. Subsequently, we demonstrate typical uses of Isabelle's
- axiomatic type classes to model basic algebraic structures.
-
- This document describes axiomatic type classes using Isabelle/Isar theories,
- with proofs expressed via Isar proof language elements. The new theory
- format greatly simplifies the arrangement of the overall development, since
- definitions and proofs may be freely intermixed. Users who prefer tactic
- scripts over structured proofs do not need to fall back on separate ML
- scripts, though, but may refer to Isar's tactic emulation commands.
-\end{abstract}
-
-
-\pagenumbering{roman} \tableofcontents \clearfirst
-
-\include{body}
-
-%FIXME
-\nocite{nipkow-types93}
-\nocite{nipkow-sorts93}
-\nocite{Wenzel:1997:TPHOL}
-\nocite{paulson-isa-book}
-\nocite{isabelle-isar-ref}
-\nocite{Wenzel:1999:TPHOL}
-
-\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
- \bibliography{../manual}
-\endgroup
-
-\end{document}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
-% LocalWords: Isabelle FIXME
--- a/doc-src/AxClass/body.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,166 +0,0 @@
-
-\chapter{Introduction}
-
-A Haskell-style type-system \cite{haskell-report} with ordered type-classes
-has been present in Isabelle since 1991 already \cite{nipkow-sorts93}.
-Initially, classes have mainly served as a \emph{purely syntactic} tool to
-formulate polymorphic object-logics in a clean way, such as the standard
-Isabelle formulation of many-sorted FOL \cite{paulson-isa-book}.
-
-Applying classes at the \emph{logical level} to provide a simple notion of
-abstract theories and instantiations to concrete ones, has been long proposed
-as well \cite{nipkow-types93,nipkow-sorts93}. At that time, Isabelle still
-lacked built-in support for these \emph{axiomatic type classes}. More
-importantly, their semantics was not yet fully fleshed out (and unnecessarily
-complicated, too).
-
-Since Isabelle94, actual axiomatic type classes have been an integral part of
-Isabelle's meta-logic. This very simple implementation is based on a
-straight-forward extension of traditional simply-typed Higher-Order Logic, by
-including types qualified by logical predicates and overloaded constant
-definitions (see \cite{Wenzel:1997:TPHOL} for further details).
-
-Yet even until Isabelle99, there used to be still a fundamental methodological
-problem in using axiomatic type classes conveniently, due to the traditional
-distinction of Isabelle theory files vs.\ ML proof scripts. This has been
-finally overcome with the advent of Isabelle/Isar theories
-\cite{isabelle-isar-ref}: now definitions and proofs may be freely intermixed.
-This nicely accommodates the usual procedure of defining axiomatic type
-classes, proving abstract properties, defining operations on concrete types,
-proving concrete properties for instantiation of classes etc.
-
-\medskip
-
-So to cut a long story short, the present version of axiomatic type classes
-now provides an even more useful and convenient mechanism for light-weight
-abstract theories, without any special technical provisions to be observed by
-the user.
-
-
-\chapter{Examples}\label{sec:ex}
-
-Axiomatic type classes are a concept of Isabelle's meta-logic
-\cite{paulson-isa-book,Wenzel:1997:TPHOL}. They may be applied to any
-object-logic that directly uses the meta type system, such as Isabelle/HOL
-\cite{isabelle-HOL}. Subsequently, we present various examples that are all
-formulated within HOL, except the one of \secref{sec:ex-natclass} which is in
-FOL. See also \url{http://isabelle.in.tum.de/library/HOL/AxClasses/} and
-\url{http://isabelle.in.tum.de/library/FOL/ex/NatClass.html}.
-
-\input{Group/document/Semigroups}
-
-\input{Group/document/Group}
-
-\input{Group/document/Product}
-
-\input{Nat/document/NatClass}
-
-
-%% FIXME move some parts to ref or isar-ref manual (!?);
-
-% \chapter{The user interface of Isabelle's axclass package}
-
-% The actual axiomatic type class package of Isabelle/Pure mainly consists
-% of two new theory sections: \texttt{axclass} and \texttt{instance}. Some
-% typical applications of these have already been demonstrated in
-% \secref{sec:ex}, below their syntax and semantics are presented more
-% completely.
-
-
-% \section{The axclass section}
-
-% Within theory files, \texttt{axclass} introduces an axiomatic type class
-% definition. Its concrete syntax is:
-
-% \begin{matharray}{l}
-% \texttt{axclass} \\
-% \ \ c \texttt{ < } c@1\texttt, \ldots\texttt, c@n \\
-% \ \ id@1\ axm@1 \\
-% \ \ \vdots \\
-% \ \ id@m\ axm@m
-% \end{matharray}
-
-% Where $c, c@1, \ldots, c@n$ are classes (category $id$ or
-% $string$) and $axm@1, \ldots, axm@m$ (with $m \geq
-% 0$) are formulas (category $string$).
-
-% Class $c$ has to be new, and sort $\{c@1, \ldots, c@n\}$ a subsort of
-% \texttt{logic}. Each class axiom $axm@j$ may contain any term
-% variables, but at most one type variable (which need not be the same
-% for all axioms). The sort of this type variable has to be a supersort
-% of $\{c@1, \ldots, c@n\}$.
-
-% \medskip
-
-% The \texttt{axclass} section declares $c$ as subclass of $c@1, \ldots,
-% c@n$ to the type signature.
-
-% Furthermore, $axm@1, \ldots, axm@m$ are turned into the
-% ``abstract axioms'' of $c$ with names $id@1, \ldots,
-% id@m$. This is done by replacing all occurring type variables
-% by $\alpha :: c$. Original axioms that do not contain any type
-% variable will be prefixed by the logical precondition
-% $\texttt{OFCLASS}(\alpha :: \texttt{logic}, c\texttt{_class})$.
-
-% Another axiom of name $c\texttt{I}$ --- the ``class $c$ introduction
-% rule'' --- is built from the respective universal closures of
-% $axm@1, \ldots, axm@m$ appropriately.
-
-
-% \section{The instance section}
-
-% Section \texttt{instance} proves class inclusions or type arities at the
-% logical level and then transfers these into the type signature.
-
-% Its concrete syntax is:
-
-% \begin{matharray}{l}
-% \texttt{instance} \\
-% \ \ [\ c@1 \texttt{ < } c@2 \ |\
-% t \texttt{ ::\ (}sort@1\texttt, \ldots \texttt, sort@n\texttt) sort\ ] \\
-% \ \ [\ \texttt(name@1 \texttt, \ldots\texttt, name@m\texttt)\ ] \\
-% \ \ [\ \texttt{\{|} text \texttt{|\}}\ ]
-% \end{matharray}
-
-% Where $c@1, c@2$ are classes and $t$ is an $n$-place type constructor
-% (all of category $id$ or $string)$. Furthermore,
-% $sort@i$ are sorts in the usual Isabelle-syntax.
-
-% \medskip
-
-% Internally, \texttt{instance} first sets up an appropriate goal that
-% expresses the class inclusion or type arity as a meta-proposition.
-% Then tactic \texttt{AxClass.axclass_tac} is applied with all preceding
-% meta-definitions of the current theory file and the user-supplied
-% witnesses. The latter are $name@1, \ldots, name@m$, where
-% $id$ refers to an \ML-name of a theorem, and $string$ to an
-% axiom of the current theory node\footnote{Thus, the user may reference
-% axioms from above this \texttt{instance} in the theory file. Note
-% that new axioms appear at the \ML-toplevel only after the file is
-% processed completely.}.
-
-% Tactic \texttt{AxClass.axclass_tac} first unfolds the class definition by
-% resolving with rule $c\texttt{I}$, and then applies the witnesses
-% according to their form: Meta-definitions are unfolded, all other
-% formulas are repeatedly resolved\footnote{This is done in a way that
-% enables proper object-\emph{rules} to be used as witnesses for
-% corresponding class axioms.} with.
-
-% The final optional argument $text$ is \ML-code of an arbitrary
-% user tactic which is applied last to any remaining goals.
-
-% \medskip
-
-% Because of the complexity of \texttt{instance}'s witnessing mechanisms,
-% new users of the axclass package are advised to only use the simple
-% form $\texttt{instance}\ \ldots\ (id@1, \ldots, id@m)$, where
-% the identifiers refer to theorems that are appropriate type instances
-% of the class axioms. This typically requires an auxiliary theory,
-% though, which defines some constants and then proves these witnesses.
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "axclass"
-%%% End:
-% LocalWords: Isabelle FOL
--- a/doc-src/Dirs Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/Dirs Fri Feb 27 18:50:35 2009 +0100
@@ -1,1 +1,1 @@
-Ref System Logics HOL ZF Inductive TutorialI IsarOverview IsarRef IsarImplementation Locales LaTeXsugar IsarAdvanced/Classes IsarAdvanced/Codegen IsarAdvanced/Functions
+Intro Ref System Logics HOL ZF Inductive TutorialI IsarOverview IsarRef IsarImplementation Locales LaTeXsugar IsarAdvanced/Classes IsarAdvanced/Codegen IsarAdvanced/Functions
--- a/doc-src/Intro/intro.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/Intro/intro.tex Fri Feb 27 18:50:35 2009 +0100
@@ -7,7 +7,7 @@
%prth *(\(.*\)); \1;
%{\\out \(.*\)} {\\out val it = "\1" : thm}
-\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Introduction to Isabelle}
+\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Introduction to Isabelle}
\author{{\em Lawrence C. Paulson}\\
Computer Laboratory \\ University of Cambridge \\
\texttt{lcp@cl.cam.ac.uk}\\[3ex]
--- a/doc-src/IsarAdvanced/Classes/Thy/Classes.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Classes/Thy/Classes.thy Fri Feb 27 18:50:35 2009 +0100
@@ -537,7 +537,7 @@
\end{picture}
\caption{Subclass relationship of monoids and groups:
before and after establishing the relationship
- @{text "group \<subseteq> monoid"}; transitive edges left out.}
+ @{text "group \<subseteq> monoid"}; transitive edges are left out.}
\label{fig:subclass}
\end{center}
\end{figure}
--- a/doc-src/IsarAdvanced/Classes/Thy/document/Classes.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Classes/Thy/document/Classes.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1153,7 +1153,7 @@
\hspace*{0pt}module Example where {\char123}\\
\hspace*{0pt}\\
\hspace*{0pt}\\
-\hspace*{0pt}data Nat = Suc Nat | Zero{\char95}nat;\\
+\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Nat;\\
\hspace*{0pt}\\
\hspace*{0pt}nat{\char95}aux ::~Integer -> Nat -> Nat;\\
\hspace*{0pt}nat{\char95}aux i n = (if i <= 0 then n else nat{\char95}aux (i - 1) (Suc n));\\
@@ -1240,7 +1240,7 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
\hspace*{0pt}fun nat{\char95}aux i n =\\
\hspace*{0pt} ~(if IntInf.<= (i,~(0 :~IntInf.int)) then n\\
--- a/doc-src/IsarAdvanced/Codegen/Thy/Setup.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Codegen/Thy/Setup.thy Fri Feb 27 18:50:35 2009 +0100
@@ -5,7 +5,7 @@
ML {* no_document use_thys
["Efficient_Nat", "Code_Char_chr", "Product_ord", "~~/src/HOL/Imperative_HOL/Imperative_HOL",
- "~~/src/HOL/Reflection/Ferrack"] *}
+ "~~/src/HOL/Decision_Procs/Ferrack"] *}
ML_val {* Code_Target.code_width := 74 *}
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/Adaption.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/Adaption.tex Fri Feb 27 18:50:35 2009 +0100
@@ -267,9 +267,9 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype boola = False | True;\\
+\hspace*{0pt}datatype boola = True | False;\\
\hspace*{0pt}\\
\hspace*{0pt}fun anda x True = x\\
\hspace*{0pt} ~| anda x False = False\\
@@ -350,7 +350,7 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
\hspace*{0pt}fun less{\char95}nat m (Suc n) = less{\char95}eq{\char95}nat m n\\
\hspace*{0pt} ~| less{\char95}nat n Zero{\char95}nat = false\\
@@ -407,7 +407,7 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
\hspace*{0pt}fun less{\char95}nat m (Suc n) = less{\char95}eq{\char95}nat m n\\
\hspace*{0pt} ~| less{\char95}nat n Zero{\char95}nat = false\\
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/ML.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/ML.tex Fri Feb 27 18:50:35 2009 +0100
@@ -52,18 +52,18 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{Code.add\_eqn}\verb|Code.add_eqn: thm -> theory -> theory| \\
- \indexml{Code.del\_eqn}\verb|Code.del_eqn: thm -> theory -> theory| \\
- \indexml{Code.add\_eqnl}\verb|Code.add_eqnl: string * (thm * bool) list lazy -> theory -> theory| \\
- \indexml{Code.map\_pre}\verb|Code.map_pre: (simpset -> simpset) -> theory -> theory| \\
- \indexml{Code.map\_post}\verb|Code.map_post: (simpset -> simpset) -> theory -> theory| \\
- \indexml{Code.add\_functrans}\verb|Code.add_functrans: string * (theory -> (thm * bool) list -> (thm * bool) list option)|\isasep\isanewline%
+ \indexdef{}{ML}{Code.add\_eqn}\verb|Code.add_eqn: thm -> theory -> theory| \\
+ \indexdef{}{ML}{Code.del\_eqn}\verb|Code.del_eqn: thm -> theory -> theory| \\
+ \indexdef{}{ML}{Code.add\_eqnl}\verb|Code.add_eqnl: string * (thm * bool) list lazy -> theory -> theory| \\
+ \indexdef{}{ML}{Code.map\_pre}\verb|Code.map_pre: (simpset -> simpset) -> theory -> theory| \\
+ \indexdef{}{ML}{Code.map\_post}\verb|Code.map_post: (simpset -> simpset) -> theory -> theory| \\
+ \indexdef{}{ML}{Code.add\_functrans}\verb|Code.add_functrans: string * (theory -> (thm * bool) list -> (thm * bool) list option)|\isasep\isanewline%
\verb| -> theory -> theory| \\
- \indexml{Code.del\_functrans}\verb|Code.del_functrans: string -> theory -> theory| \\
- \indexml{Code.add\_datatype}\verb|Code.add_datatype: (string * typ) list -> theory -> theory| \\
- \indexml{Code.get\_datatype}\verb|Code.get_datatype: theory -> string|\isasep\isanewline%
+ \indexdef{}{ML}{Code.del\_functrans}\verb|Code.del_functrans: string -> theory -> theory| \\
+ \indexdef{}{ML}{Code.add\_datatype}\verb|Code.add_datatype: (string * typ) list -> theory -> theory| \\
+ \indexdef{}{ML}{Code.get\_datatype}\verb|Code.get_datatype: theory -> string|\isasep\isanewline%
\verb| -> (string * sort) list * (string * typ list) list| \\
- \indexml{Code.get\_datatype\_of\_constr}\verb|Code.get_datatype_of_constr: theory -> string -> string option|
+ \indexdef{}{ML}{Code.get\_datatype\_of\_constr}\verb|Code.get_datatype_of_constr: theory -> string -> string option|
\end{mldecls}
\begin{description}
@@ -124,9 +124,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{Code\_Unit.read\_const}\verb|Code_Unit.read_const: theory -> string -> string| \\
- \indexml{Code\_Unit.head\_eqn}\verb|Code_Unit.head_eqn: theory -> thm -> string * ((string * sort) list * typ)| \\
- \indexml{Code\_Unit.rewrite\_eqn}\verb|Code_Unit.rewrite_eqn: simpset -> thm -> thm| \\
+ \indexdef{}{ML}{Code\_Unit.read\_const}\verb|Code_Unit.read_const: theory -> string -> string| \\
+ \indexdef{}{ML}{Code\_Unit.head\_eqn}\verb|Code_Unit.head_eqn: theory -> thm -> string * ((string * sort) list * typ)| \\
+ \indexdef{}{ML}{Code\_Unit.rewrite\_eqn}\verb|Code_Unit.rewrite_eqn: simpset -> thm -> thm| \\
\end{mldecls}
\begin{description}
--- a/doc-src/IsarAdvanced/Codegen/Thy/document/Program.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Codegen/Thy/document/Program.tex Fri Feb 27 18:50:35 2009 +0100
@@ -276,7 +276,7 @@
\hspace*{0pt}module Example where {\char123}\\
\hspace*{0pt}\\
\hspace*{0pt}\\
-\hspace*{0pt}data Nat = Suc Nat | Zero{\char95}nat;\\
+\hspace*{0pt}data Nat = Zero{\char95}nat | Suc Nat;\\
\hspace*{0pt}\\
\hspace*{0pt}class Semigroup a where {\char123}\\
\hspace*{0pt} ~mult ::~a -> a -> a;\\
@@ -341,7 +341,7 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
\hspace*{0pt}type 'a semigroup = {\char123}mult :~'a -> 'a -> 'a{\char125};\\
\hspace*{0pt}fun mult (A{\char95}:'a semigroup) = {\char35}mult A{\char95};\\
@@ -1032,7 +1032,7 @@
\hspace*{0pt}structure Example = \\
\hspace*{0pt}struct\\
\hspace*{0pt}\\
-\hspace*{0pt}datatype nat = Suc of nat | Zero{\char95}nat;\\
+\hspace*{0pt}datatype nat = Zero{\char95}nat | Suc of nat;\\
\hspace*{0pt}\\
\hspace*{0pt}fun null [] = true\\
\hspace*{0pt} ~| null (x ::~xs) = false;\\
--- a/doc-src/IsarAdvanced/Codegen/style.sty Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Codegen/style.sty Fri Feb 27 18:50:35 2009 +0100
@@ -6,12 +6,6 @@
%% references
\newcommand{\secref}[1]{\S\ref{#1}}
-%% index
-\newcommand{\indexml}[1]{\index{\emph{#1}|bold}}
-\newcommand{\indexmltype}[1]{\index{\emph{#1} (type)|bold}}
-\newcommand{\indexmlstructure}[1]{\index{\emph{#1} (structure)|bold}}
-\newcommand{\indexmlfunctor}[1]{\index{\emph{#1} (functor)|bold}}
-
%% logical markup
\newcommand{\strong}[1]{{\bfseries {#1}}}
\newcommand{\qn}[1]{\emph{#1}}
--- a/doc-src/IsarAdvanced/Functions/Thy/document/Functions.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Functions/Thy/document/Functions.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1104,7 +1104,7 @@
%
\begin{isamarkuptext}%
\noindent Clearly, any attempt of a termination proof must fail. And without
- that, we do not get the usual rules \isa{findzero{\isachardot}simp} and
+ that, we do not get the usual rules \isa{findzero{\isachardot}simps} and
\isa{findzero{\isachardot}induct}. So what was the definition good for at all?%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -1480,7 +1480,7 @@
The predicate \isa{findzero{\isacharunderscore}dom} is the accessible part of
that relation. An argument belongs to the accessible part, if it can
- be reached in a finite number of steps (cf.~its definition in \isa{Accessible{\isacharunderscore}Part{\isachardot}thy}).
+ be reached in a finite number of steps (cf.~its definition in \isa{Wellfounded{\isachardot}thy}).
Since the domain predicate is just an abbreviation, you can use
lemmas for \isa{accp} and \isa{findzero{\isacharunderscore}rel} directly. Some
@@ -1823,7 +1823,7 @@
As usual, we have to give a wellfounded relation, such that the
arguments of the recursive calls get smaller. But what exactly are
the arguments of the recursive calls when mirror is given as an
- argument to map? Isabelle gives us the
+ argument to \isa{map}? Isabelle gives us the
subgoals
\begin{isabelle}%
@@ -1835,9 +1835,9 @@
applies the recursive call \isa{mirror} to elements
of \isa{l}, which is essential for the termination proof.
- This knowledge about map is encoded in so-called congruence rules,
+ This knowledge about \isa{map} is encoded in so-called congruence rules,
which are special theorems known to the \cmd{function} command. The
- rule for map is
+ rule for \isa{map} is
\begin{isabelle}%
{\isasymlbrakk}{\isacharquery}xs\ {\isacharequal}\ {\isacharquery}ys{\isacharsemicolon}\ {\isasymAnd}x{\isachardot}\ x\ {\isasymin}\ set\ {\isacharquery}ys\ {\isasymLongrightarrow}\ {\isacharquery}f\ x\ {\isacharequal}\ {\isacharquery}g\ x{\isasymrbrakk}\ {\isasymLongrightarrow}\ map\ {\isacharquery}f\ {\isacharquery}xs\ {\isacharequal}\ map\ {\isacharquery}g\ {\isacharquery}ys%
--- a/doc-src/IsarAdvanced/Functions/functions.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Functions/functions.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,3 @@
-
-%% $Id$
\documentclass[a4paper,fleqn]{article}
@@ -19,11 +17,8 @@
\newcommand{\isasymINCLUDES}{\cmd{includes}}
\newcommand{\isasymDATATYPE}{\cmd{datatype}}
\newcommand{\isasymAXCLASS}{\cmd{axclass}}
-\newcommand{\isasymFIXES}{\cmd{fixes}}
-\newcommand{\isasymASSUMES}{\cmd{assumes}}
\newcommand{\isasymDEFINES}{\cmd{defines}}
\newcommand{\isasymNOTES}{\cmd{notes}}
-\newcommand{\isasymSHOWS}{\cmd{shows}}
\newcommand{\isasymCLASS}{\cmd{class}}
\newcommand{\isasymINSTANCE}{\cmd{instance}}
\newcommand{\isasymLEMMA}{\cmd{lemma}}
--- a/doc-src/IsarAdvanced/Functions/style.sty Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarAdvanced/Functions/style.sty Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
%% toc
\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
@@ -10,19 +7,6 @@
\newcommand{\chref}[1]{chapter~\ref{#1}}
\newcommand{\figref}[1]{figure~\ref{#1}}
-%% glossary
-\renewcommand{\glossary}[2]{\nomenclature{\bf #1}{#2}}
-\newcommand{\seeglossary}[1]{\emph{#1}}
-\newcommand{\glossaryname}{Glossary}
-\renewcommand{\nomname}{\glossaryname}
-\renewcommand{\pagedeclaration}[1]{\nobreak\quad\dotfill~page~\bold{#1}}
-
-%% index
-\newcommand{\indexml}[1]{\index{\emph{#1}|bold}}
-\newcommand{\indexmltype}[1]{\index{\emph{#1} (type)|bold}}
-\newcommand{\indexmlstructure}[1]{\index{\emph{#1} (structure)|bold}}
-\newcommand{\indexmlfunctor}[1]{\index{\emph{#1} (functor)|bold}}
-
%% math
\newcommand{\text}[1]{\mbox{#1}}
\newcommand{\isasymvartheta}{\isamath{\theta}}
--- a/doc-src/IsarImplementation/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/IsaMakefile Fri Feb 27 18:50:35 2009 +0100
@@ -21,9 +21,10 @@
Thy: $(LOG)/Pure-Thy.gz
-$(LOG)/Pure-Thy.gz: Thy/ROOT.ML Thy/base.thy Thy/integration.thy Thy/isar.thy \
- Thy/locale.thy Thy/logic.thy Thy/prelim.thy Thy/proof.thy Thy/tactic.thy \
- Thy/ML.thy ../antiquote_setup.ML
+$(LOG)/Pure-Thy.gz: Thy/ROOT.ML Thy/Base.thy Thy/Integration.thy \
+ Thy/Isar.thy Thy/Local_Theory.thy Thy/Logic.thy Thy/Prelim.thy \
+ Thy/Proof.thy Thy/Syntax.thy Thy/Tactic.thy Thy/ML.thy \
+ ../antiquote_setup.ML
@$(USEDIR) Pure Thy
--- a/doc-src/IsarImplementation/Makefile Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/Makefile Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-#
-# $Id$
-#
## targets
@@ -11,16 +8,14 @@
include ../Makefile.in
-MAKEGLOSSARY = ./makeglossary
-
NAME = implementation
-FILES = implementation.tex intro.tex Thy/document/prelim.tex \
- Thy/document/logic.tex Thy/document/tactic.tex \
- Thy/document/proof.tex Thy/document/locale.tex \
- Thy/document/integration.tex style.sty ../iman.sty ../extra.sty \
- ../isar.sty ../isabelle.sty ../isabellesym.sty ../pdfsetup.sty \
- ../manual.bib ../proof.sty
+FILES = ../extra.sty ../iman.sty ../isabelle.sty ../isabellesym.sty \
+ ../isar.sty ../manual.bib ../pdfsetup.sty ../proof.sty \
+ Thy/document/Integration.tex Thy/document/Local_Theory.tex \
+ Thy/document/Logic.tex Thy/document/Prelim.tex \
+ Thy/document/Proof.tex Thy/document/Syntax.tex \
+ Thy/document/Tactic.tex implementation.tex style.sty
dvi: $(NAME).dvi
@@ -29,7 +24,6 @@
$(BIBTEX) $(NAME)
$(LATEX) $(NAME)
$(LATEX) $(NAME)
- $(MAKEGLOSSARY) $(NAME)
$(SEDINDEX) $(NAME)
$(LATEX) $(NAME)
$(LATEX) $(NAME)
@@ -41,7 +35,6 @@
$(BIBTEX) $(NAME)
$(PDFLATEX) $(NAME)
$(PDFLATEX) $(NAME)
- $(MAKEGLOSSARY) $(NAME)
$(SEDINDEX) $(NAME)
$(FIXBOOKMARKS) $(NAME).out
$(PDFLATEX) $(NAME)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Base.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,6 @@
+theory Base
+imports Pure
+uses "../../antiquote_setup.ML"
+begin
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Integration.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,425 @@
+theory Integration
+imports Base
+begin
+
+chapter {* System integration *}
+
+section {* Isar toplevel \label{sec:isar-toplevel} *}
+
+text {* The Isar toplevel may be considered the central hub of the
+ Isabelle/Isar system, where all key components and sub-systems are
+ integrated into a single read-eval-print loop of Isar commands. We
+ shall even incorporate the existing {\ML} toplevel of the compiler
+ and run-time system (cf.\ \secref{sec:ML-toplevel}).
+
+ Isabelle/Isar departs from the original ``LCF system architecture''
+ where {\ML} was really The Meta Language for defining theories and
+ conducting proofs. Instead, {\ML} now only serves as the
+ implementation language for the system (and user extensions), while
+ the specific Isar toplevel supports the concepts of theory and proof
+ development natively. This includes the graph structure of theories
+ and the block structure of proofs, support for unlimited undo,
+ facilities for tracing, debugging, timing, profiling etc.
+
+ \medskip The toplevel maintains an implicit state, which is
+ transformed by a sequence of transitions -- either interactively or
+ in batch-mode. In interactive mode, Isar state transitions are
+ encapsulated as safe transactions, such that both failure and undo
+ are handled conveniently without destroying the underlying draft
+ theory (cf.~\secref{sec:context-theory}). In batch mode,
+ transitions operate in a linear (destructive) fashion, such that
+ error conditions abort the present attempt to construct a theory or
+ proof altogether.
+
+ The toplevel state is a disjoint sum of empty @{text toplevel}, or
+ @{text theory}, or @{text proof}. On entering the main Isar loop we
+ start with an empty toplevel. A theory is commenced by giving a
+ @{text \<THEORY>} header; within a theory we may issue theory
+ commands such as @{text \<DEFINITION>}, or state a @{text
+ \<THEOREM>} to be proven. Now we are within a proof state, with a
+ rich collection of Isar proof commands for structured proof
+ composition, or unstructured proof scripts. When the proof is
+ concluded we get back to the theory, which is then updated by
+ storing the resulting fact. Further theory declarations or theorem
+ statements with proofs may follow, until we eventually conclude the
+ theory development by issuing @{text \<END>}. The resulting theory
+ is then stored within the theory database and we are back to the
+ empty toplevel.
+
+ In addition to these proper state transformations, there are also
+ some diagnostic commands for peeking at the toplevel state without
+ modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
+ \isakeyword{print-cases}).
+*}
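+
+text {*
+  As a minimal illustration of this life-cycle, consider the
+  following Isar text (theory name and content are purely
+  hypothetical, assuming an Isabelle/HOL environment):
+
+  \begin{verbatim}
+  theory Demo imports Main begin
+
+  definition k :: nat where "k = 0"
+
+  theorem k_eq_0: "k = 0"
+    by (simp add: k_def)
+
+  end
+  \end{verbatim}
+
+  The header enters the theory state, the theorem statement opens a
+  proof state, and the final @{text "end"} concludes the development,
+  storing the result in the theory database.
+*}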
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type Toplevel.state} \\
+ @{index_ML Toplevel.UNDEF: "exn"} \\
+ @{index_ML Toplevel.is_toplevel: "Toplevel.state -> bool"} \\
+ @{index_ML Toplevel.theory_of: "Toplevel.state -> theory"} \\
+ @{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
+ @{index_ML Toplevel.debug: "bool ref"} \\
+ @{index_ML Toplevel.timing: "bool ref"} \\
+ @{index_ML Toplevel.profiling: "int ref"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type Toplevel.state} represents Isar toplevel states,
+ which are normally manipulated through the concept of toplevel
+ transitions only (\secref{sec:toplevel-transition}). Also note that
+ a raw toplevel state is subject to the same linearity restrictions
+ as a theory context (cf.~\secref{sec:context-theory}).
+
+ \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
+ operations. Many operations work only partially for certain cases,
+ since @{ML_type Toplevel.state} is a sum type.
+
+ \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
+ toplevel state.
+
+ \item @{ML Toplevel.theory_of}~@{text "state"} selects the theory of
+ a theory or proof (!), otherwise raises @{ML Toplevel.UNDEF}.
+
+ \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
+ state if available, otherwise raises @{ML Toplevel.UNDEF}.
+
+ \item @{ML "set Toplevel.debug"} makes the toplevel print further
+ details about internal error conditions, exceptions being raised
+ etc.
+
+ \item @{ML "set Toplevel.timing"} makes the toplevel print timing
+ information for each Isar command being executed.
+
+ \item @{ML Toplevel.profiling}~@{verbatim ":="}~@{text "n"} controls
+ low-level profiling of the underlying {\ML} runtime system. For
+ Poly/ML, @{text "n = 1"} means time and @{text "n = 2"} space
+ profiling.
+
+ \end{description}
+*}
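+
+text {*
+  For example, the diagnostic flags above may be toggled from the
+  {\ML} toplevel as follows (a sketch for an interactive Poly/ML
+  session, using only the operations listed above):
+
+  \begin{verbatim}
+  set Toplevel.debug;         (* print internal error details *)
+  set Toplevel.timing;        (* print timing of Isar commands *)
+  Toplevel.profiling := 1;    (* time profiling of the ML runtime *)
+  \end{verbatim}
+*}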
+
+
+subsection {* Toplevel transitions \label{sec:toplevel-transition} *}
+
+text {*
+ An Isar toplevel transition consists of a partial function on the
+ toplevel state, with additional information for diagnostics and
+ error reporting: there are fields for command name, source position,
+ optional source text, as well as flags for interactive-only commands
+ (which issue a warning in batch-mode), printing of result state,
+ etc.
+
+ The operational part is represented as the sequential union of a
+ list of partial functions, which are tried in turn until the first
+ one succeeds. This acts like an outer case-expression for various
+ alternative state transitions. For example, \isakeyword{qed} acts
+ differently for a local proof vs.\ the global ending of the main
+ proof.
+
+ Toplevel transitions are composed via transition transformers.
+ Internally, Isar commands are put together from an empty transition
+ extended by name and source position (and optional source text). It
+ is then left to the individual command parser to turn the given
+ concrete syntax into a suitable transition transformer that adjoins
+ actual operations on a theory or proof state etc.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML Toplevel.print: "Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.no_timing: "Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.keep: "(Toplevel.state -> unit) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.theory: "(theory -> theory) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.theory_to_proof: "(theory -> Proof.state) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.proof: "(Proof.state -> Proof.state) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.proofs: "(Proof.state -> Proof.state Seq.seq) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ @{index_ML Toplevel.end_proof: "(bool -> Proof.state -> Proof.context) ->
+ Toplevel.transition -> Toplevel.transition"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML Toplevel.print}~@{text "tr"} sets the print flag, which
+ causes the toplevel loop to echo the result state (in interactive
+ mode).
+
+ \item @{ML Toplevel.no_timing}~@{text "tr"} indicates that the
+ transition should never show timing information, e.g.\ because it is
+ a diagnostic command.
+
+ \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
+ function.
+
+ \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
+ transformer.
+
+ \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
+ goal function, which turns a theory into a proof state. The theory
+ may be changed before entering the proof; the generic Isar goal
+ setup includes an argument that specifies how to apply the proven
+ result to the theory, when the proof is finished.
+
+ \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
+ proof command, with a singleton result.
+
+ \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
+ command, with zero or more result states (represented as a lazy
+ list).
+
+ \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
+ proof command that returns the resulting theory, after storing the
+ resulting facts in the context etc.
+
+ \end{description}
+*}
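+
+text {*
+  To give a flavour of how such transformers compose, here is a
+  sketch that adjoins a diagnostic function via @{ML Toplevel.keep};
+  the auxiliary @{text "Context.theory_name"} and @{text writeln}
+  operations are assumed to be available as usual:
+
+  \begin{verbatim}
+  (* print the name of the current theory; a sketch only *)
+  val report_theory =
+    Toplevel.keep (fn state =>
+      writeln (Context.theory_name (Toplevel.theory_of state)));
+  \end{verbatim}
+
+  Given a transition @{text tr}, the value @{text "report_theory tr"}
+  is again a transition, with the printing operation adjoined.
+*}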
+
+
+subsection {* Toplevel control *}
+
+text {*
+ There are a few special control commands that modify the behavior of
+ the toplevel itself, and only make sense in interactive mode. Under
+ normal circumstances, the user encounters these only implicitly as
+ part of the protocol between the Isabelle/Isar system and a
+ user-interface such as ProofGeneral.
+
+ \begin{description}
+
+ \item \isacommand{undo} follows the three-level hierarchy of empty
+ toplevel vs.\ theory vs.\ proof: undo within a proof reverts to the
+ previous proof context, undo after a proof reverts to the theory
+ before the initial goal statement, undo of a theory command reverts
+ to the previous theory value, undo of a theory header discontinues
+ the current theory development and removes it from the theory
+ database (\secref{sec:theory-database}).
+
+ \item \isacommand{kill} aborts the current level of development:
+ kill in a proof context reverts to the theory before the initial
+ goal statement; kill in a theory context aborts the current theory
+ development, removing it from the database.
+
+ \item \isacommand{exit} drops out of the Isar toplevel into the
+ underlying {\ML} toplevel (\secref{sec:ML-toplevel}). The Isar
+ toplevel state is preserved and may be continued later.
+
+ \item \isacommand{quit} terminates the Isabelle/Isar process without
+ saving.
+
+ \end{description}
+*}
+
+
+section {* ML toplevel \label{sec:ML-toplevel} *}
+
+text {*
+ The {\ML} toplevel provides a read-compile-eval-print loop for {\ML}
+ values, types, structures, and functors. {\ML} declarations operate
+ on the global system state, which consists of the compiler
+ environment plus the values of {\ML} reference variables. There is
+ no clean way to undo {\ML} declarations, except for reverting to a
+ previously saved state of the whole Isabelle process. {\ML} input
+ is either read interactively from a TTY, or from a string (usually
+ within a theory text), or from a source file (usually loaded from a
+ theory).
+
+ Whenever the {\ML} toplevel is active, the current Isabelle theory
+ context is passed as an internal reference variable. Thus {\ML}
+ code may access the theory context during compilation, and it may
+ even change the value of a theory under construction --- while
+ observing the usual linearity restrictions
+ (cf.~\secref{sec:context-theory}).
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML the_context: "unit -> theory"} \\
+ @{index_ML "Context.>> ": "(Context.generic -> Context.generic) -> unit"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML "the_context ()"} refers to the theory context of the
+ {\ML} toplevel --- at compile time! {\ML} code needs to take care
+ to refer to @{ML "the_context ()"} correctly. Recall that
+ evaluation of a function body is delayed until actual runtime.
+ Moreover, persistent {\ML} toplevel bindings to an unfinished theory
+ should be avoided: code should either project out the desired
+ information immediately, or produce an explicit @{ML_type
+ theory_ref} (cf.\ \secref{sec:context-theory}).
+
+ \item @{ML "Context.>>"}~@{text f} applies context transformation
+ @{text f} to the implicit context of the {\ML} toplevel.
+
+ \end{description}
+
+ It is very important to note that the above functions are really
+ restricted to compile time, even though the {\ML} compiler is
+ invoked at runtime! The majority of {\ML} code uses explicit
+ functional arguments of a theory or proof context instead. Thus it
+ may be invoked for an arbitrary context later on, without having to
+ worry about any operational details.
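+
+ For example, a compile-time modification of the implicit theory
+ context might look like this (a minimal sketch; the name space path
+ @{text "demo"} is arbitrary):
+
+ \begin{verbatim}
+ (*modify the implicit theory context at compile time*)
+ Context.>> (Context.map_theory (Sign.add_path "demo"));
+
+ (*project out information immediately*)
+ val name = Context.theory_name (the_context ());
+ \end{verbatim}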
+
+ \bigskip
+
+ \begin{mldecls}
+ @{index_ML Isar.main: "unit -> unit"} \\
+ @{index_ML Isar.loop: "unit -> unit"} \\
+ @{index_ML Isar.state: "unit -> Toplevel.state"} \\
+ @{index_ML Isar.exn: "unit -> (exn * string) option"} \\
+ @{index_ML Isar.context: "unit -> Proof.context"} \\
+ @{index_ML Isar.goal: "unit -> thm"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML "Isar.main ()"} invokes the Isar toplevel from {\ML},
+ initializing an empty toplevel state.
+
+ \item @{ML "Isar.loop ()"} continues the Isar toplevel with the
+ current state, after having dropped out of the Isar toplevel loop.
+
+ \item @{ML "Isar.state ()"} and @{ML "Isar.exn ()"} get current
+ toplevel state and error condition, respectively. This only works
+ after having dropped out of the Isar toplevel loop.
+
+ \item @{ML "Isar.context ()"} produces the proof context from @{ML
+ "Isar.state ()"}, analogous to @{ML Context.proof_of}
+ (\secref{sec:generic-context}).
+
+ \item @{ML "Isar.goal ()"} picks the tactical goal from @{ML
+ "Isar.state ()"}, represented as a theorem according to
+ \secref{sec:tactical-goals}.
+
+ \end{description}
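+
+ \medskip A typical pattern after having dropped out of the Isar
+ toplevel loop might look like this (sketch):
+
+ \begin{verbatim}
+ val st = Isar.state ();       (*inspect the toplevel state*)
+ val ctxt = Isar.context ();   (*current proof context*)
+ Isar.loop ();                 (*resume the Isar toplevel afterwards*)
+ \end{verbatim}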
+*}
+
+
+section {* Theory database \label{sec:theory-database} *}
+
+text {*
+ The theory database maintains a collection of theories, together
+ with some administrative information about their original sources,
+ which are held in an external store (i.e.\ some directory within the
+ regular file system).
+
+ The theory database is organized as a directed acyclic graph;
+ entries are referenced by theory name. Although some additional
+ interfaces allow a directory specification to be included as well, this
+ is only a hint to the underlying theory loader. The internal theory
+ name space is flat!
+
+ Theory @{text A} is associated with the main theory file @{text
+ A}\verb,.thy,, which needs to be accessible through the theory
+ loader path. Any number of additional {\ML} source files may be
+ associated with each theory, by declaring these dependencies in the
+ theory header as @{text \<USES>}, and loading them consecutively
+ within the theory context. The system keeps track of incoming {\ML}
+ sources and associates them with the current theory. The file
+ @{text A}\verb,.ML, is loaded after a theory has been concluded, in
+ order to support legacy {\ML} proof scripts.
+
+ The basic internal actions of the theory database are @{text
+ "update"}, @{text "outdate"}, and @{text "remove"}:
+
+ \begin{itemize}
+
+ \item @{text "update A"} introduces a link of @{text "A"} with a
+ @{text "theory"} value of the same name; it asserts that the theory
+ sources are now consistent with that value;
+
+ \item @{text "outdate A"} invalidates the link of a theory database
+ entry to its sources, but retains the present theory value;
+
+ \item @{text "remove A"} deletes entry @{text "A"} from the theory
+ database.
+
+ \end{itemize}
+
+ These actions are propagated to sub- or super-graphs of a theory
+ entry as expected, in order to preserve global consistency of the
+ state of all loaded theories with the sources of the external store.
+ This implies certain causalities between actions: @{text "update"}
+ or @{text "outdate"} of an entry will @{text "outdate"} all
+ descendants; @{text "remove"} will @{text "remove"} all descendants.
+
+ \medskip There are separate user-level interfaces to operate on the
+ theory database directly or indirectly. The primitive actions then
+ just happen automatically while working with the system. In
+ particular, processing a theory header @{text "\<THEORY> A
+ \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"} ensures that the
+ sub-graph of the collective imports @{text "B\<^sub>1 \<dots> B\<^sub>n"}
+ is up-to-date, too. Earlier theories are reloaded as required, with
+ @{text update} actions proceeding in topological order according to
+ theory dependencies.  There may also be a wave of implied @{text
+ outdate} actions for derived theory nodes until a stable situation
+ is achieved eventually.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML theory: "string -> theory"} \\
+ @{index_ML use_thy: "string -> unit"} \\
+ @{index_ML use_thys: "string list -> unit"} \\
+ @{index_ML ThyInfo.touch_thy: "string -> unit"} \\
+ @{index_ML ThyInfo.remove_thy: "string -> unit"} \\[1ex]
+ @{index_ML ThyInfo.begin_theory}@{verbatim ": ... -> bool -> theory"} \\
+ @{index_ML ThyInfo.end_theory: "theory -> unit"} \\
+ @{index_ML ThyInfo.register_theory: "theory -> unit"} \\[1ex]
+ @{verbatim "datatype action = Update | Outdate | Remove"} \\
+ @{index_ML ThyInfo.add_hook: "(ThyInfo.action -> string -> unit) -> unit"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML theory}~@{text A} retrieves the theory value presently
+ associated with name @{text A}. Note that the result might be
+ outdated.
+
+ \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
+ up-to-date wrt.\ the external file store, reloading outdated
+ ancestors as required.
+
+ \item @{ML use_thys} is similar to @{ML use_thy}, but handles
+ several theories simultaneously. Thus it acts like processing the
+ import header of a theory, although without performing the merge of
+ the result.
+
+ \item @{ML ThyInfo.touch_thy}~@{text A} performs an @{text outdate} action
+ on theory @{text A} and all descendants.
+
+ \item @{ML ThyInfo.remove_thy}~@{text A} deletes theory @{text A} and all
+ descendants from the theory database.
+
+ \item @{ML ThyInfo.begin_theory} is the basic operation behind a
+ @{text \<THEORY>} header declaration.  This {\ML} function is
+ normally not invoked directly.
+
+ \item @{ML ThyInfo.end_theory} concludes the loading of a theory
+ proper and stores the result in the theory database.
+
+ \item @{ML ThyInfo.register_theory}~@{text "thy"} registers an
+ existing theory value with the theory loader database. There is no
+ management of associated sources.
+
+ \item @{ML "ThyInfo.add_hook"}~@{text f} registers function @{text
+ f} as a hook for theory database actions. The function will be
+ invoked with the action and theory name being involved; thus derived
+ actions may be performed in associated system components, e.g.\
+ maintaining the state of an editor for the theory sources.
+
+ The kind and order of actions occurring in practice depend both on
+ user interactions and the internal process of resolving theory
+ imports. Hooks should not rely on a particular policy here! Any
+ exceptions raised by the hook are ignored.
+
+ \end{description}
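+
+ \medskip For example, the following sketch of a hook merely traces
+ the actions performed by the theory database:
+
+ \begin{verbatim}
+ ThyInfo.add_hook (fn action => fn name =>
+   writeln ("theory " ^ quote name ^ ": " ^
+     (case action of
+       ThyInfo.Update => "update"
+     | ThyInfo.Outdate => "outdate"
+     | ThyInfo.Remove => "remove")));
+ \end{verbatim}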
+*}
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Isar.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,37 @@
+theory Isar
+imports Base
+begin
+
+chapter {* Isar language elements *}
+
+text {*
+ The primary Isar language consists of three main categories of
+ language elements:
+
+ \begin{enumerate}
+
+ \item Proof commands
+
+ \item Proof methods
+
+ \item Attributes
+
+ \end{enumerate}
+*}
+
+
+section {* Proof commands *}
+
+text FIXME
+
+
+section {* Proof methods *}
+
+text FIXME
+
+
+section {* Attributes *}
+
+text FIXME
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Local_Theory.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,168 @@
+theory Local_Theory
+imports Base
+begin
+
+chapter {* Local theory specifications *}
+
+text {*
+ A \emph{local theory} combines aspects of both theory and proof
+ context (cf.\ \secref{sec:context}), such that definitional
+ specifications may be given relatively to parameters and
+ assumptions. A local theory is represented as a regular proof
+ context, augmented by administrative data about the \emph{target
+ context}.
+
+ The target is usually derived from the background theory by adding
+ local @{text "\<FIX>"} and @{text "\<ASSUME>"} elements, plus
+ suitable modifications of non-logical context data (e.g.\ a special
+ type-checking discipline). Once initialized, the target is ready to
+ absorb definitional primitives: @{text "\<DEFINE>"} for terms and
+ @{text "\<NOTE>"} for theorems. Such definitions may get
+ transformed in a target-specific way, but the programming interface
+ hides such details.
+
+ Isabelle/Pure provides target mechanisms for locales, type-classes,
+ type-class instantiations, and general overloading. In principle,
+ users can implement new targets as well, but this rather arcane
+ discipline is beyond the scope of this manual. In contrast,
+ implementing derived definitional packages to be used within a local
+ theory context is quite easy: the interfaces are even simpler and
+ more abstract than the underlying primitives for raw theories.
+
+ Many definitional packages for local theories are available in
+ Isabelle. Although a few old packages only work for global
+ theories, the local theory interface is already the standard way of
+ implementing definitional packages in Isabelle.
+*}
+
+
+section {* Definitional elements *}
+
+text {*
+ There are separate elements @{text "\<DEFINE> c \<equiv> t"} for terms, and
+ @{text "\<NOTE> b = thm"} for theorems. Types are treated
+ implicitly, according to Hindley-Milner discipline (cf.\
+ \secref{sec:variables}). These definitional primitives essentially
+ act like @{text "let"}-bindings within a local context that may
+ already contain earlier @{text "let"}-bindings and some initial
+ @{text "\<lambda>"}-bindings. Thus we gain \emph{dependent definitions}
+ that are relative to an initial axiomatic context. The following
+ diagram illustrates this idea of axiomatic elements versus
+ definitional elements:
+
+ \begin{center}
+ \begin{tabular}{|l|l|l|}
+ \hline
+ & @{text "\<lambda>"}-binding & @{text "let"}-binding \\
+ \hline
+ types & fixed @{text "\<alpha>"} & arbitrary @{text "\<beta>"} \\
+ terms & @{text "\<FIX> x :: \<tau>"} & @{text "\<DEFINE> c \<equiv> t"} \\
+ theorems & @{text "\<ASSUME> a: A"} & @{text "\<NOTE> b = \<^BG>B\<^EN>"} \\
+ \hline
+ \end{tabular}
+ \end{center}
+
+ A user package merely needs to produce suitable @{text "\<DEFINE>"}
+ and @{text "\<NOTE>"} elements according to the application. For
+ example, a package for inductive definitions might first @{text
+ "\<DEFINE>"} a certain predicate as some fixed-point construction,
+ then @{text "\<NOTE>"} a proven result about monotonicity of the
+ functor involved here, and then produce further derived concepts via
+ additional @{text "\<DEFINE>"} and @{text "\<NOTE>"} elements.
+
+ The cumulative sequence of @{text "\<DEFINE>"} and @{text "\<NOTE>"}
+ produced at package runtime is managed by the local theory
+ infrastructure by means of an \emph{auxiliary context}. Thus the
+ system maintains the impression of working within a fully abstract
+ situation with hypothetical entities: @{text "\<DEFINE> c \<equiv> t"}
+ always results in a literal fact @{text "\<^BG>c \<equiv> t\<^EN>"}, where
+ @{text "c"} is a fixed variable.  The details about global
+ constants, name spaces etc.\ are handled internally.
+
+ So the general structure of a local theory is a sandwich of three
+ layers:
+
+ \begin{center}
+ \framebox{\quad auxiliary context \quad\framebox{\quad target context \quad\framebox{\quad background theory\quad}}}
+ \end{center}
+
+ \noindent When a definitional package is finished, the auxiliary
+ context is reset to the target context. The target now holds
+ definitions for terms and theorems that stem from the hypothetical
+ @{text "\<DEFINE>"} and @{text "\<NOTE>"} elements, transformed by
+ the particular target policy (see
+ \cite[\S4--5]{Haftmann-Wenzel:2009} for details).
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type local_theory: Proof.context} \\
+ @{index_ML TheoryTarget.init: "string option -> theory -> local_theory"} \\[1ex]
+ @{index_ML LocalTheory.define: "string ->
+ (binding * mixfix) * (Attrib.binding * term) -> local_theory ->
+ (term * (string * thm)) * local_theory"} \\
+ @{index_ML LocalTheory.note: "string ->
+ Attrib.binding * thm list -> local_theory ->
+ (string * thm list) * local_theory"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type local_theory} represents local theories. Although
+ this is merely an alias for @{ML_type Proof.context}, it is
+ semantically a subtype of the same: a @{ML_type local_theory} holds
+ target information as special context data. Subtyping means that
+ any value @{text "lthy:"}~@{ML_type local_theory} can also be used
+ with operations expecting a regular @{text "ctxt:"}~@{ML_type
+ Proof.context}.
+
+ \item @{ML TheoryTarget.init}~@{text "NONE thy"} initializes a
+ trivial local theory from the given background theory.
+ Alternatively, @{text "SOME name"} may be given to initialize a
+ @{command locale} or @{command class} context (a fully-qualified
+ internal name is expected here). This is useful for experimentation
+ --- normally the Isar toplevel already takes care to initialize the
+ local theory context.
+
+ \item @{ML LocalTheory.define}~@{text "kind ((b, mx), (a, rhs))
+ lthy"} defines a local entity according to the specification that is
+ given relatively to the current @{text "lthy"} context. In
+ particular the term of the RHS may refer to earlier local entities
+ from the auxiliary context, or hypothetical parameters from the
+ target context. The result is the newly defined term (which is
+ always a fixed variable with exactly the same name as specified for
+ the LHS), together with an equational theorem that states the
+ definition as a hypothetical fact.
+
+ Unless an explicit name binding is given for the RHS, the resulting
+ fact will be called @{text "b_def"}. Any given attributes are
+ applied to that same fact --- immediately in the auxiliary context
+ \emph{and} in any transformed versions stemming from target-specific
+ policies or any later interpretations of results from the target
+ context (think of @{command locale} and @{command interpretation},
+ for example).  This means that attributes should usually be plain
+ declarations such as @{attribute simp}, while non-trivial rules like
+ @{attribute simplified} are better avoided.
+
+ The @{text kind} determines the theorem kind tag of the resulting
+ fact. Typical examples are @{ML Thm.definitionK}, @{ML
+ Thm.theoremK}, or @{ML Thm.internalK}.
+
+ \item @{ML LocalTheory.note}~@{text "kind (a, ths) lthy"} is
+ analogous to @{ML LocalTheory.define}, but defines facts instead of
+ terms. There is also a slightly more general variant @{ML
+ LocalTheory.notes} that defines several facts (with attribute
+ expressions) simultaneously.
+
+ This is essentially the internal version of the @{command lemmas}
+ command, or @{command declare} if an empty name binding is given.
+
+ \end{description}
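+
+ \medskip The following sketch illustrates a typical invocation
+ within a definitional package; the binding @{text "c"} and the term
+ @{text "rhs"} are made up for illustration, and @{text "lthy"} is
+ assumed to be a local theory context:
+
+ \begin{verbatim}
+ (*define c == rhs, relative to the auxiliary context of lthy*)
+ val ((t, (_, c_def)), lthy') =
+   LocalTheory.define Thm.definitionK
+     ((Binding.name "c", NoSyn),
+      ((Binding.name "c_def", []), rhs)) lthy;
+ \end{verbatim}
+
+ Here @{text "t"} is the fixed variable representing the new entity,
+ and @{text "c_def"} the hypothetical defining equation.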
+*}
+
+
+section {* Morphisms and declarations *}
+
+text FIXME
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Logic.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,909 @@
+theory Logic
+imports Base
+begin
+
+chapter {* Primitive logic \label{ch:logic} *}
+
+text {*
+ The logical foundations of Isabelle/Isar are those of the Pure logic,
+ which has been introduced as a Natural Deduction framework in
+ \cite{paulson700}. This is essentially the same logic as ``@{text
+ "\<lambda>HOL"}'' in the more abstract setting of Pure Type Systems (PTS)
+ \cite{Barendregt-Geuvers:2001}, although there are some key
+ differences in the specific treatment of simple types in
+ Isabelle/Pure.
+
+ Following type-theoretic parlance, the Pure logic consists of three
+ levels of @{text "\<lambda>"}-calculus with corresponding arrows, @{text
+ "\<Rightarrow>"} for syntactic function space (terms depending on terms), @{text
+ "\<And>"} for universal quantification (proofs depending on terms), and
+ @{text "\<Longrightarrow>"} for implication (proofs depending on proofs).
+
+ Derivations are relative to a logical theory, which declares type
+ constructors, constants, and axioms. Theory declarations support
+ schematic polymorphism, which is strictly speaking outside the
+ logic.\footnote{This is the deeper logical reason why the theory
+ context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
+ of the core calculus.}
+*}
+
+
+section {* Types \label{sec:types} *}
+
+text {*
+ The language of types is an uninterpreted order-sorted first-order
+ algebra; types are qualified by ordered type classes.
+
+ \medskip A \emph{type class} is an abstract syntactic entity
+ declared in the theory context. The \emph{subclass relation} @{text
+ "c\<^isub>1 \<subseteq> c\<^isub>2"} is specified by stating an acyclic
+ generating relation; the transitive closure is maintained
+ internally. The resulting relation is an ordering: reflexive,
+ transitive, and antisymmetric.
+
+ A \emph{sort} is a list of type classes written as @{text "s =
+ {c\<^isub>1, \<dots>, c\<^isub>m}"}, which represents symbolic
+ intersection. Notationally, the curly braces are omitted for
+ singleton intersections, i.e.\ any class @{text "c"} may be read as
+ a sort @{text "{c}"}. The ordering on type classes is extended to
+ sorts according to the meaning of intersections: @{text
+ "{c\<^isub>1, \<dots>, c\<^isub>m} \<subseteq> {d\<^isub>1, \<dots>, d\<^isub>n}"} iff
+ @{text "\<forall>j. \<exists>i. c\<^isub>i \<subseteq> d\<^isub>j"}. The empty intersection
+ @{text "{}"} refers to the universal sort, which is the largest
+ element wrt.\ the sort order. The intersections of all (finitely
+ many) classes declared in the current theory are the minimal
+ elements wrt.\ the sort order.
+
+ \medskip A \emph{fixed type variable} is a pair of a basic name
+ (starting with a @{text "'"} character) and a sort constraint, e.g.\
+ @{text "('a, s)"} which is usually printed as @{text "\<alpha>\<^isub>s"}.
+ A \emph{schematic type variable} is a pair of an indexname and a
+ sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually
+ printed as @{text "?\<alpha>\<^isub>s"}.
+
+ Note that \emph{all} syntactic components contribute to the identity
+ of type variables, including the sort constraint. The core logic
+ handles type variables with the same name but different sorts as
+ different, although some outer layers of the system make it hard to
+ produce anything like this.
+
+ A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
+ on types declared in the theory. Type constructor application is
+ written postfix as @{text "(\<alpha>\<^isub>1, \<dots>, \<alpha>\<^isub>k)\<kappa>"}. For
+ @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"}
+ instead of @{text "()prop"}. For @{text "k = 1"} the parentheses
+ are omitted, e.g.\ @{text "\<alpha> list"} instead of @{text "(\<alpha>)list"}.
+ Further notation is provided for specific constructors, notably the
+ right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
+ \<beta>)fun"}.
+
+ A \emph{type} is defined inductively over type variables and type
+ constructors as follows: @{text "\<tau> = \<alpha>\<^isub>s | ?\<alpha>\<^isub>s |
+ (\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
+
+ A \emph{type abbreviation} is a syntactic definition @{text
+ "(\<^vec>\<alpha>)\<kappa> = \<tau>"} of an arbitrary type expression @{text "\<tau>"} over
+ variables @{text "\<^vec>\<alpha>"}. Type abbreviations appear as type
+ constructors in the syntax, but are expanded before entering the
+ logical core.
+
+ A \emph{type arity} declares the image behavior of a type
+ constructor wrt.\ the algebra of sorts: @{text "\<kappa> :: (s\<^isub>1, \<dots>,
+ s\<^isub>k)s"} means that @{text "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>k)\<kappa>"} is
+ of sort @{text "s"} if every argument type @{text "\<tau>\<^isub>i"} is
+ of sort @{text "s\<^isub>i"}. Arity declarations are implicitly
+ completed, i.e.\ @{text "\<kappa> :: (\<^vec>s)c"} entails @{text "\<kappa> ::
+ (\<^vec>s)c'"} for any @{text "c' \<supseteq> c"}.
+
+ \medskip The sort algebra is always maintained as \emph{coregular},
+ which means that type arities are consistent with the subclass
+ relation: for any type constructor @{text "\<kappa>"}, classes @{text
+ "c\<^isub>1 \<subseteq> c\<^isub>2"}, and arities @{text "\<kappa> ::
+ (\<^vec>s\<^isub>1)c\<^isub>1"} and @{text "\<kappa> ::
+ (\<^vec>s\<^isub>2)c\<^isub>2"}, we have @{text "\<^vec>s\<^isub>1 \<subseteq>
+ \<^vec>s\<^isub>2"} component-wise.
+
+ The key property of a coregular order-sorted algebra is that sort
+ constraints can be solved in a most general fashion: for each type
+ constructor @{text "\<kappa>"} and sort @{text "s"} there is a most general
+ vector of argument sorts @{text "(s\<^isub>1, \<dots>, s\<^isub>k)"} such
+ that a type scheme @{text "(\<alpha>\<^bsub>s\<^isub>1\<^esub>, \<dots>,
+ \<alpha>\<^bsub>s\<^isub>k\<^esub>)\<kappa>"} is of sort @{text "s"}.
+ Consequently, type unification has most general solutions (modulo
+ equivalence of sorts), so type-inference produces principal types as
+ expected \cite{nipkow-prehofer}.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type class} \\
+ @{index_ML_type sort} \\
+ @{index_ML_type arity} \\
+ @{index_ML_type typ} \\
+ @{index_ML map_atyps: "(typ -> typ) -> typ -> typ"} \\
+ @{index_ML fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\
+ @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\
+ @{index_ML Sign.add_types: "(string * int * mixfix) list -> theory -> theory"} \\
+ @{index_ML Sign.add_tyabbrs_i: "
+ (string * string list * typ * mixfix) list -> theory -> theory"} \\
+ @{index_ML Sign.primitive_class: "string * class list -> theory -> theory"} \\
+ @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\
+ @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type class} represents type classes; this is an alias for
+ @{ML_type string}.
+
+ \item @{ML_type sort} represents sorts; this is an alias for
+ @{ML_type "class list"}.
+
+ \item @{ML_type arity} represents type arities; this is an alias for
+ triples of the form @{text "(\<kappa>, \<^vec>s, s)"} for @{text "\<kappa> ::
+ (\<^vec>s)s"} described above.
+
+ \item @{ML_type typ} represents types; this is a datatype with
+ constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
+
+ \item @{ML map_atyps}~@{text "f \<tau>"} applies the mapping @{text "f"}
+ to all atomic types (@{ML TFree}, @{ML TVar}) occurring in @{text
+ "\<tau>"}.
+
+ \item @{ML fold_atyps}~@{text "f \<tau>"} iterates the operation @{text
+ "f"} over all occurrences of atomic types (@{ML TFree}, @{ML TVar})
+ in @{text "\<tau>"}; the type structure is traversed from left to right.
+
+ \item @{ML Sign.subsort}~@{text "thy (s\<^isub>1, s\<^isub>2)"}
+ tests the subsort relation @{text "s\<^isub>1 \<subseteq> s\<^isub>2"}.
+
+ \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
+ @{text "\<tau>"} is of sort @{text "s"}.
+
+ \item @{ML Sign.add_types}~@{text "[(\<kappa>, k, mx), \<dots>]"} declares new
+ type constructors @{text "\<kappa>"} with @{text "k"} arguments and
+ optional mixfix syntax.
+
+ \item @{ML Sign.add_tyabbrs_i}~@{text "[(\<kappa>, \<^vec>\<alpha>, \<tau>, mx), \<dots>]"}
+ defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"} with
+ optional mixfix syntax.
+
+ \item @{ML Sign.primitive_class}~@{text "(c, [c\<^isub>1, \<dots>,
+ c\<^isub>n])"} declares a new class @{text "c"}, together with class
+ relations @{text "c \<subseteq> c\<^isub>i"}, for @{text "i = 1, \<dots>, n"}.
+
+ \item @{ML Sign.primitive_classrel}~@{text "(c\<^isub>1,
+ c\<^isub>2)"} declares the class relation @{text "c\<^isub>1 \<subseteq>
+ c\<^isub>2"}.
+
+ \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
+ the arity @{text "\<kappa> :: (\<^vec>s)s"}.
+
+ \end{description}
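+
+ \medskip For example, the names of all fixed type variables
+ occurring in a type may be collected as follows (a sketch based on
+ @{ML fold_atyps}):
+
+ \begin{verbatim}
+ fun add_tfree_names T =
+   fold_atyps (fn TFree (a, _) => insert (op =) a | _ => I) T;
+
+ val names = add_tfree_names T [];   (*for some type T*)
+ \end{verbatim}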
+*}
+
+
+section {* Terms \label{sec:terms} *}
+
+text {*
+ The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
+ with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
+ or \cite{paulson-ml2}), with the types being determined by the
+ corresponding binders. In contrast, free variables and constants
+ have an explicit name and type in each occurrence.
+
+ \medskip A \emph{bound variable} is a natural number @{text "b"},
+ which accounts for the number of intermediate binders between the
+ variable occurrence in the body and its binding position. For
+ example, the de-Bruijn term @{text
+ "\<lambda>\<^bsub>nat\<^esub>. \<lambda>\<^bsub>nat\<^esub>. 1 + 0"} would
+ correspond to @{text
+ "\<lambda>x\<^bsub>nat\<^esub>. \<lambda>y\<^bsub>nat\<^esub>. x + y"} in a named
+ representation. Note that a bound variable may be represented by
+ different de-Bruijn indices at different occurrences, depending on
+ the nesting of abstractions.
+
+ A \emph{loose variable} is a bound variable that is outside the
+ scope of local binders. The types (and names) for loose variables
+ can be managed as a separate context that is maintained as a stack
+ of hypothetical binders. The core logic operates on closed terms,
+ without any loose variables.
+
+ A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
+ @{text "(x, \<tau>)"} which is usually printed @{text "x\<^isub>\<tau>"}. A
+ \emph{schematic variable} is a pair of an indexname and a type,
+ e.g.\ @{text "((x, 0), \<tau>)"} which is usually printed as @{text
+ "?x\<^isub>\<tau>"}.
+
+ \medskip A \emph{constant} is a pair of a basic name and a type,
+ e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text
+ "c\<^isub>\<tau>"}. Constants are declared in the context as polymorphic
+ families @{text "c :: \<sigma>"}, meaning that all substitution instances
+ @{text "c\<^isub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
+
+ The vector of \emph{type arguments} of constant @{text "c\<^isub>\<tau>"}
+ wrt.\ the declaration @{text "c :: \<sigma>"} is defined as the codomain of
+ the matcher @{text "\<vartheta> = {?\<alpha>\<^isub>1 \<mapsto> \<tau>\<^isub>1, \<dots>,
+ ?\<alpha>\<^isub>n \<mapsto> \<tau>\<^isub>n}"} presented in canonical order @{text
+ "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n)"}. Within a given theory context,
+ there is a one-to-one correspondence between any constant @{text
+ "c\<^isub>\<tau>"} and the application @{text "c(\<tau>\<^isub>1, \<dots>,
+ \<tau>\<^isub>n)"} of its type arguments. For example, with @{text "plus
+ :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow>
+ nat\<^esub>"} corresponds to @{text "plus(nat)"}.
+
+ Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
+ for type variables in @{text "\<sigma>"}. These are observed by
+ type-inference as expected, but \emph{ignored} by the core logic.
+ This means the primitive logic is able to reason with instances of
+ polymorphic constants that the user-level type-checker would reject
+ due to violation of type class restrictions.
+
+ \medskip An \emph{atomic} term is either a variable or constant. A
+ \emph{term} is defined inductively over atomic terms, with
+ abstraction and application as follows: @{text "t = b | x\<^isub>\<tau> |
+ ?x\<^isub>\<tau> | c\<^isub>\<tau> | \<lambda>\<^isub>\<tau>. t | t\<^isub>1 t\<^isub>2"}.
+ Parsing and printing takes care of converting between an external
+ representation with named bound variables. Subsequently, we shall
+ use the latter notation instead of internal de-Bruijn
+ representation.
+
+ The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
+ term according to the structure of atomic terms, abstractions, and
+ applications:
+ \[
+ \infer{@{text "a\<^isub>\<tau> :: \<tau>"}}{}
+ \qquad
+ \infer{@{text "(\<lambda>x\<^sub>\<tau>. t) :: \<tau> \<Rightarrow> \<sigma>"}}{@{text "t :: \<sigma>"}}
+ \qquad
+ \infer{@{text "t u :: \<sigma>"}}{@{text "t :: \<tau> \<Rightarrow> \<sigma>"} & @{text "u :: \<tau>"}}
+ \]
+ A \emph{well-typed term} is a term that can be typed according to these rules.
+
+ Typing information can be omitted: type-inference is able to
+ reconstruct the most general type of a raw term, while assigning
+ most general types to all of its variables and constants.
+ Type-inference depends on a context of type constraints for fixed
+ variables, and declarations for polymorphic constants.
+
+ The identity of atomic terms consists both of the name and the type
+ component. This means that different variables @{text
+ "x\<^bsub>\<tau>\<^isub>1\<^esub>"} and @{text
+ "x\<^bsub>\<tau>\<^isub>2\<^esub>"} may become the same after type
+ instantiation. Some outer layers of the system make it hard to
+ produce variables of the same name, but different types. In
+ contrast, mixed instances of polymorphic constants occur frequently.
+
+ \medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
+ is the set of type variables occurring in @{text "t"}, but not in
+ @{text "\<sigma>"}. This means that the term implicitly depends on type
+ arguments that are not accounted for in the result type, i.e.\ there are
+ different type instances @{text "t\<vartheta> :: \<sigma>"} and @{text
+ "t\<vartheta>' :: \<sigma>"} with the same type. This slightly
+ pathological situation notoriously demands additional care.
+
+ \medskip A \emph{term abbreviation} is a syntactic definition @{text
+ "c\<^isub>\<sigma> \<equiv> t"} of a closed term @{text "t"} of type @{text "\<sigma>"},
+ without any hidden polymorphism. A term abbreviation looks like a
+ constant in the syntax, but is expanded before entering the logical
+ core. Abbreviations are usually reverted when printing terms, using
+ @{text "t \<rightarrow> c\<^isub>\<sigma>"} as rules for higher-order rewriting.
+
+ \medskip Canonical operations on @{text "\<lambda>"}-terms include @{text
+ "\<alpha>\<beta>\<eta>"}-conversion: @{text "\<alpha>"}-conversion refers to capture-free
+ renaming of bound variables; @{text "\<beta>"}-conversion contracts an
+ abstraction applied to an argument term, substituting the argument
+ in the body: @{text "(\<lambda>x. b)a"} becomes @{text "b[a/x]"}; @{text
+ "\<eta>"}-conversion contracts vacuous application-abstraction: @{text
+ "\<lambda>x. f x"} becomes @{text "f"}, provided that the bound variable
+ does not occur in @{text "f"}.
+
+ Terms are normally treated modulo @{text "\<alpha>"}-conversion, which is
+ implicit in the de-Bruijn representation. Names for bound variables
+ in abstractions are maintained separately as (meaningless) comments,
+ mostly for parsing and printing. Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
+ commonplace in various standard operations (\secref{sec:obj-rules})
+ that are based on higher-order unification and matching.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type term} \\
+ @{index_ML "op aconv": "term * term -> bool"} \\
+ @{index_ML map_types: "(typ -> typ) -> term -> term"} \\
+ @{index_ML fold_types: "(typ -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+ @{index_ML map_aterms: "(term -> term) -> term -> term"} \\
+ @{index_ML fold_aterms: "(term -> 'a -> 'a) -> term -> 'a -> 'a"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML fastype_of: "term -> typ"} \\
+ @{index_ML lambda: "term -> term -> term"} \\
+ @{index_ML betapply: "term * term -> term"} \\
+ @{index_ML Sign.declare_const: "Properties.T -> (binding * typ) * mixfix ->
+ theory -> term * theory"} \\
+ @{index_ML Sign.add_abbrev: "string -> Properties.T -> binding * term ->
+ theory -> (term * term) * theory"} \\
+ @{index_ML Sign.const_typargs: "theory -> string * typ -> typ list"} \\
+ @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type term} represents de-Bruijn terms, with comments in
+ abstractions, and explicitly named free variables and constants;
+ this is a datatype with constructors @{ML Bound}, @{ML Free}, @{ML
+ Var}, @{ML Const}, @{ML Abs}, @{ML "op $"}.
+
+ \item @{text "t"}~@{ML aconv}~@{text "u"} checks @{text
+ "\<alpha>"}-equivalence of two terms. This is the basic equality relation
+ on type @{ML_type term}; raw datatype equality should only be used
+ for operations related to parsing or printing!
+
+ \item @{ML map_types}~@{text "f t"} applies the mapping @{text
+ "f"} to all types occurring in @{text "t"}.
+
+ \item @{ML fold_types}~@{text "f t"} iterates the operation @{text
+ "f"} over all occurrences of types in @{text "t"}; the term
+ structure is traversed from left to right.
+
+ \item @{ML map_aterms}~@{text "f t"} applies the mapping @{text "f"}
+ to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
+ Const}) occurring in @{text "t"}.
+
+ \item @{ML fold_aterms}~@{text "f t"} iterates the operation @{text
+ "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML Free},
+ @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
+ traversed from left to right.
+
+ \item @{ML fastype_of}~@{text "t"} determines the type of a
+ well-typed term. This operation is relatively slow, despite the
+ omission of any sanity checks.
+
+ \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
+ "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
+ body @{text "b"} are replaced by bound variables.
+
+ \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
+ "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
+ abstraction.
+
+ \item @{ML Sign.declare_const}~@{text "properties ((c, \<sigma>), mx)"}
+ declares a new constant @{text "c :: \<sigma>"} with optional mixfix
+ syntax.
+
+ \item @{ML Sign.add_abbrev}~@{text "print_mode properties (c, t)"}
+ introduces a new term abbreviation @{text "c \<equiv> t"}.
+
+ \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
+ Sign.const_instance}~@{text "thy (c, [\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n])"}
+ convert between two representations of polymorphic constants: full
+ type instance vs.\ compact type arguments form.
+
+ \end{description}
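+
+ \medskip A small example (sketch): building an abstraction over a
+ fixed variable and @{text "\<beta>"}-contracting it again.
+
+ \begin{verbatim}
+ val x = Free ("x", propT);   (*fixed variable of type prop*)
+ val t = lambda x x;          (*identity abstraction*)
+ val T = fastype_of t;        (*prop => prop*)
+ val u = betapply (t, x);     (*beta-contracts back to x*)
+ \end{verbatim}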
+*}
+
+
+section {* Theorems \label{sec:thms} *}
+
+text {*
+ A \emph{proposition} is a well-typed term of type @{text "prop"}, a
+ \emph{theorem} is a proven proposition (depending on a context of
+ hypotheses and the background theory). Primitive inferences include
+ plain Natural Deduction rules for the primary connectives @{text
+ "\<And>"} and @{text "\<Longrightarrow>"} of the framework. There is also a builtin
+ notion of equality/equivalence @{text "\<equiv>"}.
+*}
+
+
+subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
+
+text {*
+ The theory @{text "Pure"} contains constant declarations for the
+ primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
+ the logical framework, see \figref{fig:pure-connectives}. The
+ derivability judgment @{text "A\<^isub>1, \<dots>, A\<^isub>n \<turnstile> B"} is
+ defined inductively by the primitive inferences given in
+ \figref{fig:prim-rules}, with the global restriction that the
+ hypotheses must \emph{not} contain any schematic variables. The
+ builtin equality is conceptually axiomatized as shown in
+ \figref{fig:pure-equality}, although the implementation works
+ directly with derived inferences.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ @{text "all :: (\<alpha> \<Rightarrow> prop) \<Rightarrow> prop"} & universal quantification (binder @{text "\<And>"}) \\
+ @{text "\<Longrightarrow> :: prop \<Rightarrow> prop \<Rightarrow> prop"} & implication (right associative infix) \\
+ @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} & equality relation (infix) \\
+ \end{tabular}
+ \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
+ \end{center}
+ \end{figure}
+
+ \begin{figure}[htb]
+ \begin{center}
+ \[
+ \infer[@{text "(axiom)"}]{@{text "\<turnstile> A"}}{@{text "A \<in> \<Theta>"}}
+ \qquad
+ \infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
+ \]
+ \[
+ \infer[@{text "(\<And>_intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}{@{text "\<Gamma> \<turnstile> b[x]"} & @{text "x \<notin> \<Gamma>"}}
+ \qquad
+ \infer[@{text "(\<And>_elim)"}]{@{text "\<Gamma> \<turnstile> b[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}
+ \]
+ \[
+ \infer[@{text "(\<Longrightarrow>_intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \qquad
+ \infer[@{text "(\<Longrightarrow>_elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+ \]
+ \caption{Primitive inferences of Pure}\label{fig:prim-rules}
+ \end{center}
+ \end{figure}
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ @{text "\<turnstile> (\<lambda>x. b[x]) a \<equiv> b[a]"} & @{text "\<beta>"}-conversion \\
+ @{text "\<turnstile> x \<equiv> x"} & reflexivity \\
+ @{text "\<turnstile> x \<equiv> y \<Longrightarrow> P x \<Longrightarrow> P y"} & substitution \\
+ @{text "\<turnstile> (\<And>x. f x \<equiv> g x) \<Longrightarrow> f \<equiv> g"} & extensionality \\
+ @{text "\<turnstile> (A \<Longrightarrow> B) \<Longrightarrow> (B \<Longrightarrow> A) \<Longrightarrow> A \<equiv> B"} & logical equivalence \\
+ \end{tabular}
+ \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
+ \end{center}
+ \end{figure}
+
+ The introduction and elimination rules for @{text "\<And>"} and @{text
+ "\<Longrightarrow>"} are analogous to formation of dependently typed @{text
+ "\<lambda>"}-terms representing the underlying proof objects. Proof terms
+ are irrelevant in the Pure logic, though; they cannot occur within
+ propositions. The system provides a runtime option to record
+ explicit proof terms for primitive inferences. Thus all three
+ levels of @{text "\<lambda>"}-calculus become explicit: @{text "\<Rightarrow>"} for
+ terms, and @{text "\<And>/\<Longrightarrow>"} for proofs (cf.\
+ \cite{Berghofer-Nipkow:2000:TPHOL}).
+
+ Observe that locally fixed parameters (as in @{text "\<And>_intro"}) need
+ not be recorded in the hypotheses, because the simple syntactic
+ types of Pure are always inhabitable. ``Assumptions'' @{text "x ::
+ \<tau>"} for type-membership are only present as long as some @{text
+ "x\<^isub>\<tau>"} occurs in the statement body.\footnote{This is the key
+ difference to ``@{text "\<lambda>HOL"}'' in the PTS framework
+ \cite{Barendregt-Geuvers:2001}, where hypotheses @{text "x : A"} are
+ treated uniformly for propositions and types.}
+
+ \medskip The axiomatization of a theory is implicitly closed by
+ forming all instances of type and term variables: @{text "\<turnstile>
+ A\<vartheta>"} holds for any substitution instance of an axiom
+ @{text "\<turnstile> A"}. By pushing substitutions through derivations
+ inductively, we also get admissible @{text "generalize"} and @{text
+ "instance"} rules as shown in \figref{fig:subst-rules}.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \[
+ \infer{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}{@{text "\<Gamma> \<turnstile> B[\<alpha>]"} & @{text "\<alpha> \<notin> \<Gamma>"}}
+ \quad
+ \infer[\quad@{text "(generalize)"}]{@{text "\<Gamma> \<turnstile> B[?x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
+ \]
+ \[
+ \infer{@{text "\<Gamma> \<turnstile> B[\<tau>]"}}{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}
+ \quad
+ \infer[\quad@{text "(instantiate)"}]{@{text "\<Gamma> \<turnstile> B[t]"}}{@{text "\<Gamma> \<turnstile> B[?x]"}}
+ \]
+ \caption{Admissible substitution rules}\label{fig:subst-rules}
+ \end{center}
+ \end{figure}
+
+ Note that @{text "instantiate"} does not require an explicit
+ side-condition, because @{text "\<Gamma>"} may never contain schematic
+ variables.
+
+ In principle, variables could be substituted in hypotheses as well,
+ but this would disrupt the monotonicity of reasoning: deriving
+ @{text "\<Gamma>\<vartheta> \<turnstile> B\<vartheta>"} from @{text "\<Gamma> \<turnstile> B"} is
+ correct, but @{text "\<Gamma>\<vartheta> \<supseteq> \<Gamma>"} does not necessarily hold:
+ the result belongs to a different proof context.
+
+ \medskip An \emph{oracle} is a function that produces axioms on the
+ fly. Logically, this is an instance of the @{text "axiom"} rule
+ (\figref{fig:prim-rules}), but there is an operational difference.
+ The system always records oracle invocations within derivations of
+ theorems by a unique tag.
+
+ Axiomatizations should be limited to the bare minimum, typically as
+ part of the initial logical basis of an object-logic formalization.
+ Later on, theories are usually developed in a strictly definitional
+ fashion, by stating only certain equalities over new constants.
+
+ A \emph{simple definition} consists of a constant declaration @{text
+ "c :: \<sigma>"} together with an axiom @{text "\<turnstile> c \<equiv> t"}, where @{text "t
+ :: \<sigma>"} is a closed term without any hidden polymorphism. The RHS
+ may depend on further defined constants, but not @{text "c"} itself.
+ Definitions of functions may be presented as @{text "c \<^vec>x \<equiv>
+ t"} instead of the puristic @{text "c \<equiv> \<lambda>\<^vec>x. t"}.
+
+ An \emph{overloaded definition} consists of a collection of axioms
+ for the same constant, with at most one equation @{text
+ "c((\<^vec>\<alpha>)\<kappa>) \<equiv> t"} for each type constructor @{text "\<kappa>"} (for
+ distinct variables @{text "\<^vec>\<alpha>"}). The RHS may mention
+ previously defined constants as above, or arbitrary constants @{text
+ "d(\<alpha>\<^isub>i)"} for some @{text "\<alpha>\<^isub>i"} projected from @{text
+ "\<^vec>\<alpha>"}. Thus overloaded definitions essentially work by
+ primitive recursion over the syntactic structure of a single type
+ argument.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type ctyp} \\
+ @{index_ML_type cterm} \\
+ @{index_ML Thm.ctyp_of: "theory -> typ -> ctyp"} \\
+ @{index_ML Thm.cterm_of: "theory -> term -> cterm"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type thm} \\
+ @{index_ML proofs: "int ref"} \\
+ @{index_ML Thm.assume: "cterm -> thm"} \\
+ @{index_ML Thm.forall_intr: "cterm -> thm -> thm"} \\
+ @{index_ML Thm.forall_elim: "cterm -> thm -> thm"} \\
+ @{index_ML Thm.implies_intr: "cterm -> thm -> thm"} \\
+ @{index_ML Thm.implies_elim: "thm -> thm -> thm"} \\
+ @{index_ML Thm.generalize: "string list * string list -> int -> thm -> thm"} \\
+ @{index_ML Thm.instantiate: "(ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm"} \\
+ @{index_ML Thm.axiom: "theory -> string -> thm"} \\
+ @{index_ML Thm.add_oracle: "bstring * ('a -> cterm) -> theory
+ -> (string * ('a -> thm)) * theory"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML Theory.add_axioms_i: "(binding * term) list -> theory -> theory"} \\
+ @{index_ML Theory.add_deps: "string -> string * typ -> (string * typ) list -> theory -> theory"} \\
+ @{index_ML Theory.add_defs_i: "bool -> bool -> (binding * term) list -> theory -> theory"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type ctyp} and @{ML_type cterm} represent certified types
+ and terms, respectively. These are abstract datatypes that
+ guarantee that their values have passed the full well-formedness (and
+ well-typedness) checks, relative to the declarations of type
+ constructors, constants etc. in the theory.
+
+ \item @{ML Thm.ctyp_of}~@{text "thy \<tau>"} and @{ML
+ Thm.cterm_of}~@{text "thy t"} explicitly check types and terms,
+ respectively.  This also involves some basic normalizations, such as
+ expansion of type and term abbreviations from the theory context.
+
+ Re-certification is relatively slow and should be avoided in tight
+ reasoning loops. There are separate operations to decompose
+ certified entities (including actual theorems).
+
+ \item @{ML_type thm} represents proven propositions. This is an
+ abstract datatype that guarantees that its values have been
+ constructed by basic principles of the @{ML_struct Thm} module.
+ Every @{ML_type thm} value contains a sliding back-reference to the
+ enclosing theory, cf.\ \secref{sec:context-theory}.
+
+ \item @{ML proofs} determines the detail of proof recording within
+ @{ML_type thm} values: @{ML 0} records only the names of oracles,
+ @{ML 1} records oracle names and propositions, @{ML 2} additionally
+ records full proof terms. Officially named theorems that contribute
+ to a result are always recorded.
+
+ \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
+ Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
+ correspond to the primitive inferences of \figref{fig:prim-rules}.
+
+ \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
+ corresponds to the @{text "generalize"} rules of
+ \figref{fig:subst-rules}. Here collections of type and term
+ variables are generalized simultaneously, specified by the given
+ basic names.
+
+ \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^isub>s,
+ \<^vec>x\<^isub>\<tau>)"} corresponds to the @{text "instantiate"} rules
+ of \figref{fig:subst-rules}. Type variables are substituted before
+ term variables. Note that the types in @{text "\<^vec>x\<^isub>\<tau>"}
+ refer to the instantiated versions.
+
+ \item @{ML Thm.axiom}~@{text "thy name"} retrieves a named
+ axiom, cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
+
+ \item @{ML Thm.add_oracle}~@{text "(name, oracle)"} produces a named
+ oracle rule, essentially generating arbitrary axioms on the fly,
+ cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
+
+ \item @{ML Theory.add_axioms_i}~@{text "[(name, A), \<dots>]"} declares
+ arbitrary propositions as axioms.
+
+ \item @{ML Theory.add_deps}~@{text "name c\<^isub>\<tau>
+ \<^vec>d\<^isub>\<sigma>"} declares dependencies of a named specification
+ for constant @{text "c\<^isub>\<tau>"}, relative to existing
+ specifications for constants @{text "\<^vec>d\<^isub>\<sigma>"}.
+
+ \item @{ML Theory.add_defs_i}~@{text "unchecked overloaded [(name, c
+ \<^vec>x \<equiv> t), \<dots>]"} states a definitional axiom for an existing
+ constant @{text "c"}. Dependencies are recorded (cf.\ @{ML
+ Theory.add_deps}), unless the @{text "unchecked"} option is set.
+
+ \end{description}
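+
+ \medskip For example, the trivial rule @{text "A \<Longrightarrow> A"} may be
+ derived from the primitive inferences as follows (a sketch, for some
+ background theory @{text "thy"}):
+
+ \begin{verbatim}
+ val A = Thm.cterm_of thy (Free ("A", propT));
+ val th = Thm.implies_intr A (Thm.assume A);   (*A ==> A*)
+ \end{verbatim}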
+*}
+
+
+subsection {* Auxiliary definitions *}
+
+text {*
+ Theory @{text "Pure"} provides a few auxiliary definitions, see
+ \figref{fig:pure-aux}. These special constants are normally not
+ exposed to the user, but appear in internal encodings.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&"}) \\
+ @{text "\<turnstile> A & B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
+ @{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
+ @{text "#A \<equiv> A"} \\[1ex]
+ @{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
+ @{text "term x \<equiv> (\<And>A. A \<Longrightarrow> A)"} \\[1ex]
+ @{text "TYPE :: \<alpha> itself"} & (prefix @{text "TYPE"}) \\
+ @{text "(unspecified)"} \\
+ \end{tabular}
+ \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
+ \end{center}
+ \end{figure}
+
+ Derived conjunction rules include introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &
+ B"}, and destructions @{text "A & B \<Longrightarrow> A"} and @{text "A & B \<Longrightarrow> B"}.
+ Conjunction makes it possible to treat simultaneous assumptions and
+ conclusions
+ uniformly. For example, multiple claims are intermediately
+ represented as explicit conjunction, but this is refined into
+ separate sub-goals before the user continues the proof; the final
+ result is projected into a list of theorems (cf.\
+ \secref{sec:tactical-goals}).
+
+ The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
+ propositions appear as atomic, without changing the meaning: @{text
+ "\<Gamma> \<turnstile> A"} and @{text "\<Gamma> \<turnstile> #A"} are interchangeable. See
+ \secref{sec:tactical-goals} for specific operations.
+
+ The @{text "term"} marker turns any well-typed term into a derivable
+ proposition: @{text "\<turnstile> TERM t"} holds unconditionally. Although
+ this is logically vacuous, it allows terms and proofs to be treated
+ uniformly, similar to a type-theoretic framework.
+
+ The @{text "TYPE"} constructor is the canonical representative of
+ the unspecified type @{text "\<alpha> itself"}; it essentially injects the
+ language of types into that of terms. There is specific notation
+ @{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau>
+ itself\<^esub>"}.
+ Although devoid of any particular meaning, the term @{text
+ "TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
+ language. In particular, @{text "TYPE(\<alpha>)"} may be used as formal
+ argument in primitive definitions, in order to circumvent hidden
+ polymorphism (cf.\ \secref{sec:terms}). For example, @{text "c
+ TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
+ a proposition @{text "A"} that depends on an additional type
+ argument, which is essentially a predicate on types.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
+ @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
+ @{index_ML Drule.mk_term: "cterm -> thm"} \\
+ @{index_ML Drule.dest_term: "thm -> cterm"} \\
+ @{index_ML Logic.mk_type: "typ -> term"} \\
+ @{index_ML Logic.dest_type: "term -> typ"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML Conjunction.intr} derives @{text "A & B"} from @{text
+ "A"} and @{text "B"}.
+
+ \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
+ from @{text "A & B"}.
+
+ \item @{ML Drule.mk_term} derives @{text "TERM t"}.
+
+ \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
+ "TERM t"}.
+
+ \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
+ "TYPE(\<tau>)"}.
+
+ \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
+ @{text "\<tau>"}.
+
+ \end{description}
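+
+ \medskip For example (sketch): packaging theorems @{text "th\<^isub>a"}
+ and @{text "th\<^isub>b"} as a conjunction and taking it apart again,
+ and the round-trip for type encodings:
+
+ \begin{verbatim}
+ val th_ab = Conjunction.intr th_a th_b;   (*A & B*)
+ val (th_a', th_b') = Conjunction.elim th_ab;
+
+ val t = Logic.mk_type T;      (*TYPE(T) for some type T*)
+ val T' = Logic.dest_type t;   (*recovers T*)
+ \end{verbatim}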
+*}
+
+
+section {* Object-level rules \label{sec:obj-rules} *}
+
+text {*
+ The primitive inferences covered so far mostly serve foundational
+ purposes. User-level reasoning usually works via object-level rules
+ that are represented as theorems of Pure. Composition of rules
+ involves \emph{backchaining}, \emph{higher-order unification} modulo
+ @{text "\<alpha>\<beta>\<eta>"}-conversion of @{text "\<lambda>"}-terms, and so-called
+ \emph{lifting} of rules into a context of @{text "\<And>"} and @{text
+ "\<Longrightarrow>"} connectives. Thus the full power of higher-order Natural
+ Deduction in Isabelle/Pure becomes readily available.
+*}
+
+
+subsection {* Hereditary Harrop Formulae *}
+
+text {*
+ The idea of object-level rules is to model Natural Deduction
+ inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow
+ arbitrary nesting similar to \cite{extensions91}. The most basic
+ rule format is that of a \emph{Horn Clause}:
+ \[
+ \infer{@{text "A"}}{@{text "A\<^sub>1"} & @{text "\<dots>"} & @{text "A\<^sub>n"}}
+ \]
+ where @{text "A, A\<^sub>1, \<dots>, A\<^sub>n"} are atomic propositions
+ of the framework, usually of the form @{text "Trueprop B"}, where
+ @{text "B"} is a (compound) object-level statement. This
+ object-level inference corresponds to an iterated implication in
+ Pure like this:
+ \[
+ @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"}
+ \]
+ As an example consider conjunction introduction: @{text "A \<Longrightarrow> B \<Longrightarrow> A \<and>
+ B"}. Any parameters occurring in such rule statements are
+ conceptually treated as arbitrary:
+ \[
+ @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. A\<^sub>1 x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> \<dots> A\<^sub>n x\<^sub>1 \<dots> x\<^sub>m \<Longrightarrow> A x\<^sub>1 \<dots> x\<^sub>m"}
+ \]
+
+ Nesting of rules means that the positions of @{text "A\<^sub>i"} may
+ again hold compound rules, not just atomic propositions.
+ Propositions of this format are called \emph{Hereditary Harrop
+ Formulae} in the literature \cite{Miller:1991}. Here we give an
+ inductive characterization as follows:
+
+ \medskip
+ \begin{tabular}{ll}
+ @{text "\<^bold>x"} & set of variables \\
+ @{text "\<^bold>A"} & set of atomic propositions \\
+ @{text "\<^bold>H = \<And>\<^bold>x\<^sup>*. \<^bold>H\<^sup>* \<Longrightarrow> \<^bold>A"} & set of Hereditary Harrop Formulas \\
+ \end{tabular}
+ \medskip
+
+ \noindent Thus we essentially impose nesting levels on propositions
+ formed from @{text "\<And>"} and @{text "\<Longrightarrow>"}. At each level there is a
+ prefix of parameters and compound premises, concluding an atomic
+ proposition. Typical examples are @{text "\<longrightarrow>"}-introduction @{text
+ "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B"} or mathematical induction @{text "P 0 \<Longrightarrow> (\<And>n. P n
+ \<Longrightarrow> P (Suc n)) \<Longrightarrow> P n"}. Even deeper nesting occurs in well-founded
+ induction @{text "(\<And>x. (\<And>y. y \<prec> x \<Longrightarrow> P y) \<Longrightarrow> P x) \<Longrightarrow> P x"}, but this
+ already marks the limit of rule complexity seen in practice.
+
+ \medskip Regular user-level inferences in Isabelle/Pure always
+ maintain the following canonical form of results:
+
+ \begin{itemize}
+
+ \item Normalization by @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"},
+ which is a theorem of Pure, means that quantifiers are pushed in
+ front of implication at each level of nesting. The normal form is a
+ Hereditary Harrop Formula.
+
+ \item The outermost prefix of parameters is represented via
+ schematic variables: instead of @{text "\<And>\<^vec>x. \<^vec>H \<^vec>x
+ \<Longrightarrow> A \<^vec>x"} we have @{text "\<^vec>H ?\<^vec>x \<Longrightarrow> A ?\<^vec>x"}.
+ Note that this representation loses information about the order of
+ parameters, and vacuous quantifiers vanish automatically.
+
+ \end{itemize}
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML MetaSimplifier.norm_hhf: "thm -> thm"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML MetaSimplifier.norm_hhf}~@{text thm} normalizes the given
+ theorem according to the canonical form specified above. This is
+ occasionally helpful to repair some low-level tools that do not
+ handle Hereditary Harrop Formulae properly.
+
+ \end{description}
+*}
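+
+text {*
+  As a minimal sketch (where @{text "raw_rule"} stands for a
+  hypothetical result of some low-level tool), normalization is a
+  plain function application:
+
+\begin{verbatim}
+  (* raw_rule : thm, e.g. of the form A ==> (!!x. B x) *)
+  val rule = MetaSimplifier.norm_hhf raw_rule;
+  (* quantifiers are now pushed in front of implications,
+     e.g. !!x. A ==> B x *)
+\end{verbatim}
+*}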
+
+
+subsection {* Rule composition *}
+
+text {*
+ The rule calculus of Isabelle/Pure provides two main inferences:
+ @{inference resolution} (i.e.\ back-chaining of rules) and
+ @{inference assumption} (i.e.\ closing a branch), both modulo
+ higher-order unification. There are also combined variants, notably
+ @{inference elim_resolution} and @{inference dest_resolution}.
+
+ To understand the all-important @{inference resolution} principle,
+ we first consider raw @{inference_def composition} (modulo
+ higher-order unification with substitution @{text "\<vartheta>"}):
+ \[
+ \infer[(@{inference_def composition})]{@{text "\<^vec>A\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+ {@{text "\<^vec>A \<Longrightarrow> B"} & @{text "B' \<Longrightarrow> C"} & @{text "B\<vartheta> = B'\<vartheta>"}}
+ \]
+ Here the conclusion of the first rule is unified with the premise of
+ the second; the resulting rule instance inherits the premises of the
+ first and conclusion of the second. Note that @{text "C"} can again
+ consist of iterated implications. We can also permute the premises
+ of the second rule back-and-forth in order to compose with @{text
+ "B'"} in any position (subsequently we shall always refer to
+ position 1 w.l.o.g.).
+
+ In @{inference composition} the internal structure of the common
+ part @{text "B"} and @{text "B'"} is not taken into account. For
+ proper @{inference resolution} we require @{text "B"} to be atomic,
+ and explicitly observe the structure @{text "\<And>\<^vec>x. \<^vec>H
+ \<^vec>x \<Longrightarrow> B' \<^vec>x"} of the premise of the second rule. The
+ idea is to adapt the first rule by ``lifting'' it into this context,
+ by means of iterated application of the following inferences:
+ \[
+ \infer[(@{inference_def imp_lift})]{@{text "(\<^vec>H \<Longrightarrow> \<^vec>A) \<Longrightarrow> (\<^vec>H \<Longrightarrow> B)"}}{@{text "\<^vec>A \<Longrightarrow> B"}}
+ \]
+ \[
+ \infer[(@{inference_def all_lift})]{@{text "(\<And>\<^vec>x. \<^vec>A (?\<^vec>a \<^vec>x)) \<Longrightarrow> (\<And>\<^vec>x. B (?\<^vec>a \<^vec>x))"}}{@{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"}}
+ \]
+ By combining raw composition with lifting, we get full @{inference
+ resolution} as follows:
+ \[
+ \infer[(@{inference_def resolution})]
+ {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (?\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+ {\begin{tabular}{l}
+ @{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"} \\
+ @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+ @{text "(\<lambda>\<^vec>x. B (?\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+ \end{tabular}}
+ \]
+
+ Continued resolution of rules allows us to back-chain a problem
+ towards more and more sub-problems. Branches are closed either by resolving with
+ a rule of 0 premises, or by producing a ``short-circuit'' within a
+ solved situation (again modulo unification):
+ \[
+ \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+ {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text i})}}
+ \]
+
+ FIXME @{inference_def elim_resolution}, @{inference_def dest_resolution}
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML "op RS": "thm * thm -> thm"} \\
+ @{index_ML "op OF": "thm * thm list -> thm"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{text "rule\<^sub>1 RS rule\<^sub>2"} resolves @{text
+ "rule\<^sub>1"} with @{text "rule\<^sub>2"} according to the
+ @{inference resolution} principle explained above. Note that the
+ corresponding attribute in the Isar language is called @{attribute
+ THEN}.
+
+ \item @{text "rule OF rules"} resolves a list of rules with the
+ first rule, addressing its premises @{text "1, \<dots>, length rules"}
+ (operating from last to first). This means the newly emerging
+ premises are all concatenated, without interfering. Also note that
+ compared to @{text "RS"}, the rule argument order is swapped: @{text
+ "rule\<^sub>1 RS rule\<^sub>2 = rule\<^sub>2 OF [rule\<^sub>1]"}.
+
+ \end{description}
+*}
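+
+text {*
+  A minimal sketch, with hypothetical rules @{text "imp_rule"} of
+  the form @{text "A \<Longrightarrow> B"} and @{text "start"} of the form @{text
+  "A"}:
+
+\begin{verbatim}
+  val res1 = start RS imp_rule;     (* yields B *)
+  val res2 = imp_rule OF [start];   (* the same, arguments swapped *)
+\end{verbatim}
+*}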
+
+end
--- a/doc-src/IsarImplementation/Thy/ML.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/ML.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,6 @@
-(* $Id$ *)
-
-theory "ML" imports base begin
+theory "ML"
+imports Base
+begin
chapter {* Advanced ML programming *}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Prelim.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,764 @@
+theory Prelim
+imports Base
+begin
+
+chapter {* Preliminaries *}
+
+section {* Contexts \label{sec:context} *}
+
+text {*
+ A logical context represents the background that is required for
+ formulating statements and composing proofs. It acts as a medium to
+ produce formal content, depending on earlier material (declarations,
+ results etc.).
+
+ For example, derivations within the Isabelle/Pure logic can be
+ described as a judgment @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<phi>"}, which means that a
+ proposition @{text "\<phi>"} is derivable from hypotheses @{text "\<Gamma>"}
+ within the theory @{text "\<Theta>"}. There are logical reasons for
+ keeping @{text "\<Theta>"} and @{text "\<Gamma>"} separate: theories can be
+ liberal about supporting type constructors and schematic
+ polymorphism of constants and axioms, while the inner calculus of
+ @{text "\<Gamma> \<turnstile> \<phi>"} is strictly limited to Simple Type Theory (with
+ fixed type variables in the assumptions).
+
+ \medskip Contexts and derivations are linked by the following key
+ principles:
+
+ \begin{itemize}
+
+ \item Transfer: monotonicity of derivations admits results to be
+ transferred into a \emph{larger} context, i.e.\ @{text "\<Gamma> \<turnstile>\<^sub>\<Theta>
+ \<phi>"} implies @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta>\<^sub>' \<phi>"} for contexts @{text "\<Theta>'
+ \<supseteq> \<Theta>"} and @{text "\<Gamma>' \<supseteq> \<Gamma>"}.
+
+ \item Export: discharge of hypotheses admits results to be exported
+ into a \emph{smaller} context, i.e.\ @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta> \<phi>"}
+ implies @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<Delta> \<Longrightarrow> \<phi>"} where @{text "\<Gamma>' \<supseteq> \<Gamma>"} and
+ @{text "\<Delta> = \<Gamma>' - \<Gamma>"}. Note that @{text "\<Theta>"} remains unchanged here,
+ only the @{text "\<Gamma>"} part is affected.
+
+ \end{itemize}
+
+ \medskip By modeling the main characteristics of the primitive
+ @{text "\<Theta>"} and @{text "\<Gamma>"} above, and abstracting over any
+ particular logical content, we arrive at the fundamental notions of
+ \emph{theory context} and \emph{proof context} in Isabelle/Isar.
+ These implement a certain policy to manage arbitrary \emph{context
+ data}. There is a strongly-typed mechanism to declare new kinds of
+ data at compile time.
+
+ The internal bootstrap process of Isabelle/Pure eventually reaches a
+ stage where certain data slots provide the logical content of @{text
+ "\<Theta>"} and @{text "\<Gamma>"} sketched above, but this does not stop there!
+ Various additional data slots support all kinds of mechanisms that
+ are not necessarily part of the core logic.
+
+ For example, there would be data for canonical introduction and
+ elimination rules for arbitrary operators (depending on the
+ object-logic and application), which enables users to perform
+ standard proof steps implicitly (cf.\ the @{text "rule"} method
+ \cite{isabelle-isar-ref}).
+
+ \medskip Thus Isabelle/Isar is able to bring forth more and more
+ concepts successively. In particular, an object-logic like
+ Isabelle/HOL continues the Isabelle/Pure setup by adding specific
+ components for automated reasoning (classical reasoner, tableau
+ prover, structured induction etc.) and derived specification
+ mechanisms (inductive predicates, recursive functions etc.). All of
+ this is ultimately based on the generic data management by theory
+ and proof contexts introduced here.
+*}
+
+
+subsection {* Theory context \label{sec:context-theory} *}
+
+text {*
+ A \emph{theory} is a data container with explicit name and unique
+ identifier. Theories are related by a (nominal) sub-theory
+ relation, which corresponds to the dependency graph of the original
+ construction; each theory is derived from a certain sub-graph of
+ ancestor theories.
+
+ The @{text "merge"} operation produces the least upper bound of two
+ theories, which actually degenerates into absorption of one theory
+ into the other (due to the nominal sub-theory relation).
+
+ The @{text "begin"} operation starts a new theory by importing
+ several parent theories and entering a special @{text "draft"} mode,
+ which is sustained until the final @{text "end"} operation. A draft
+ theory acts like a linear type, where updates invalidate earlier
+ versions. An invalidated draft is called ``stale''.
+
+ The @{text "checkpoint"} operation produces an intermediate stepping
+ stone that will survive the next update: both the original and the
+ changed theory remain valid and are related by the sub-theory
+ relation. Checkpointing essentially recovers purely functional
+ theory values, at the expense of some extra internal bookkeeping.
+
+ The @{text "copy"} operation produces an auxiliary version that has
+ the same data content, but is unrelated to the original: updates of
+ the copy do not affect the original, neither does the sub-theory
+ relation hold.
+
+ \medskip The example in \figref{fig:ex-theory} below shows a theory
+ graph derived from @{text "Pure"}, with theory @{text "Length"}
+ importing @{text "Nat"} and @{text "List"}. The body of @{text
+ "Length"} consists of a sequence of updates, working mostly on
+ drafts. Intermediate checkpoints may occur as well, due to the
+ history mechanism provided by the Isar top-level, cf.\
+ \secref{sec:isar-toplevel}.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{rcccl}
+ & & @{text "Pure"} \\
+ & & @{text "\<down>"} \\
+ & & @{text "FOL"} \\
+ & $\swarrow$ & & $\searrow$ & \\
+ @{text "Nat"} & & & & @{text "List"} \\
+ & $\searrow$ & & $\swarrow$ \\
+ & & @{text "Length"} \\
+ & & \multicolumn{3}{l}{~~@{keyword "imports"}} \\
+ & & \multicolumn{3}{l}{~~@{keyword "begin"}} \\
+ & & $\vdots$~~ \\
+ & & @{text "\<bullet>"}~~ \\
+ & & $\vdots$~~ \\
+ & & @{text "\<bullet>"}~~ \\
+ & & $\vdots$~~ \\
+ & & \multicolumn{3}{l}{~~@{command "end"}} \\
+ \end{tabular}
+ \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
+ \end{center}
+ \end{figure}
+
+ \medskip There is a separate notion of \emph{theory reference} for
+ maintaining a live link to an evolving theory context: updates on
+ drafts are propagated automatically. Dynamic updating stops after
+ an explicit @{text "end"} only.
+
+ Derived entities may store a theory reference in order to indicate
+ the context they belong to. This implicitly assumes monotonic
+ reasoning, because the referenced context may become larger without
+ further notice.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type theory} \\
+ @{index_ML Theory.subthy: "theory * theory -> bool"} \\
+ @{index_ML Theory.merge: "theory * theory -> theory"} \\
+ @{index_ML Theory.checkpoint: "theory -> theory"} \\
+ @{index_ML Theory.copy: "theory -> theory"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type theory_ref} \\
+ @{index_ML Theory.deref: "theory_ref -> theory"} \\
+ @{index_ML Theory.check_thy: "theory -> theory_ref"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type theory} represents theory contexts. This is
+ essentially a linear type! Most operations destroy the original
+ version, which then becomes ``stale''.
+
+ \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
+ compares theories according to the inherent graph structure of the
+ construction. This sub-theory relation is a nominal approximation
+ of inclusion (@{text "\<subseteq>"}) of the corresponding content.
+
+ \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
+ absorbs one theory into the other. This fails for unrelated
+ theories!
+
+ \item @{ML "Theory.checkpoint"}~@{text "thy"} produces a safe
+ stepping stone in the linear development of @{text "thy"}. The next
+ update will result in two related, valid theories.
+
+ \item @{ML "Theory.copy"}~@{text "thy"} produces a variant of @{text
+ "thy"} that holds a copy of the same data. The result is not
+ related to the original; the original is unchanged.
+
+ \item @{ML_type theory_ref} represents a sliding reference to an
+ always valid theory; updates on the original are propagated
+ automatically.
+
+ \item @{ML "Theory.deref"}~@{text "thy_ref"} turns a @{ML_type
+ "theory_ref"} into an @{ML_type "theory"} value. As the referenced
+ theory evolves monotonically over time, later invocations of @{ML
+ "Theory.deref"} may refer to a larger context.
+
+ \item @{ML "Theory.check_thy"}~@{text "thy"} produces a @{ML_type
+ "theory_ref"} from a valid @{ML_type "theory"} value.
+
+ \end{description}
+*}
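+
+text {*
+  The following sketch illustrates these operations, assuming some
+  theory value @{text "thy"} is given:
+
+\begin{verbatim}
+  val thy' = Theory.checkpoint thy;     (* safe stepping stone *)
+  val b = Theory.subthy (thy, thy');    (* true by construction *)
+  val thy'' = Theory.merge (thy, thy'); (* absorbs the sub-theory *)
+\end{verbatim}
+*}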
+
+
+subsection {* Proof context \label{sec:context-proof} *}
+
+text {*
+ A proof context is a container for pure data with a back-reference
+ to the theory it belongs to. The @{text "init"} operation creates a
+ proof context from a given theory. Modifications to draft theories
+ are propagated to the proof context as usual, but there is also an
+ explicit @{text "transfer"} operation to force resynchronization
+ with more substantial updates to the underlying theory. The actual
+ context data does not require any special bookkeeping, thanks to the
+ lack of destructive features.
+
+ Entities derived in a proof context need to record inherent logical
+ requirements explicitly, since there is no separate context
+ identification as for theories. For example, hypotheses used in
+ primitive derivations (cf.\ \secref{sec:thms}) are recorded
+ separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to make double
+ sure. Results could still leak into an alien proof context due to
+ programming errors, but Isabelle/Isar includes some extra validity
+ checks in critical positions, notably at the end of a sub-proof.
+
+ Proof contexts may be manipulated arbitrarily, although the common
+ discipline is to follow block structure as a mental model: a given
+ context is extended consecutively, and results are exported back
+ into the original context. Note that the Isar proof states model
+ block-structured reasoning explicitly, using a stack of proof
+ contexts internally.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type Proof.context} \\
+ @{index_ML ProofContext.init: "theory -> Proof.context"} \\
+ @{index_ML ProofContext.theory_of: "Proof.context -> theory"} \\
+ @{index_ML ProofContext.transfer: "theory -> Proof.context -> Proof.context"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type Proof.context} represents proof contexts. Elements
+ of this type are essentially pure values, with a sliding reference
+ to the background theory.
+
+ \item @{ML ProofContext.init}~@{text "thy"} produces a proof context
+ derived from @{text "thy"}, initializing all data.
+
+ \item @{ML ProofContext.theory_of}~@{text "ctxt"} selects the
+ background theory from @{text "ctxt"}, dereferencing its internal
+ @{ML_type theory_ref}.
+
+ \item @{ML ProofContext.transfer}~@{text "thy ctxt"} promotes the
+ background theory of @{text "ctxt"} to the super theory @{text
+ "thy"}.
+
+ \end{description}
+*}
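+
+text {*
+  A typical round-trip, again assuming a theory value @{text "thy"}
+  is given:
+
+\begin{verbatim}
+  val ctxt = ProofContext.init thy;        (* fresh proof context *)
+  val thy' = ProofContext.theory_of ctxt;  (* background theory *)
+\end{verbatim}
+*}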
+
+
+subsection {* Generic contexts \label{sec:generic-context} *}
+
+text {*
+ A generic context is the disjoint sum of either a theory or proof
+ context. Occasionally, this enables uniform treatment of generic
+ context data, typically extra-logical information. Operations on
+ generic contexts include the usual injections, partial selections,
+ and combinators for lifting operations on either component of the
+ disjoint sum.
+
+ Moreover, there are total operations @{text "theory_of"} and @{text
+ "proof_of"} to convert a generic context into either kind: a theory
+ can always be selected from the sum, while a proof context might
+ have to be constructed by an ad-hoc @{text "init"} operation.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type Context.generic} \\
+ @{index_ML Context.theory_of: "Context.generic -> theory"} \\
+ @{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type Context.generic} is the direct sum of @{ML_type
+ "theory"} and @{ML_type "Proof.context"}, with the datatype
+ constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
+
+ \item @{ML Context.theory_of}~@{text "context"} always produces a
+ theory from the generic @{text "context"}, using @{ML
+ "ProofContext.theory_of"} as required.
+
+ \item @{ML Context.proof_of}~@{text "context"} always produces a
+ proof context from the generic @{text "context"}, using @{ML
+ "ProofContext.init"} as required (note that this re-initializes the
+ context data with each invocation).
+
+ \end{description}
+*}
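+
+text {*
+  For example, a proof context @{text "ctxt"} (assumed given) may be
+  injected into the sum, and the background theory recovered from
+  the result:
+
+\begin{verbatim}
+  val generic = Context.Proof ctxt;
+  val thy = Context.theory_of generic;
+\end{verbatim}
+*}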
+
+
+subsection {* Context data \label{sec:context-data} *}
+
+text {*
+ The main purpose of theory and proof contexts is to manage arbitrary
+ data. New data types can be declared incrementally at compile time.
+ There are separate declaration mechanisms for any of the three kinds
+ of contexts: theory, proof, generic.
+
+ \paragraph{Theory data} may refer to destructive entities, which are
+ maintained in direct correspondence to the linear evolution of
+ theory values, including explicit copies.\footnote{Most existing
+ instances of destructive theory data are merely historical relics
+ (e.g.\ the destructive theorem storage, and destructive hints for
+ the Simplifier and Classical rules).} A theory data declaration
+ needs to implement the following SML signature:
+
+ \medskip
+ \begin{tabular}{ll}
+ @{text "\<type> T"} & representing type \\
+ @{text "\<val> empty: T"} & empty default value \\
+ @{text "\<val> copy: T \<rightarrow> T"} & refresh impure data \\
+ @{text "\<val> extend: T \<rightarrow> T"} & re-initialize on import \\
+ @{text "\<val> merge: T \<times> T \<rightarrow> T"} & join on import \\
+ \end{tabular}
+ \medskip
+
+ \noindent The @{text "empty"} value acts as initial default for
+ \emph{any} theory that does not declare actual data content; @{text
+ "copy"} maintains persistent integrity for impure data, it is just
+ the identity for pure values; @{text "extend"} is acts like a
+ unitary version of @{text "merge"}, both operations should also
+ include the functionality of @{text "copy"} for impure data.
+
+ \paragraph{Proof context data} is purely functional. A declaration
+ needs to implement the following SML signature:
+
+ \medskip
+ \begin{tabular}{ll}
+ @{text "\<type> T"} & representing type \\
+ @{text "\<val> init: theory \<rightarrow> T"} & produce initial value \\
+ \end{tabular}
+ \medskip
+
+ \noindent The @{text "init"} operation is supposed to produce a pure
+ value from the given background theory.
+
+ \paragraph{Generic data} provides a hybrid interface for both theory
+ and proof data. The declaration is essentially the same as for
+ (pure) theory data, without @{text "copy"}. The @{text "init"}
+ operation for proof contexts merely selects the current data value
+ from the background theory.
+
+ \bigskip A data declaration of type @{text "T"} results in the
+ following interface:
+
+ \medskip
+ \begin{tabular}{ll}
+ @{text "init: theory \<rightarrow> T"} \\
+ @{text "get: context \<rightarrow> T"} \\
+ @{text "put: T \<rightarrow> context \<rightarrow> context"} \\
+ @{text "map: (T \<rightarrow> T) \<rightarrow> context \<rightarrow> context"} \\
+ \end{tabular}
+ \medskip
+
+ \noindent Here @{text "init"} is only applicable to impure theory
+ data to install a fresh copy persistently (destructive update on
+ uninitialized data has no permanent effect). The other operations provide
+ access for the particular kind of context (theory, proof, or generic
+ context). Note that this is a safe interface: there is no other way
+ to access the corresponding data slot of a context. By keeping
+ these operations private, a component may maintain abstract values
+ authentically, without other components interfering.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_functor TheoryDataFun} \\
+ @{index_ML_functor ProofDataFun} \\
+ @{index_ML_functor GenericDataFun} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_functor TheoryDataFun}@{text "(spec)"} declares data for
+ type @{ML_type theory} according to the specification provided as
+ argument structure. The resulting structure provides data init and
+ access operations as described above.
+
+ \item @{ML_functor ProofDataFun}@{text "(spec)"} is analogous to
+ @{ML_functor TheoryDataFun} for type @{ML_type Proof.context}.
+
+ \item @{ML_functor GenericDataFun}@{text "(spec)"} is analogous to
+ @{ML_functor TheoryDataFun} for type @{ML_type Context.generic}.
+
+ \end{description}
+*}
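+
+text {*
+  For illustration, here is a hypothetical theory data declaration
+  that maintains a list of terms; the structure name and the
+  particular merge policy are arbitrary choices for this sketch:
+
+\begin{verbatim}
+  structure TermData = TheoryDataFun
+  (
+    type T = term list;
+    val empty = [];
+    val copy = I;
+    val extend = I;
+    fun merge _ = Library.merge (op aconv);
+  );
+
+  val get_terms = TermData.get;
+  fun add_term t = TermData.map (insert (op aconv) t);
+\end{verbatim}
+
+  Keeping @{text "TermData"} itself private and publishing only
+  @{text "get_terms"} and @{text "add_term"} maintains an abstract
+  view on the data, as explained above.
+*}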
+
+
+section {* Names \label{sec:names} *}
+
+text {*
+ In principle, a name is just a string, but there are various
+ conventions for encoding additional structure. For example, ``@{text
+ "Foo.bar.baz"}'' is considered as a qualified name consisting of
+ three basic name components. The individual constituents of a name
+ may have further substructure, e.g.\ the string
+ ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.
+*}
+
+
+subsection {* Strings of symbols *}
+
+text {*
+ A \emph{symbol} constitutes the smallest textual unit in Isabelle
+ --- raw characters are normally not encountered at all. Isabelle
+ strings consist of a sequence of symbols, represented as a packed
+ string or a list of strings. Each symbol is in itself a small
+ string, which has one of the following forms:
+
+ \begin{enumerate}
+
+ \item a single ASCII character ``@{text "c"}'', for example
+ ``\verb,a,'',
+
+ \item a regular symbol ``\verb,\,\verb,<,@{text "ident"}\verb,>,'',
+ for example ``\verb,\,\verb,<alpha>,'',
+
+ \item a control symbol ``\verb,\,\verb,<^,@{text "ident"}\verb,>,'',
+ for example ``\verb,\,\verb,<^bold>,'',
+
+ \item a raw symbol ``\verb,\,\verb,<^raw:,@{text text}\verb,>,''
+ where @{text text} consists of printable characters excluding
+ ``\verb,.,'' and ``\verb,>,'', for example
+ ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
+
+ \item a numbered raw control symbol ``\verb,\,\verb,<^raw,@{text
+ n}\verb,>,'' where @{text n} consists of digits, for example
+ ``\verb,\,\verb,<^raw42>,''.
+
+ \end{enumerate}
+
+ \noindent The @{text "ident"} syntax for symbol names is @{text
+ "letter (letter | digit)\<^sup>*"}, where @{text "letter =
+ A..Za..z"} and @{text "digit = 0..9"}. There are infinitely many
+ regular symbols and control symbols, but a fixed collection of
+ standard symbols is treated specifically. For example,
+ ``\verb,\,\verb,<alpha>,'' is classified as a letter, which means it
+ may occur within regular Isabelle identifiers.
+
+ Since the character set underlying Isabelle symbols is 7-bit ASCII
+ and 8-bit characters are passed through transparently, Isabelle may
+ also process Unicode/UCS data in UTF-8 encoding. Unicode provides
+ its own collection of mathematical symbols, but there is no built-in
+ link to the standard collection of Isabelle.
+
+ \medskip Output of Isabelle symbols depends on the print mode
+ (\secref{print-mode}). For example, the standard {\LaTeX} setup of
+ the Isabelle document preparation system would present
+ ``\verb,\,\verb,<alpha>,'' as @{text "\<alpha>"}, and
+ ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text
+ "\<^bold>\<alpha>"}.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type "Symbol.symbol"} \\
+ @{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
+ @{index_ML Symbol.is_letter: "Symbol.symbol -> bool"} \\
+ @{index_ML Symbol.is_digit: "Symbol.symbol -> bool"} \\
+ @{index_ML Symbol.is_quasi: "Symbol.symbol -> bool"} \\
+ @{index_ML Symbol.is_blank: "Symbol.symbol -> bool"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type "Symbol.sym"} \\
+ @{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type "Symbol.symbol"} represents individual Isabelle
+ symbols; this is an alias for @{ML_type "string"}.
+
+ \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
+ from the packed form. This function supersedes @{ML
+ "String.explode"} for virtually all purposes of manipulating text in
+ Isabelle!
+
+ \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
+ "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
+ symbols according to fixed syntactic conventions of Isabelle, cf.\
+ \cite{isabelle-isar-ref}.
+
+ \item @{ML_type "Symbol.sym"} is a concrete datatype that represents
+ the different kinds of symbols explicitly, with constructors @{ML
+ "Symbol.Char"}, @{ML "Symbol.Sym"}, @{ML "Symbol.Ctrl"}, @{ML
+ "Symbol.Raw"}.
+
+ \item @{ML "Symbol.decode"} converts the string representation of a
+ symbol into the datatype version.
+
+ \end{description}
+*}
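+
+text {*
+  A small example of the difference between symbols and raw
+  characters (results indicated in comments):
+
+\begin{verbatim}
+  Symbol.explode "a\<alpha>";   (* ["a", "\<alpha>"]: 2 symbols *)
+  String.explode "a\<alpha>";   (* 9 raw characters *)
+\end{verbatim}
+*}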
+
+
+subsection {* Basic names \label{sec:basic-names} *}
+
+text {*
+ A \emph{basic name} essentially consists of a single Isabelle
+ identifier. There are conventions to mark separate classes of basic
+ names, by attaching a suffix of underscores: one underscore means
+ \emph{internal name}, two underscores means \emph{Skolem name},
+ three underscores means \emph{internal Skolem name}.
+
+ For example, the basic name @{text "foo"} has the internal version
+ @{text "foo_"}, with Skolem versions @{text "foo__"} and @{text
+ "foo___"}, respectively.
+
+ These special versions provide copies of the basic name space, apart
+ from anything that normally appears in the user text. For example,
+ system generated variables in Isar proof contexts are usually marked
+ as internal, which prevents mysterious name references like @{text
+ "xaa"} from appearing in the text.
+
+ \medskip Manipulating binding scopes often requires on-the-fly
+ renamings. A \emph{name context} contains a collection of already
+ used names. The @{text "declare"} operation adds names to the
+ context.
+
+ The @{text "invents"} operation derives a number of fresh names from
+ a given starting point. For example, the first three names derived
+ from @{text "a"} are @{text "a"}, @{text "b"}, @{text "c"}.
+
+ The @{text "variants"} operation produces fresh names by
+ incrementing tentative names as base-26 numbers (with digits @{text
+ "a..z"}) until all clashes are resolved. For example, name @{text
+ "foo"} results in variants @{text "fooa"}, @{text "foob"}, @{text
+ "fooc"}, \dots, @{text "fooaa"}, @{text "fooab"} etc.; each renaming
+ step picks the next unused variant from this sequence.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML Name.internal: "string -> string"} \\
+ @{index_ML Name.skolem: "string -> string"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type Name.context} \\
+ @{index_ML Name.context: Name.context} \\
+ @{index_ML Name.declare: "string -> Name.context -> Name.context"} \\
+ @{index_ML Name.invents: "Name.context -> string -> int -> string list"} \\
+ @{index_ML Name.variants: "string list -> Name.context -> string list * Name.context"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML Name.internal}~@{text "name"} produces an internal name
+ by adding one underscore.
+
+ \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
+ adding two underscores.
+
+ \item @{ML_type Name.context} represents the context of already used
+ names; the initial value is @{ML "Name.context"}.
+
+ \item @{ML Name.declare}~@{text "name"} enters a used name into the
+ context.
+
+ \item @{ML Name.invents}~@{text "context name n"} produces @{text
+ "n"} fresh names derived from @{text "name"}.
+
+ \item @{ML Name.variants}~@{text "names context"} produces fresh
+ variants of @{text "names"}; the result is entered into the context.
+
+ \end{description}
+*}
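+
+text {*
+  The following sketch shows these operations in concert:
+
+\begin{verbatim}
+  val ctxt = fold Name.declare ["x", "y"] Name.context;
+  val abc = Name.invents ctxt "a" 3;       (* ["a", "b", "c"] *)
+  val (names, ctxt') = Name.variants ["x", "z"] ctxt;
+                                           (* ["xa", "z"] *)
+\end{verbatim}
+*}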
+
+
+subsection {* Indexed names *}
+
+text {*
+ An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
+ name and a natural number. This representation allows efficient
+ renaming by incrementing the second component only. The canonical
+ way to rename two collections of indexnames apart from each other is
+ this: determine the maximum index @{text "maxidx"} of the first
+ collection, then increment all indexes of the second collection by
+ @{text "maxidx + 1"}; the maximum index of an empty collection is
+ @{text "-1"}.
+
+ Occasionally, basic names and indexed names are injected into the
+ same pair type: the (improper) indexname @{text "(x, -1)"} is used
+ to encode basic names.
+
+ \medskip Isabelle syntax observes the following rules for
+ representing an indexname @{text "(x, i)"} as a packed string,
+ where the first matching rule applies:
+
+ \begin{itemize}
+
+ \item @{text "?x"} if @{text "x"} does not end with a digit and @{text "i = 0"},
+
+ \item @{text "?xi"} if @{text "x"} does not end with a digit,
+
+ \item @{text "?x.i"} otherwise.
+
+ \end{itemize}
+
+ Indexnames may acquire large index numbers over time. Results are
+ normalized towards @{text "0"} at certain checkpoints, notably at
+ the end of a proof. This works by producing variants of the
+ corresponding basic name components. For example, the collection
+ @{text "?x1, ?x7, ?x42"} becomes @{text "?x, ?xa, ?xb"}.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type indexname} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type indexname} represents indexed names. This is an
+ abbreviation for @{ML_type "string * int"}. The second component is
+ usually non-negative, except for situations where @{text "(x, -1)"}
+ is used to embed basic names into this type.
+
+ \end{description}
+*}
+
+
+subsection {* Qualified names and name spaces *}
+
+text {*
+ A \emph{qualified name} consists of a non-empty sequence of basic
+ name components. The packed representation uses a dot as separator,
+ as in ``@{text "A.b.c"}''. The last component is called \emph{base}
+ name, the remaining prefix \emph{qualifier} (which may be empty).
+ The idea of qualified names is to encode nested structures by
+ recording the access paths as qualifiers. For example, an item
+ named ``@{text "A.b.c"}'' may be understood as a local entity @{text
+ "c"}, within a local structure @{text "b"}, within a global
+ structure @{text "A"}. Typically, name space hierarchies consist of
+ 1--2 levels of qualification, but this need not always be so.
+
+ The empty name is commonly used as an indication of unnamed
+ entities, whenever this makes any sense. The basic operations on
+ qualified names are smart enough to pass through such improper names
+ unchanged.
+
+ \medskip A @{text "naming"} policy tells how to turn a name
+ specification into a fully qualified internal name (by the @{text
+ "full"} operation), and how fully qualified names may be accessed
+ externally. For example, the default naming policy is to prefix an
+ implicit path: @{text "full x"} produces @{text "path.x"}, and the
+ standard accesses for @{text "path.x"} include both @{text "x"} and
+ @{text "path.x"}. Normally, the naming is implicit in the theory or
+ proof context; there are separate versions of the corresponding.
+
+ \medskip A @{text "name space"} manages a collection of fully
+ internalized names, together with a mapping between external names
+ and internal names (in both directions). The corresponding @{text
+ "intern"} and @{text "extern"} operations are mostly used for
+ parsing and printing only! The @{text "declare"} operation augments
+ a name space according to the accesses determined by the naming
+ policy.
+
+ \medskip As a general principle, there is a separate name space for
+ each kind of formal entity, e.g.\ logical constant, type
+ constructor, type class, theorem. It is usually clear from the
+ occurrence in concrete syntax (or from the scope) which kind of
+ entity a name refers to. For example, the very same name @{text
+ "c"} may be used uniformly for a constant, type constructor, and
+ type class.
+
+ There are common schemes to name theorems systematically, according
+ to the name of the main logical entity involved, e.g.\ @{text
+ "c.intro"} for a canonical theorem related to constant @{text "c"}.
+ This technique of mapping names from one space into another requires
+ some care in order to avoid conflicts. In particular, theorem names
+ derived from a type constructor or type class are better suffixed in
+ addition to the usual qualification, e.g.\ @{text "c_type.intro"}
+ and @{text "c_class.intro"} for theorems related to type @{text "c"}
+ and class @{text "c"}, respectively.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML NameSpace.base: "string -> string"} \\
+ @{index_ML NameSpace.qualifier: "string -> string"} \\
+ @{index_ML NameSpace.append: "string -> string -> string"} \\
+ @{index_ML NameSpace.implode: "string list -> string"} \\
+ @{index_ML NameSpace.explode: "string -> string list"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type NameSpace.naming} \\
+ @{index_ML NameSpace.default_naming: NameSpace.naming} \\
+ @{index_ML NameSpace.add_path: "string -> NameSpace.naming -> NameSpace.naming"} \\
+ @{index_ML NameSpace.full_name: "NameSpace.naming -> binding -> string"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML_type NameSpace.T} \\
+ @{index_ML NameSpace.empty: NameSpace.T} \\
+ @{index_ML NameSpace.merge: "NameSpace.T * NameSpace.T -> NameSpace.T"} \\
+ @{index_ML NameSpace.declare: "NameSpace.naming -> binding -> NameSpace.T -> string * NameSpace.T"} \\
+ @{index_ML NameSpace.intern: "NameSpace.T -> string -> string"} \\
+ @{index_ML NameSpace.extern: "NameSpace.T -> string -> string"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML NameSpace.base}~@{text "name"} returns the base name of a
+ qualified name.
+
+ \item @{ML NameSpace.qualifier}~@{text "name"} returns the qualifier
+ of a qualified name.
+
+ \item @{ML NameSpace.append}~@{text "name\<^isub>1 name\<^isub>2"}
+ appends two qualified names.
+
+ \item @{ML NameSpace.implode}~@{text "name"} and @{ML
+ NameSpace.explode}~@{text "names"} convert between the packed string
+ representation and the explicit list form of qualified names.
+
+ \item @{ML_type NameSpace.naming} represents the abstract concept of
+ a naming policy.
+
+ \item @{ML NameSpace.default_naming} is the default naming policy.
+ In a theory context, this is usually augmented by a path prefix
+ consisting of the theory name.
+
+ \item @{ML NameSpace.add_path}~@{text "path naming"} augments the
+ naming policy by extending its path component.
+
+ \item @{ML NameSpace.full_name}~@{text "naming binding"} turns a name
+ binding (usually a basic name) into the fully qualified
+ internal name, according to the given naming policy.
+
+ \item @{ML_type NameSpace.T} represents name spaces.
+
+ \item @{ML NameSpace.empty} and @{ML NameSpace.merge}~@{text
+ "(space\<^isub>1, space\<^isub>2)"} are the canonical operations for
+ maintaining name spaces according to theory data management
+ (\secref{sec:context-data}).
+
+ \item @{ML NameSpace.declare}~@{text "naming bindings space"} enters a
+ name binding as fully qualified internal name into the name space,
+ with external accesses determined by the naming policy.
+
+ \item @{ML NameSpace.intern}~@{text "space name"} internalizes a
+ (partially qualified) external name.
+
+ This operation is mostly for parsing! Note that fully qualified
+ names stemming from declarations are produced via @{ML
+ "NameSpace.full_name"} and @{ML "NameSpace.declare"}
+ (or their derivatives for @{ML_type theory} and
+ @{ML_type Proof.context}).
+
+ \item @{ML NameSpace.extern}~@{text "space name"} externalizes a
+ (fully qualified) internal name.
+
+ This operation is mostly for printing! Note that unqualified names are
+ produced via @{ML NameSpace.base}.
+
+ \end{description}
+*}
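+
+text {*
+  The basic operations work as follows:
+
+\begin{verbatim}
+  NameSpace.explode "A.b.c";    (* ["A", "b", "c"] *)
+  NameSpace.base "A.b.c";       (* "c" *)
+  NameSpace.qualifier "A.b.c";  (* "A.b" *)
+  NameSpace.append "A" "b.c";   (* "A.b.c" *)
+\end{verbatim}
+*}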
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Proof.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,330 @@
+theory Proof
+imports Base
+begin
+
+chapter {* Structured proofs *}
+
+section {* Variables \label{sec:variables} *}
+
+text {*
+ Any variable that is not explicitly bound by @{text "\<lambda>"}-abstraction
+ is considered as ``free''. Logically, free variables act like
+ outermost universal quantification at the sequent level: @{text
+ "A\<^isub>1(x), \<dots>, A\<^isub>n(x) \<turnstile> B(x)"} means that the result
+ holds \emph{for all} values of @{text "x"}. Free variables for
+ terms (not types) can be fully internalized into the logic: @{text
+ "\<turnstile> B(x)"} and @{text "\<turnstile> \<And>x. B(x)"} are interchangeable, provided
+ that @{text "x"} does not occur elsewhere in the context.
+ Inspecting @{text "\<turnstile> \<And>x. B(x)"} more closely, we see that inside the
+ quantifier, @{text "x"} is essentially ``arbitrary, but fixed'',
+ while from outside it appears as a place-holder for instantiation
+ (thanks to @{text "\<And>"} elimination).
+
+ The Pure logic represents the idea of variables being either inside
+ or outside the current scope by providing separate syntactic
+ categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\
+ \emph{schematic variables} (e.g.\ @{text "?x"}). Incidently, a
+ universal result @{text "\<turnstile> \<And>x. B(x)"} has the HHF normal form @{text
+ "\<turnstile> B(?x)"}, which represents its generality nicely without requiring
+ an explicit quantifier. The same principle works for type
+ variables: @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile>
+ \<forall>\<alpha>. B(\<alpha>)"}'' without demanding a truly polymorphic framework.
+
+ \medskip Additional care is required to treat type variables in a
+ way that facilitates type-inference. In principle, term variables
+ depend on type variables, which means that type variables would have
+ to be declared first. For example, a raw type-theoretic framework
+ would demand the context to be constructed in stages as follows:
+ @{text "\<Gamma> = \<alpha>: type, x: \<alpha>, a: A(x\<^isub>\<alpha>)"}.
+
+ We allow a slightly less formalistic mode of operation: term
+ variables @{text "x"} are fixed without specifying a type yet
+ (essentially \emph{all} potential occurrences of some instance
+ @{text "x\<^isub>\<tau>"} are fixed); the first occurrence of @{text "x"}
+ within a specific term assigns its most general type, which is then
+ maintained consistently in the context. The above example becomes
+ @{text "\<Gamma> = x: term, \<alpha>: type, A(x\<^isub>\<alpha>)"}, where type @{text
+ "\<alpha>"} is fixed \emph{after} term @{text "x"}, and the constraint
+ @{text "x :: \<alpha>"} is an implicit consequence of the occurrence of
+ @{text "x\<^isub>\<alpha>"} in the subsequent proposition.
+
+ This twist of dependencies is also accommodated by the reverse
+ operation of exporting results from a context: a type variable
+ @{text "\<alpha>"} is considered fixed as long as it occurs in some fixed
+ term variable of the context. For example, exporting @{text "x:
+ term, \<alpha>: type \<turnstile> x\<^isub>\<alpha> = x\<^isub>\<alpha>"} produces in the first step
+ @{text "x: term \<turnstile> x\<^isub>\<alpha> = x\<^isub>\<alpha>"} for fixed @{text "\<alpha>"},
+ and only in the second step @{text "\<turnstile> ?x\<^isub>?\<^isub>\<alpha> =
+ ?x\<^isub>?\<^isub>\<alpha>"} for schematic @{text "?x"} and @{text "?\<alpha>"}.
+
+ \medskip The Isabelle/Isar proof context manages the gory details of
+ term vs.\ type variables, with high-level principles for moving the
+ frontier between fixed and schematic variables.
+
+ The @{text "add_fixes"} operation explictly declares fixed
+ variables; the @{text "declare_term"} operation absorbs a term into
+ a context by fixing new type variables and adding syntactic
+ constraints.
+
+ The @{text "export"} operation is able to perform the main work of
+ generalizing term and type variables as sketched above, assuming
+ that the relevant fixed variables and terms have been declared
+ properly.
+
+ There @{text "import"} operation makes a generalized fact a genuine
+ part of the context, by inventing fixed variables for the schematic
+ ones. The effect can be reversed by using @{text "export"} later,
+ potentially with an extended context; the result is equivalent to
+ the original modulo renaming of schematic variables.
+
+ The @{text "focus"} operation provides a variant of @{text "import"}
+ for nested propositions (with explicit quantification): @{text
+ "\<And>x\<^isub>1 \<dots> x\<^isub>n. B(x\<^isub>1, \<dots>, x\<^isub>n)"} is
+ decomposed by inventing fixed variables @{text "x\<^isub>1, \<dots>,
+ x\<^isub>n"} for the body.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML Variable.add_fixes: "
+ string list -> Proof.context -> string list * Proof.context"} \\
+ @{index_ML Variable.variant_fixes: "
+ string list -> Proof.context -> string list * Proof.context"} \\
+ @{index_ML Variable.declare_term: "term -> Proof.context -> Proof.context"} \\
+ @{index_ML Variable.declare_constraints: "term -> Proof.context -> Proof.context"} \\
+ @{index_ML Variable.export: "Proof.context -> Proof.context -> thm list -> thm list"} \\
+ @{index_ML Variable.polymorphic: "Proof.context -> term list -> term list"} \\
+ @{index_ML Variable.import_thms: "bool -> thm list -> Proof.context ->
+ ((ctyp list * cterm list) * thm list) * Proof.context"} \\
+ @{index_ML Variable.focus: "cterm -> Proof.context -> (cterm list * cterm) * Proof.context"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
+ variables @{text "xs"}, returning the resulting internal names. By
+ default, the internal representation coincides with the external
+ one, which also means that the given variables must not be fixed
+ already. There is a different policy within a local proof body: the
+ given names are just hints for newly invented Skolem variables.
+
+ \item @{ML Variable.variant_fixes} is similar to @{ML
+ Variable.add_fixes}, but always produces fresh variants of the given
+ names.
+
+ \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
+ @{text "t"} to belong to the context. This automatically fixes new
+ type variables, but not term variables. Syntactic constraints for
+ type and term variables are declared uniformly, though.
+
+ \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
+ syntactic constraints from term @{text "t"}, without making it part
+ of the context yet.
+
+ \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
+ fixed type and term variables in @{text "thms"} according to the
+ difference of the @{text "inner"} and @{text "outer"} context,
+ following the principles sketched above.
+
+ \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
+ variables in @{text "ts"} as far as possible, even those occurring
+ in fixed term variables. The default policy of type-inference is to
+ fix newly introduced type variables, which is essentially reversed
+ with @{ML Variable.polymorphic}: here the given terms are detached
+ from the context as far as possible.
+
+ \item @{ML Variable.import_thms}~@{text "open thms ctxt"} invents fixed
+ type and term variables for the schematic ones occurring in @{text
+ "thms"}. The @{text "open"} flag indicates whether the fixed names
+ should be accessible to the user, otherwise newly introduced names
+ are marked as ``internal'' (\secref{sec:names}).
+
+ \item @{ML Variable.focus}~@{text B} decomposes the outermost @{text
+ "\<And>"} prefix of proposition @{text "B"}.
+
+ \end{description}
+*}
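+
+text {*
+  A minimal sketch of fixing and exporting, for a given context
+  @{text "ctxt"}:
+
+\begin{verbatim}
+  val ([x], ctxt') = Variable.add_fixes ["x"] ctxt;
+  (* ... establish facts ths within ctxt' ... *)
+  val ths' = Variable.export ctxt' ctxt ths;
+  (* occurrences of the fixed x have become schematic ?x *)
+\end{verbatim}
+*}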
+
+
+section {* Assumptions \label{sec:assumptions} *}
+
+text {*
+ An \emph{assumption} is a proposition that is postulated in the
+ current context. Local conclusions may use assumptions as
+ additional facts, but this imposes implicit hypotheses that weaken
+ the overall statement.
+
+ Assumptions are restricted to fixed non-schematic statements, i.e.\
+ all generality needs to be expressed by explicit quantifiers.
+ Nevertheless, the result will be in HHF normal form with outermost
+ quantifiers stripped. For example, by assuming @{text "\<And>x :: \<alpha>. P
+ x"} we get @{text "\<And>x :: \<alpha>. P x \<turnstile> P ?x"} for schematic @{text "?x"}
+ of fixed type @{text "\<alpha>"}. Local derivations accumulate more and
+ more explicit references to hypotheses: @{text "A\<^isub>1, \<dots>,
+ A\<^isub>n \<turnstile> B"} where @{text "A\<^isub>1, \<dots>, A\<^isub>n"} needs to
+ be covered by the assumptions of the current context.
+
+ \medskip The @{text "add_assms"} operation augments the context by
+ local assumptions, which are parameterized by an arbitrary @{text
+ "export"} rule (see below).
+
+ The @{text "export"} operation moves facts from a (larger) inner
+ context into a (smaller) outer context, by discharging the
+ difference of the assumptions as specified by the associated export
+ rules. Note that the discharged portion is determined by the
+ difference contexts, not the facts being exported! There is a
+ separate flag to indicate a goal context, where the result is meant
+ to refine an enclosing sub-goal of a structured proof state.
+
+ \medskip The most basic export rule discharges assumptions directly
+ by means of the @{text "\<Longrightarrow>"} introduction rule:
+ \[
+ \infer[(@{text "\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \]
+
+ The variant for goal refinements marks the newly introduced
+ premises, which causes the canonical Isar goal refinement scheme to
+ enforce unification with local premises within the goal:
+ \[
+ \infer[(@{text "#\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \]
+
+ \medskip Alternative versions of assumptions may perform arbitrary
+ transformations on export, as long as the corresponding portion of
+ hypotheses is removed from the given facts. For example, a local
+ definition works by fixing @{text "x"} and assuming @{text "x \<equiv> t"},
+ with the following export rule to reverse the effect:
+ \[
+ \infer[(@{text "\<equiv>-expand"})]{@{text "\<Gamma> \\ x \<equiv> t \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
+ \]
+ This works, because the assumption @{text "x \<equiv> t"} was introduced in
+ a context with @{text "x"} being fresh, so @{text "x"} does not
+ occur in @{text "\<Gamma>"} here.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type Assumption.export} \\
+ @{index_ML Assumption.assume: "cterm -> thm"} \\
+ @{index_ML Assumption.add_assms:
+ "Assumption.export ->
+ cterm list -> Proof.context -> thm list * Proof.context"} \\
+ @{index_ML Assumption.add_assumes: "
+ cterm list -> Proof.context -> thm list * Proof.context"} \\
+ @{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type Assumption.export} represents arbitrary export
+ rules, which is any function of type @{ML_type "bool -> cterm list -> thm -> thm"},
+ where the @{ML_type "bool"} indicates goal mode, and the @{ML_type
+ "cterm list"} the collection of assumptions to be discharged
+ simultaneously.
+
+ \item @{ML Assumption.assume}~@{text "A"} turns proposition @{text
+ "A"} into a raw assumption @{text "A \<turnstile> A'"}, where the conclusion
+ @{text "A'"} is in HHF normal form.
+
+ \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
+ by assumptions @{text "As"} with export rule @{text "r"}. The
+ resulting facts are hypothetical theorems as produced by the raw
+ @{ML Assumption.assume}.
+
+ \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
+ @{ML Assumption.add_assms} where the export rule performs @{text
+ "\<Longrightarrow>_intro"} or @{text "#\<Longrightarrow>_intro"}, depending on goal mode.
+
+ \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
+ exports result @{text "thm"} from the the @{text "inner"} context
+ back into the @{text "outer"} one; @{text "is_goal = true"} means
+ this is a goal context. The result is in HHF normal form. Note
+ that @{ML "ProofContext.export"} combines @{ML "Variable.export"}
+ and @{ML "Assumption.export"} in the canonical way.
+
+ \end{description}
+*}
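+
+text {*
+  A minimal sketch of assuming and discharging, for a given context
+  @{text "ctxt"} and certified proposition @{text "A"}:
+
+\begin{verbatim}
+  val ([th], ctxt') = Assumption.add_assumes [A] ctxt;
+  (* th is the hypothetical theorem A |- A' *)
+  val th' = Assumption.export false ctxt' ctxt th;
+  (* th' is |- A ==> A', with the hypothesis discharged *)
+\end{verbatim}
+*}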
+
+
+section {* Results \label{sec:results} *}
+
+text {*
+ Local results are established by monotonic reasoning from facts
+ within a context. This allows common combinations of theorems,
+ e.g.\ via @{text "\<And>/\<Longrightarrow>"} elimination, resolution rules, or equational
+ reasoning, see \secref{sec:thms}. Unaccounted context manipulations
+ should be avoided, notably raw @{text "\<And>/\<Longrightarrow>"} introduction or ad-hoc
+ references to free variables or assumptions not present in the proof
+ context.
+
+ \medskip The @{text "SUBPROOF"} combinator allows to structure a
+ tactical proof recursively by decomposing a selected sub-goal:
+ @{text "(\<And>x. A(x) \<Longrightarrow> B(x)) \<Longrightarrow> \<dots>"} is turned into @{text "B(x) \<Longrightarrow> \<dots>"}
+ after fixing @{text "x"} and assuming @{text "A(x)"}. This means
+ the tactic needs to solve the conclusion, but may use the premise as
+ a local fact, for locally fixed variables.
+
+ The @{text "prove"} operation provides an interface for structured
+ backwards reasoning under program control, with some explicit sanity
+ checks of the result. The goal context can be augmented by
+ additional fixed variables (cf.\ \secref{sec:variables}) and
+ assumptions (cf.\ \secref{sec:assumptions}), which will be available
+ as local facts during the proof and discharged into implications in
+ the result. Type and term variables are generalized as usual,
+ according to the context.
+
+ The @{text "obtain"} operation produces results by eliminating
+ existing facts by means of a given tactic. This acts like a dual
+ conclusion: the proof demonstrates that the context may be augmented
+ by certain fixed variables and assumptions. See also
+ \cite{isabelle-isar-ref} for the user-level @{text "\<OBTAIN>"} and
+ @{text "\<GUESS>"} elements. Final results, which may not refer to
+ the parameters in the conclusion, need to be exported explicitly into
+ the original context.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML SUBPROOF:
+ "({context: Proof.context, schematics: ctyp list * cterm list,
+ params: cterm list, asms: cterm list, concl: cterm,
+ prems: thm list} -> tactic) -> Proof.context -> int -> tactic"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML Goal.prove: "Proof.context -> string list -> term list -> term ->
+ ({prems: thm list, context: Proof.context} -> tactic) -> thm"} \\
+ @{index_ML Goal.prove_multi: "Proof.context -> string list -> term list -> term list ->
+ ({prems: thm list, context: Proof.context} -> tactic) -> thm list"} \\
+ \end{mldecls}
+ \begin{mldecls}
+ @{index_ML Obtain.result: "(Proof.context -> tactic) ->
+ thm list -> Proof.context -> (cterm list * thm list) * Proof.context"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML SUBPROOF}~@{text "tac ctxt i"} decomposes the structure
+ of the specified sub-goal, producing an extended context and a
+ reduced goal, which needs to be solved by the given tactic. All
+ schematic parameters of the goal are imported into the context as
+ fixed ones, which may not be instantiated in the sub-proof.
+
+ \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
+ "C"} in the context augmented by fixed variables @{text "xs"} and
+ assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
+ it. The latter may depend on the local assumptions being presented
+ as facts. The result is in HHF normal form.
+
+ \item @{ML Goal.prove_multi} is similar to @{ML Goal.prove}, but
+ states several conclusions simultaneously. The goal is encoded by
+ means of Pure conjunction; @{ML Goal.conjunction_tac} will turn this
+ into a collection of individual subgoals.
+
+ \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
+ given facts using a tactic, which results in additional fixed
+ variables and assumptions in the context. Final results need to be
+ exported explicitly.
+
+ \end{description}
+*}
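+
+text {*
+  As a minimal sketch, the following invocation of @{ML Goal.prove}
+  assumes a proposition and concludes it again, for a given context
+  @{text "ctxt"} in which the text parses:
+
+\begin{verbatim}
+  val A = Syntax.read_prop ctxt "PROP A";
+  val th = Goal.prove ctxt [] [A] A
+    (fn {prems, ...} => rtac (hd prems) 1);
+\end{verbatim}
+
+  The assumption is presented as the local fact @{text "hd prems"}
+  during the proof, and discharged into the implication of the final
+  result.
+*}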
+
+end
--- a/doc-src/IsarImplementation/Thy/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,11 +1,11 @@
-
-(* $Id$ *)
-
-use_thy "prelim";
-use_thy "logic";
-use_thy "tactic";
-use_thy "proof";
-use_thy "isar";
-use_thy "locale";
-use_thy "integration";
-use_thy "ML";
+use_thys [
+ "Integration",
+ "Isar",
+ "Local_Theory",
+ "Logic",
+ "ML",
+ "Prelim",
+ "Proof",
+ "Syntax",
+ "Tactic"
+];
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Syntax.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,9 @@
+theory Syntax
+imports Base
+begin
+
+chapter {* Syntax and type-checking *}
+
+text FIXME
+
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/Tactic.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,405 @@
+theory Tactic
+imports Base
+begin
+
+chapter {* Tactical reasoning *}
+
+text {*
+ Tactical reasoning works by refining the initial claim in a
+ backwards fashion, until a solved form is reached. A @{text "goal"}
+ consists of several subgoals that need to be solved in order to
+ achieve the main statement; zero subgoals means that the proof may
+ be finished. A @{text "tactic"} is a refinement operation that maps
+ a goal to a lazy sequence of potential successors. A @{text
+ "tactical"} is a combinator for composing tactics.
+*}
+
+
+section {* Goals \label{sec:tactical-goals} *}
+
+text {*
+ Isabelle/Pure represents a goal as a theorem stating that the
+ subgoals imply the main goal: @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow>
+ C"}. The outermost goal structure is that of a Horn Clause: i.e.\
+ an iterated implication without any quantifiers\footnote{Recall that
+ outermost @{text "\<And>x. \<phi>[x]"} is always represented via schematic
+ variables in the body: @{text "\<phi>[?x]"}. These variables may get
+ instantiated during the course of reasoning.}. For @{text "n = 0"}
+ a goal is called ``solved''.
+
+ The structure of each subgoal @{text "A\<^sub>i"} is that of a
+ general Hereditary Harrop Formula @{text "\<And>x\<^sub>1 \<dots>
+ \<And>x\<^sub>k. H\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> H\<^sub>m \<Longrightarrow> B"}. Here @{text
+ "x\<^sub>1, \<dots>, x\<^sub>k"} are goal parameters, i.e.\
+ arbitrary-but-fixed entities of certain types, and @{text
+ "H\<^sub>1, \<dots>, H\<^sub>m"} are goal hypotheses, i.e.\ facts that may
+ be assumed locally. Together, this forms the goal context of the
+ conclusion @{text B} to be established. The goal hypotheses may be
+ again arbitrary Hereditary Harrop Formulas, although the level of
+ nesting rarely exceeds 1--2 in practice.
+
+ The main conclusion @{text C} is internally marked as a protected
+ proposition, which is represented explicitly by the notation @{text
+ "#C"}. This ensures that the decomposition into subgoals and main
+ conclusion is well-defined for arbitrarily structured claims.
+
+ \medskip Basic goal management is performed via the following
+ Isabelle/Pure rules:
+
+ \[
+ \infer[@{text "(init)"}]{@{text "C \<Longrightarrow> #C"}}{} \qquad
+ \infer[@{text "(finish)"}]{@{text "C"}}{@{text "#C"}}
+ \]
+
+ \medskip The following low-level variants admit general reasoning
+ with protected propositions:
+
+ \[
+ \infer[@{text "(protect)"}]{@{text "#C"}}{@{text "C"}} \qquad
+ \infer[@{text "(conclude)"}]{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}}{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> #C"}}
+ \]
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML Goal.init: "cterm -> thm"} \\
+ @{index_ML Goal.finish: "thm -> thm"} \\
+ @{index_ML Goal.protect: "thm -> thm"} \\
+ @{index_ML Goal.conclude: "thm -> thm"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
+ the well-formed proposition @{text C}.
+
+ \item @{ML "Goal.finish"}~@{text "thm"} checks whether theorem
+ @{text "thm"} is a solved goal (no subgoals), and concludes the
+ result by removing the goal protection.
+
+ \item @{ML "Goal.protect"}~@{text "thm"} protects the full statement
+ of theorem @{text "thm"}.
+
+ \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
+ protection, even if there are pending subgoals.
+
+ \end{description}
+*}
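+
+text {*
+  The following ML fragment is a minimal sketch (a hypothetical
+  helper, not part of the official interfaces) that combines these
+  operations into a simple proof attempt: it runs a given tactic on
+  an initial goal state and concludes the first result.
+*}
+
+ML {*
+  (*sketch only: Goal.finish fails if any subgoals remain*)
+  fun prove_by_tac tac ct =
+    (case Seq.pull (tac (Goal.init ct)) of
+      SOME (st, _) => Goal.finish st
+    | NONE => error "tactic failed");
+*}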
+
+
+section {* Tactics *}
+
+text {* A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
+ maps a given goal state (represented as a theorem, cf.\
+ \secref{sec:tactical-goals}) to a lazy sequence of potential
+ successor states. The underlying sequence implementation is lazy
+ both in head and tail, and is purely functional in \emph{not}
+ supporting memoing.\footnote{The lack of memoing and the strict
+  nature of SML require some care when working with low-level
+ sequence operations, to avoid duplicate or premature evaluation of
+ results.}
+
+ An \emph{empty result sequence} means that the tactic has failed: in
+  a compound tactic expression, other tactics might be tried instead,
+ or the whole refinement step might fail outright, producing a
+ toplevel error message. When implementing tactics from scratch, one
+ should take care to observe the basic protocol of mapping regular
+ error conditions to an empty result; only serious faults should
+ emerge as exceptions.
+
+ By enumerating \emph{multiple results}, a tactic can easily express
+ the potential outcome of an internal search process. There are also
+ combinators for building proof tools that involve search
+ systematically, see also \secref{sec:tacticals}.
+
+ \medskip As explained in \secref{sec:tactical-goals}, a goal state
+ essentially consists of a list of subgoals that imply the main goal
+ (conclusion). Tactics may operate on all subgoals or on a
+ particularly specified subgoal, but must not change the main
+ conclusion (apart from instantiating schematic goal variables).
+
+ Tactics with explicit \emph{subgoal addressing} are of the form
+ @{text "int \<rightarrow> tactic"} and may be applied to a particular subgoal
+ (counting from 1). If the subgoal number is out of range, the
+ tactic should fail with an empty result sequence, but must not raise
+ an exception!
+
+ Operating on a particular subgoal means to replace it by an interval
+ of zero or more subgoals in the same place; other subgoals must not
+ be affected, apart from instantiating schematic variables ranging
+ over the whole goal state.
+
+ A common pattern of composing tactics with subgoal addressing is to
+ try the first one, and then the second one only if the subgoal has
+ not been solved yet. Special care is required here to avoid bumping
+ into unrelated subgoals that happen to come after the original
+ subgoal. Assuming that there is only a single initial subgoal is a
+ very common error when implementing tactics!
+
+ Tactics with internal subgoal addressing should expose the subgoal
+  index as an @{text "int"} argument in full generality; a hardwired
+  subgoal 1 is inappropriate.
+
+ \medskip The main well-formedness conditions for proper tactics are
+ summarized as follows.
+
+ \begin{itemize}
+
+  \item General tactic failure is indicated by an empty result; only
+  serious faults may produce an exception.
+
+ \item The main conclusion must not be changed, apart from
+ instantiating schematic variables.
+
+ \item A tactic operates either uniformly on all subgoals, or
+ specifically on a selected subgoal (without bumping into unrelated
+ subgoals).
+
+ \item Range errors in subgoal addressing produce an empty result.
+
+ \end{itemize}
+
+ Some of these conditions are checked by higher-level goal
+ infrastructure (\secref{sec:results}); others are not checked
+ explicitly, and violating them merely results in ill-behaved tactics
+  experienced by the user (e.g.\ tactics that insist on being
+ applicable only to singleton goals, or disallow composition with
+ basic tacticals).
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML_type tactic: "thm -> thm Seq.seq"} \\
+ @{index_ML no_tac: tactic} \\
+ @{index_ML all_tac: tactic} \\
+ @{index_ML print_tac: "string -> tactic"} \\[1ex]
+ @{index_ML PRIMITIVE: "(thm -> thm) -> tactic"} \\[1ex]
+ @{index_ML SUBGOAL: "(term * int -> tactic) -> int -> tactic"} \\
+ @{index_ML CSUBGOAL: "(cterm * int -> tactic) -> int -> tactic"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML_type tactic} represents tactics. The well-formedness
+ conditions described above need to be observed. See also @{"file"
+ "~~/src/Pure/General/seq.ML"} for the underlying implementation of
+ lazy sequences.
+
+ \item @{ML_type "int -> tactic"} represents tactics with explicit
+ subgoal addressing, with well-formedness conditions as described
+ above.
+
+ \item @{ML no_tac} is a tactic that always fails, returning the
+ empty sequence.
+
+ \item @{ML all_tac} is a tactic that always succeeds, returning a
+ singleton sequence with unchanged goal state.
+
+ \item @{ML print_tac}~@{text "message"} is like @{ML all_tac}, but
+ prints a message together with the goal state on the tracing
+ channel.
+
+ \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
+ into a tactic with unique result. Exception @{ML THM} is considered
+ a regular tactic failure and produces an empty result; other
+ exceptions are passed through.
+
+ \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
+ most basic form to produce a tactic with subgoal addressing. The
+  given abstraction over the subgoal term and subgoal number allows
+  one to peek at the relevant information of the full goal state.  The
+ subgoal range is checked as required above.
+
+ \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
+ subgoal as @{ML_type cterm} instead of raw @{ML_type term}. This
+ avoids expensive re-certification in situations where the subgoal is
+ used directly for primitive inferences.
+
+ \end{description}
+*}
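+
+text {*
+  The following ML sketch illustrates @{ML SUBGOAL}: a hypothetical
+  tactic that peeks at the designated subgoal and succeeds with
+  unchanged goal state only if its conclusion is a Pure equality,
+  producing an empty result otherwise.  The criterion tested here is
+  arbitrary and merely for illustration.
+*}
+
+ML {*
+  (*sketch: subgoal addressing via SUBGOAL; note that range errors
+    are already mapped to empty results by the combinator*)
+  val equality_subgoal_tac = SUBGOAL (fn (goal, _) =>
+    (case Logic.strip_assums_concl goal of
+      Const ("==", _) $ _ $ _ => all_tac
+    | _ => no_tac));
+*}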
+
+
+subsection {* Resolution and assumption tactics \label{sec:resolve-assume-tac} *}
+
+text {* \emph{Resolution} is the most basic mechanism for refining a
+ subgoal using a theorem as object-level rule.
+ \emph{Elim-resolution} is particularly suited for elimination rules:
+ it resolves with a rule, proves its first premise by assumption, and
+ finally deletes that assumption from any new subgoals.
+ \emph{Destruct-resolution} is like elim-resolution, but the given
+ destruction rules are first turned into canonical elimination
+ format. \emph{Forward-resolution} is like destruct-resolution, but
+ without deleting the selected assumption. The @{text "r/e/d/f"}
+ naming convention is maintained for several different kinds of
+ resolution rules and tactics.
+
+ Assumption tactics close a subgoal by unifying some of its premises
+ against its conclusion.
+
+ \medskip All the tactics in this section operate on a subgoal
+ designated by a positive integer. Other subgoals might be affected
+ indirectly, due to instantiation of schematic variables.
+
+  There are various sources of non-determinism; the tactic result
+ sequence enumerates all possibilities of the following choices (if
+ applicable):
+
+ \begin{enumerate}
+
+ \item selecting one of the rules given as argument to the tactic;
+
+ \item selecting a subgoal premise to eliminate, unifying it against
+ the first premise of the rule;
+
+ \item unifying the conclusion of the subgoal to the conclusion of
+ the rule.
+
+ \end{enumerate}
+
+ Recall that higher-order unification may produce multiple results
+ that are enumerated here.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML resolve_tac: "thm list -> int -> tactic"} \\
+ @{index_ML eresolve_tac: "thm list -> int -> tactic"} \\
+ @{index_ML dresolve_tac: "thm list -> int -> tactic"} \\
+ @{index_ML forward_tac: "thm list -> int -> tactic"} \\[1ex]
+ @{index_ML assume_tac: "int -> tactic"} \\
+ @{index_ML eq_assume_tac: "int -> tactic"} \\[1ex]
+ @{index_ML match_tac: "thm list -> int -> tactic"} \\
+ @{index_ML ematch_tac: "thm list -> int -> tactic"} \\
+ @{index_ML dmatch_tac: "thm list -> int -> tactic"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML resolve_tac}~@{text "thms i"} refines the goal state
+ using the given theorems, which should normally be introduction
+ rules. The tactic resolves a rule's conclusion with subgoal @{text
+ i}, replacing it by the corresponding versions of the rule's
+ premises.
+
+ \item @{ML eresolve_tac}~@{text "thms i"} performs elim-resolution
+ with the given theorems, which should normally be elimination rules.
+
+ \item @{ML dresolve_tac}~@{text "thms i"} performs
+ destruct-resolution with the given theorems, which should normally
+ be destruction rules. This replaces an assumption by the result of
+ applying one of the rules.
+
+ \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
+ selected assumption is not deleted. It applies a rule to an
+ assumption, adding the result as a new assumption.
+
+ \item @{ML assume_tac}~@{text i} attempts to solve subgoal @{text i}
+ by assumption (modulo higher-order unification).
+
+ \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
+ only for immediate @{text "\<alpha>"}-convertibility instead of using
+ unification. It succeeds (with a unique next state) if one of the
+ assumptions is equal to the subgoal's conclusion. Since it does not
+ instantiate variables, it cannot make other subgoals unprovable.
+
+ \item @{ML match_tac}, @{ML ematch_tac}, and @{ML dmatch_tac} are
+ similar to @{ML resolve_tac}, @{ML eresolve_tac}, and @{ML
+ dresolve_tac}, respectively, but do not instantiate schematic
+ variables in the goal state.
+
+ Flexible subgoals are not updated at will, but are left alone.
+ Strictly speaking, matching means to treat the unknowns in the goal
+ state as constants; these tactics merely discard unifiers that would
+ update the goal state.
+
+ \end{description}
+*}
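+
+text {*
+  As a sketch of typical usage (a hypothetical helper, assuming the
+  standard Pure tacticals @{text "THEN_ALL_NEW"} and @{text "TRY"},
+  cf.\ \secref{sec:tacticals}), one may resolve with a list of
+  introduction rules and then attempt to close all emerging subgoals
+  by assumption:
+*}
+
+ML {*
+  (*sketch: resolve, then try assumption on every new subgoal*)
+  fun rule_by_assume_tac rules =
+    resolve_tac rules THEN_ALL_NEW (TRY o assume_tac);
+*}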
+
+
+subsection {* Explicit instantiation within a subgoal context *}
+
+text {* The main resolution tactics (\secref{sec:resolve-assume-tac})
+ use higher-order unification, which works well in many practical
+ situations despite its daunting theoretical properties.
+ Nonetheless, there are important problem classes where unguided
+ higher-order unification is not so useful. This typically involves
+ rules like universal elimination, existential introduction, or
+ equational substitution. Here the unification problem involves
+ fully flexible @{text "?P ?x"} schemes, which are hard to manage
+ without further hints.
+
+ By providing a (small) rigid term for @{text "?x"} explicitly, the
+ remaining unification problem is to assign a (large) term to @{text
+ "?P"}, according to the shape of the given subgoal. This is
+ sufficiently well-behaved in most practical situations.
+
+ \medskip Isabelle provides separate versions of the standard @{text
+ "r/e/d/f"} resolution tactics that allow to provide explicit
+ instantiations of unknowns of the given rule, wrt.\ terms that refer
+ to the implicit context of the selected subgoal.
+
+ An instantiation consists of a list of pairs of the form @{text
+ "(?x, t)"}, where @{text ?x} is a schematic variable occurring in
+ the given rule, and @{text t} is a term from the current proof
+ context, augmented by the local goal parameters of the selected
+ subgoal; cf.\ the @{text "focus"} operation described in
+ \secref{sec:variables}.
+
+ Entering the syntactic context of a subgoal is a brittle operation,
+ because its exact form is somewhat accidental, and the choice of
+ bound variable names depends on the presence of other local and
+ global names. Explicit renaming of subgoal parameters prior to
+ explicit instantiation might help to achieve a bit more robustness.
+
+ Type instantiations may be given as well, via pairs like @{text
+ "(?'a, \<tau>)"}. Type instantiations are distinguished from term
+ instantiations by the syntactic form of the schematic variable.
+ Types are instantiated before terms are. Since term instantiation
+ already performs type-inference as expected, explicit type
+ instantiations are seldom necessary.
+*}
+
+text %mlref {*
+ \begin{mldecls}
+ @{index_ML res_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+ @{index_ML eres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+ @{index_ML dres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
+ @{index_ML forw_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\[1ex]
+ @{index_ML rename_tac: "string list -> int -> tactic"} \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item @{ML res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
+ rule @{text thm} with the instantiations @{text insts}, as described
+ above, and then performs resolution on subgoal @{text i}.
+
+ \item @{ML eres_inst_tac} is like @{ML res_inst_tac}, but performs
+ elim-resolution.
+
+ \item @{ML dres_inst_tac} is like @{ML res_inst_tac}, but performs
+ destruct-resolution.
+
+ \item @{ML forw_inst_tac} is like @{ML dres_inst_tac} except that
+ the selected assumption is not deleted.
+
+ \item @{ML rename_tac}~@{text "names i"} renames the innermost
+ parameters of subgoal @{text i} according to the provided @{text
+  names} (which need to be distinct identifiers).
+
+ \end{description}
+*}
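+
+text {*
+  A hypothetical example of explicit instantiation: the rule argument
+  and the concrete term string @{text "a"} below are mere
+  placeholders; the string is parsed and type-checked relative to the
+  context of the selected subgoal, as described above.
+*}
+
+ML {*
+  (*sketch: instantiate schematic variable ?x of the given rule
+    explicitly, then resolve with subgoal i*)
+  fun inst_rule_tac ctxt rule i =
+    res_inst_tac ctxt [(("x", 0), "a")] rule i;
+*}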
+
+
+section {* Tacticals \label{sec:tacticals} *}
+
+text {*
+ A \emph{tactical} is a functional combinator for building up complex
+  tactics from simpler ones.  Typical tacticals perform sequential
+ composition, disjunction (choice), iteration, or goal addressing.
+ Various search strategies may be expressed via tacticals.
+
+ \medskip FIXME
+*}
+
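+text {*
+  Pending a proper exposition, here is a minimal ML sketch of tactic
+  combination, assuming the standard Pure tacticals @{text "ORELSE"}
+  and @{text "REPEAT"}:
+*}
+
+ML {*
+  (*sketch: exhaustively apply the given rules or close subgoals by
+    assumption; REPEAT iterates until the refinement fails, so the
+    overall tactic always succeeds (possibly with zero steps)*)
+  fun search_tac rules i =
+    REPEAT (resolve_tac rules i ORELSE assume_tac i);
+*}
+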
+end
--- a/doc-src/IsarImplementation/Thy/base.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-
-(* $Id$ *)
-
-theory base
-imports Pure
-uses "../../antiquote_setup.ML"
-begin
-
-end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Base.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,29 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Base}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Base\isanewline
+\isakeyword{imports}\ Pure\isanewline
+\isakeyword{uses}\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isacharslash}{\isachardot}{\isachardot}{\isacharslash}antiquote{\isacharunderscore}setup{\isachardot}ML{\isachardoublequoteclose}\isanewline
+\isakeyword{begin}\isanewline
+\isanewline
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+\isanewline
+%
+\endisadelimtheory
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Integration.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,520 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Integration}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Integration\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{System integration%
+}
+\isamarkuptrue%
+%
+\isamarkupsection{Isar toplevel \label{sec:isar-toplevel}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The Isar toplevel may be considered the central hub of the
+ Isabelle/Isar system, where all key components and sub-systems are
+ integrated into a single read-eval-print loop of Isar commands. We
+ shall even incorporate the existing {\ML} toplevel of the compiler
+ and run-time system (cf.\ \secref{sec:ML-toplevel}).
+
+ Isabelle/Isar departs from the original ``LCF system architecture''
+ where {\ML} was really The Meta Language for defining theories and
+ conducting proofs. Instead, {\ML} now only serves as the
+ implementation language for the system (and user extensions), while
+ the specific Isar toplevel supports the concepts of theory and proof
+ development natively. This includes the graph structure of theories
+ and the block structure of proofs, support for unlimited undo,
+ facilities for tracing, debugging, timing, profiling etc.
+
+ \medskip The toplevel maintains an implicit state, which is
+ transformed by a sequence of transitions -- either interactively or
+ in batch-mode. In interactive mode, Isar state transitions are
+ encapsulated as safe transactions, such that both failure and undo
+ are handled conveniently without destroying the underlying draft
+ theory (cf.~\secref{sec:context-theory}). In batch mode,
+ transitions operate in a linear (destructive) fashion, such that
+ error conditions abort the present attempt to construct a theory or
+ proof altogether.
+
+ The toplevel state is a disjoint sum of empty \isa{toplevel}, or
+ \isa{theory}, or \isa{proof}. On entering the main Isar loop we
+ start with an empty toplevel. A theory is commenced by giving a
+ \isa{{\isasymTHEORY}} header; within a theory we may issue theory
+ commands such as \isa{{\isasymDEFINITION}}, or state a \isa{{\isasymTHEOREM}} to be proven. Now we are within a proof state, with a
+ rich collection of Isar proof commands for structured proof
+ composition, or unstructured proof scripts. When the proof is
+ concluded we get back to the theory, which is then updated by
+ storing the resulting fact. Further theory declarations or theorem
+ statements with proofs may follow, until we eventually conclude the
+ theory development by issuing \isa{{\isasymEND}}. The resulting theory
+ is then stored within the theory database and we are back to the
+ empty toplevel.
+
+ In addition to these proper state transformations, there are also
+ some diagnostic commands for peeking at the toplevel state without
+ modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
+ \isakeyword{print-cases}).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{Toplevel.state}\verb|type Toplevel.state| \\
+ \indexdef{}{ML}{Toplevel.UNDEF}\verb|Toplevel.UNDEF: exn| \\
+ \indexdef{}{ML}{Toplevel.is\_toplevel}\verb|Toplevel.is_toplevel: Toplevel.state -> bool| \\
+ \indexdef{}{ML}{Toplevel.theory\_of}\verb|Toplevel.theory_of: Toplevel.state -> theory| \\
+ \indexdef{}{ML}{Toplevel.proof\_of}\verb|Toplevel.proof_of: Toplevel.state -> Proof.state| \\
+ \indexdef{}{ML}{Toplevel.debug}\verb|Toplevel.debug: bool ref| \\
+ \indexdef{}{ML}{Toplevel.timing}\verb|Toplevel.timing: bool ref| \\
+ \indexdef{}{ML}{Toplevel.profiling}\verb|Toplevel.profiling: int ref| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Toplevel.state| represents Isar toplevel states,
+ which are normally manipulated through the concept of toplevel
+ transitions only (\secref{sec:toplevel-transition}). Also note that
+ a raw toplevel state is subject to the same linearity restrictions
+ as a theory context (cf.~\secref{sec:context-theory}).
+
+ \item \verb|Toplevel.UNDEF| is raised for undefined toplevel
+ operations. Many operations work only partially for certain cases,
+ since \verb|Toplevel.state| is a sum type.
+
+ \item \verb|Toplevel.is_toplevel|~\isa{state} checks for an empty
+ toplevel state.
+
+ \item \verb|Toplevel.theory_of|~\isa{state} selects the theory of
+ a theory or proof (!), otherwise raises \verb|Toplevel.UNDEF|.
+
+ \item \verb|Toplevel.proof_of|~\isa{state} selects the Isar proof
+ state if available, otherwise raises \verb|Toplevel.UNDEF|.
+
+ \item \verb|set Toplevel.debug| makes the toplevel print further
+ details about internal error conditions, exceptions being raised
+ etc.
+
+ \item \verb|set Toplevel.timing| makes the toplevel print timing
+ information for each Isar command being executed.
+
+ \item \verb|Toplevel.profiling|~\verb|:=|~\isa{n} controls
+ low-level profiling of the underlying {\ML} runtime system. For
+ Poly/ML, \isa{n\ {\isacharequal}\ {\isadigit{1}}} means time and \isa{n\ {\isacharequal}\ {\isadigit{2}}} space
+ profiling.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
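+%
+\begin{isamarkuptext}%
+As a minimal sketch (a hypothetical helper, relying only on the
+partiality behavior documented above), a total variant of
+\verb|Toplevel.theory_of| may be obtained by handling
+\verb|Toplevel.UNDEF|:
+
+\begin{verbatim}
+(* sketch: turn the partial selector into a total one *)
+fun try_theory_of state =
+  SOME (Toplevel.theory_of state)
+    handle Toplevel.UNDEF => NONE;
+\end{verbatim}%
+\end{isamarkuptext}%
+\isamarkuptrue%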
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Toplevel transitions \label{sec:toplevel-transition}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+An Isar toplevel transition consists of a partial function on the
+ toplevel state, with additional information for diagnostics and
+ error reporting: there are fields for command name, source position,
+ optional source text, as well as flags for interactive-only commands
+ (which issue a warning in batch-mode), printing of result state,
+ etc.
+
+ The operational part is represented as the sequential union of a
+ list of partial functions, which are tried in turn until the first
+ one succeeds. This acts like an outer case-expression for various
+ alternative state transitions. For example, \isakeyword{qed} acts
+ differently for local proofs vs.\ the global ending of the main
+ proof.
+
+ Toplevel transitions are composed via transition transformers.
+ Internally, Isar commands are put together from an empty transition
+ extended by name and source position (and optional source text). It
+ is then left to the individual command parser to turn the given
+ concrete syntax into a suitable transition transformer that adjoins
+ actual operations on a theory or proof state etc.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{Toplevel.print}\verb|Toplevel.print: Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.no\_timing}\verb|Toplevel.no_timing: Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.keep}\verb|Toplevel.keep: (Toplevel.state -> unit) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.theory}\verb|Toplevel.theory: (theory -> theory) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.theory\_to\_proof}\verb|Toplevel.theory_to_proof: (theory -> Proof.state) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.proof}\verb|Toplevel.proof: (Proof.state -> Proof.state) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.proofs}\verb|Toplevel.proofs: (Proof.state -> Proof.state Seq.seq) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \indexdef{}{ML}{Toplevel.end\_proof}\verb|Toplevel.end_proof: (bool -> Proof.state -> Proof.context) ->|\isasep\isanewline%
+\verb| Toplevel.transition -> Toplevel.transition| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Toplevel.print|~\isa{tr} sets the print flag, which
+ causes the toplevel loop to echo the result state (in interactive
+ mode).
+
+ \item \verb|Toplevel.no_timing|~\isa{tr} indicates that the
+ transition should never show timing information, e.g.\ because it is
+ a diagnostic command.
+
+ \item \verb|Toplevel.keep|~\isa{tr} adjoins a diagnostic
+ function.
+
+ \item \verb|Toplevel.theory|~\isa{tr} adjoins a theory
+ transformer.
+
+ \item \verb|Toplevel.theory_to_proof|~\isa{tr} adjoins a global
+ goal function, which turns a theory into a proof state. The theory
+ may be changed before entering the proof; the generic Isar goal
+ setup includes an argument that specifies how to apply the proven
+ result to the theory, when the proof is finished.
+
+ \item \verb|Toplevel.proof|~\isa{tr} adjoins a deterministic
+ proof command, with a singleton result.
+
+ \item \verb|Toplevel.proofs|~\isa{tr} adjoins a general proof
+ command, with zero or more result states (represented as a lazy
+ list).
+
+ \item \verb|Toplevel.end_proof|~\isa{tr} adjoins a concluding
+ proof command that returns the resulting theory, after storing the
+ resulting facts in the context etc.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
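+%
+\begin{isamarkuptext}%
+The following sketch assembles a hypothetical diagnostic command from
+these combinators; it assumes the usual primitives \verb|Toplevel.empty|
+and \verb|Toplevel.name| for starting out with a named transition:
+
+\begin{verbatim}
+(* sketch: a diagnostic command printing the current theory name *)
+val print_theory_name =
+  Toplevel.empty
+  |> Toplevel.name "print_theory_name"
+  |> Toplevel.no_timing
+  |> Toplevel.keep (fn state =>
+       writeln (Context.theory_name (Toplevel.theory_of state)));
+\end{verbatim}%
+\end{isamarkuptext}%
+\isamarkuptrue%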
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Toplevel control%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+There are a few special control commands that modify the behavior
+ of the toplevel itself, and only make sense in interactive mode.  Under
+ normal circumstances, the user encounters these only implicitly as
+ part of the protocol between the Isabelle/Isar system and a
+ user-interface such as ProofGeneral.
+
+ \begin{description}
+
+ \item \isacommand{undo} follows the three-level hierarchy of empty
+ toplevel vs.\ theory vs.\ proof: undo within a proof reverts to the
+ previous proof context, undo after a proof reverts to the theory
+ before the initial goal statement, undo of a theory command reverts
+ to the previous theory value, undo of a theory header discontinues
+ the current theory development and removes it from the theory
+ database (\secref{sec:theory-database}).
+
+ \item \isacommand{kill} aborts the current level of development:
+ kill in a proof context reverts to the theory before the initial
+ goal statement, kill in a theory context aborts the current theory
+ development, removing it from the database.
+
+ \item \isacommand{exit} drops out of the Isar toplevel into the
+ underlying {\ML} toplevel (\secref{sec:ML-toplevel}). The Isar
+ toplevel state is preserved and may be continued later.
+
+ \item \isacommand{quit} terminates the Isabelle/Isar process without
+ saving.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{ML toplevel \label{sec:ML-toplevel}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The {\ML} toplevel provides a read-compile-eval-print loop for {\ML}
+ values, types, structures, and functors. {\ML} declarations operate
+ on the global system state, which consists of the compiler
+ environment plus the values of {\ML} reference variables. There is
+ no clean way to undo {\ML} declarations, except for reverting to a
+ previously saved state of the whole Isabelle process. {\ML} input
+ is either read interactively from a TTY, or from a string (usually
+ within a theory text), or from a source file (usually loaded from a
+ theory).
+
+ Whenever the {\ML} toplevel is active, the current Isabelle theory
+ context is passed as an internal reference variable. Thus {\ML}
+ code may access the theory context during compilation, it may even
+ change the value of a theory under construction --- while
+ observing the usual linearity restrictions
+ (cf.~\secref{sec:context-theory}).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{the\_context}\verb|the_context: unit -> theory| \\
+ \indexdef{}{ML}{Context.$>$$>$ }\verb|Context.>> : (Context.generic -> Context.generic) -> unit| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|the_context ()| refers to the theory context of the
+ {\ML} toplevel --- at compile time! {\ML} code needs to take care
+ to refer to \verb|the_context ()| correctly. Recall that
+ evaluation of a function body is delayed until actual runtime.
+ Moreover, persistent {\ML} toplevel bindings to an unfinished theory
+ should be avoided: code should either project out the desired
+ information immediately, or produce an explicit \verb|theory_ref| (cf.\ \secref{sec:context-theory}).
+
+ \item \verb|Context.>>|~\isa{f} applies context transformation
+ \isa{f} to the implicit context of the {\ML} toplevel.
+
+ \end{description}
+
+ It is very important to note that the above functions are really
+ restricted to the compile time, even though the {\ML} compiler is
+ invoked at runtime! The majority of {\ML} code uses explicit
+ functional arguments of a theory or proof context instead. Thus it
+ may be invoked for an arbitrary context later on, without having to
+ worry about any operational details.
+
+ \bigskip
+
+ \begin{mldecls}
+ \indexdef{}{ML}{Isar.main}\verb|Isar.main: unit -> unit| \\
+ \indexdef{}{ML}{Isar.loop}\verb|Isar.loop: unit -> unit| \\
+ \indexdef{}{ML}{Isar.state}\verb|Isar.state: unit -> Toplevel.state| \\
+ \indexdef{}{ML}{Isar.exn}\verb|Isar.exn: unit -> (exn * string) option| \\
+ \indexdef{}{ML}{Isar.context}\verb|Isar.context: unit -> Proof.context| \\
+ \indexdef{}{ML}{Isar.goal}\verb|Isar.goal: unit -> thm| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Isar.main ()| invokes the Isar toplevel from {\ML},
+ initializing an empty toplevel state.
+
+ \item \verb|Isar.loop ()| continues the Isar toplevel with the
+ current state, after having dropped out of the Isar toplevel loop.
+
+ \item \verb|Isar.state ()| and \verb|Isar.exn ()| get current
+ toplevel state and error condition, respectively. This only works
+ after having dropped out of the Isar toplevel loop.
+
+ \item \verb|Isar.context ()| produces the proof context from \verb|Isar.state ()|, analogous to \verb|Context.proof_of|
+ (\secref{sec:generic-context}).
+
+ \item \verb|Isar.goal ()| picks the tactical goal from \verb|Isar.state ()|, represented as a theorem according to
+ \secref{sec:tactical-goals}.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
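+%
+\begin{isamarkuptext}%
+A minimal sketch of a compile-time context transformation via
+\verb|Context.>>|; the particular transformation (mapping the
+underlying theory with \verb|Sign.add_path|) is merely illustrative:
+
+\begin{verbatim}
+(* sketch: update the implicit theory of the ML toplevel *)
+val _ =
+  Context.>> (Context.map_theory (Sign.add_path "experiment"));
+\end{verbatim}%
+\end{isamarkuptext}%
+\isamarkuptrue%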
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Theory database \label{sec:theory-database}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The theory database maintains a collection of theories, together
+ with some administrative information about their original sources,
+ which are held in an external store (i.e.\ some directory within the
+ regular file system).
+
+ The theory database is organized as a directed acyclic graph;
+ entries are referenced by theory name. Although some additional
+ interfaces allow including a directory specification as well, this
+ is only a hint to the underlying theory loader. The internal theory
+ name space is flat!
+
+ Theory \isa{A} is associated with the main theory file \isa{A}\verb,.thy,, which needs to be accessible through the theory
+ loader path. Any number of additional {\ML} source files may be
+ associated with each theory, by declaring these dependencies in the
+ theory header as \isa{{\isasymUSES}}, and loading them consecutively
+ within the theory context. The system keeps track of incoming {\ML}
+ sources and associates them with the current theory. The file
+ \isa{A}\verb,.ML, is loaded after a theory has been concluded, in
+ order to support legacy {\ML} proof scripts.
+
+ The basic internal actions of the theory database are \isa{update}, \isa{outdate}, and \isa{remove}:
+
+ \begin{itemize}
+
+ \item \isa{update\ A} introduces a link of \isa{A} with a
+ \isa{theory} value of the same name; it asserts that the theory
+ sources are now consistent with that value;
+
+ \item \isa{outdate\ A} invalidates the link of a theory database
+ entry to its sources, but retains the present theory value;
+
+ \item \isa{remove\ A} deletes entry \isa{A} from the theory
+ database.
+
+ \end{itemize}
+
+ These actions are propagated to sub- or super-graphs of a theory
+ entry as expected, in order to preserve global consistency of the
+ state of all loaded theories with the sources of the external store.
+ This implies certain causalities between actions: \isa{update}
+ or \isa{outdate} of an entry will \isa{outdate} all
+ descendants; \isa{remove} will \isa{remove} all descendants.
+
+ \medskip There are separate user-level interfaces to operate on the
+ theory database directly or indirectly. The primitive actions then
+ just happen automatically while working with the system. In
+ particular, processing a theory header \isa{{\isasymTHEORY}\ A\ {\isasymIMPORTS}\ B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n\ {\isasymBEGIN}} ensures that the
+ sub-graph of the collective imports \isa{B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n}
+ is up-to-date, too. Earlier theories are reloaded as required, with
+ \isa{update} actions proceeding in topological order according to
+ theory dependencies.  There may also be a wave of implied \isa{outdate} actions for derived theory nodes until a stable situation
+ is achieved eventually.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{theory}\verb|theory: string -> theory| \\
+ \indexdef{}{ML}{use\_thy}\verb|use_thy: string -> unit| \\
+ \indexdef{}{ML}{use\_thys}\verb|use_thys: string list -> unit| \\
+ \indexdef{}{ML}{ThyInfo.touch\_thy}\verb|ThyInfo.touch_thy: string -> unit| \\
+ \indexdef{}{ML}{ThyInfo.remove\_thy}\verb|ThyInfo.remove_thy: string -> unit| \\[1ex]
+ \indexdef{}{ML}{ThyInfo.begin\_theory}\verb|ThyInfo.begin_theory|\verb|: ... -> bool -> theory| \\
+ \indexdef{}{ML}{ThyInfo.end\_theory}\verb|ThyInfo.end_theory: theory -> unit| \\
+ \indexdef{}{ML}{ThyInfo.register\_theory}\verb|ThyInfo.register_theory: theory -> unit| \\[1ex]
+ \verb|datatype action = Update |\verb,|,\verb| Outdate |\verb,|,\verb| Remove| \\
+ \indexdef{}{ML}{ThyInfo.add\_hook}\verb|ThyInfo.add_hook: (ThyInfo.action -> string -> unit) -> unit| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|theory|~\isa{A} retrieves the theory value presently
+ associated with name \isa{A}. Note that the result might be
+ outdated.
+
+ \item \verb|use_thy|~\isa{A} ensures that theory \isa{A} is fully
+ up-to-date wrt.\ the external file store, reloading outdated
+ ancestors as required.
+
+ \item \verb|use_thys| is similar to \verb|use_thy|, but handles
+ several theories simultaneously. Thus it acts like processing the
+ import header of a theory, though without performing the merge of
+ the result.
+
+ \item \verb|ThyInfo.touch_thy|~\isa{A} performs an \isa{outdate} action
+ on theory \isa{A} and all descendants.
+
+ \item \verb|ThyInfo.remove_thy|~\isa{A} deletes theory \isa{A} and all
+ descendants from the theory database.
+
+ \item \verb|ThyInfo.begin_theory| is the basic operation behind a
+ \isa{{\isasymTHEORY}} header declaration.  This {\ML} function is
+ normally not invoked directly.
+
+ \item \verb|ThyInfo.end_theory| concludes the loading of a theory
+ proper and stores the result in the theory database.
+
+ \item \verb|ThyInfo.register_theory|~\isa{text\ thy} registers an
+ existing theory value with the theory loader database. There is no
+ management of associated sources.
+
+ \item \verb|ThyInfo.add_hook|~\isa{f} registers function \isa{f} as a hook for theory database actions. The function will be
+ invoked with the action and theory name being involved; thus derived
+ actions may be performed in associated system components, e.g.\
+ maintaining the state of an editor for the theory sources.
+
+ The kind and order of actions occurring in practice depends both on
+ user interactions and the internal process of resolving theory
+ imports. Hooks should not rely on a particular policy here! Any
+ exceptions raised by the hook are ignored.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
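+%
+\begin{isamarkuptext}%
+A minimal sketch of a theory-loader hook, using the \verb|action|
+datatype given above (the output format is arbitrary):
+
+\begin{verbatim}
+(* sketch: trace all theory database actions *)
+val _ = ThyInfo.add_hook (fn action => fn name =>
+  writeln ("theory " ^ name ^ ": " ^
+    (case action of
+      ThyInfo.Update => "update"
+    | ThyInfo.Outdate => "outdate"
+    | ThyInfo.Remove => "remove")));
+\end{verbatim}%
+\end{isamarkuptext}%
+\isamarkuptrue%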
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Isar.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,86 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Isar}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Isar\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Isar language elements%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The primary Isar language consists of three main categories of
+ language elements:
+
+ \begin{enumerate}
+
+ \item Proof commands
+
+ \item Proof methods
+
+ \item Attributes
+
+ \end{enumerate}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Proof commands%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Proof methods%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Attributes%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Local_Theory.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,220 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Local{\isacharunderscore}Theory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Local{\isacharunderscore}Theory\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Local theory specifications%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{local theory} combines aspects of both theory and proof
+ context (cf.\ \secref{sec:context}), such that definitional
+ specifications may be given relatively to parameters and
+ assumptions. A local theory is represented as a regular proof
+ context, augmented by administrative data about the \emph{target
+ context}.
+
+ The target is usually derived from the background theory by adding
+ local \isa{{\isasymFIX}} and \isa{{\isasymASSUME}} elements, plus
+ suitable modifications of non-logical context data (e.g.\ a special
+ type-checking discipline). Once initialized, the target is ready to
+ absorb definitional primitives: \isa{{\isasymDEFINE}} for terms and
+ \isa{{\isasymNOTE}} for theorems. Such definitions may get
+ transformed in a target-specific way, but the programming interface
+ hides such details.
+
+ Isabelle/Pure provides target mechanisms for locales, type-classes,
+ type-class instantiations, and general overloading. In principle,
+ users can implement new targets as well, but this rather arcane
+ discipline is beyond the scope of this manual. In contrast,
+ implementing derived definitional packages to be used within a local
+ theory context is quite easy: the interfaces are even simpler and
+ more abstract than the underlying primitives for raw theories.
+
+ Many definitional packages for local theories are available in
+ Isabelle. Although a few old packages only work for global
+ theories, the local theory interface is already the standard way of
+ implementing definitional packages in Isabelle.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Definitional elements%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+There are separate elements \isa{{\isasymDEFINE}\ c\ {\isasymequiv}\ t} for terms, and
+ \isa{{\isasymNOTE}\ b\ {\isacharequal}\ thm} for theorems. Types are treated
+ implicitly, according to Hindley-Milner discipline (cf.\
+ \secref{sec:variables}). These definitional primitives essentially
+ act like \isa{let}-bindings within a local context that may
+ already contain earlier \isa{let}-bindings and some initial
+ \isa{{\isasymlambda}}-bindings. Thus we gain \emph{dependent definitions}
+ that are relative to an initial axiomatic context. The following
+ diagram illustrates this idea of axiomatic elements versus
+ definitional elements:
+
+ \begin{center}
+ \begin{tabular}{|l|l|l|}
+ \hline
+ & \isa{{\isasymlambda}}-binding & \isa{let}-binding \\
+ \hline
+ types & fixed \isa{{\isasymalpha}} & arbitrary \isa{{\isasymbeta}} \\
+ terms & \isa{{\isasymFIX}\ x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} & \isa{{\isasymDEFINE}\ c\ {\isasymequiv}\ t} \\
+ theorems & \isa{{\isasymASSUME}\ a{\isacharcolon}\ A} & \isa{{\isasymNOTE}\ b\ {\isacharequal}\ \isactrlBG B\isactrlEN } \\
+ \hline
+ \end{tabular}
+ \end{center}
+
+ A user package merely needs to produce suitable \isa{{\isasymDEFINE}}
+ and \isa{{\isasymNOTE}} elements according to the application. For
+ example, a package for inductive definitions might first \isa{{\isasymDEFINE}} a certain predicate as some fixed-point construction,
+ then \isa{{\isasymNOTE}} a proven result about monotonicity of the
+ functor involved here, and then produce further derived concepts via
+ additional \isa{{\isasymDEFINE}} and \isa{{\isasymNOTE}} elements.
+
+ The cumulative sequence of \isa{{\isasymDEFINE}} and \isa{{\isasymNOTE}}
+ produced at package runtime is managed by the local theory
+ infrastructure by means of an \emph{auxiliary context}. Thus the
+ system holds up the impression of working within a fully abstract
+ situation with hypothetical entities: \isa{{\isasymDEFINE}\ c\ {\isasymequiv}\ t}
+ always results in a literal fact \isa{\isactrlBG c\ {\isasymequiv}\ t\isactrlEN }, where
+ \isa{c} is a fixed variable.  The details about
+ global constants, name spaces etc. are handled internally.
+
+ So the general structure of a local theory is a sandwich of three
+ layers:
+
+ \begin{center}
+ \framebox{\quad auxiliary context \quad\framebox{\quad target context \quad\framebox{\quad background theory\quad}}}
+ \end{center}
+
+ \noindent When a definitional package is finished, the auxiliary
+ context is reset to the target context. The target now holds
+ definitions for terms and theorems that stem from the hypothetical
+ \isa{{\isasymDEFINE}} and \isa{{\isasymNOTE}} elements, transformed by
+ the particular target policy (see
+ \cite[\S4--5]{Haftmann-Wenzel:2009} for details).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{local\_theory}\verb|type local_theory = Proof.context| \\
+ \indexdef{}{ML}{TheoryTarget.init}\verb|TheoryTarget.init: string option -> theory -> local_theory| \\[1ex]
+ \indexdef{}{ML}{LocalTheory.define}\verb|LocalTheory.define: string ->|\isasep\isanewline%
+\verb| (binding * mixfix) * (Attrib.binding * term) -> local_theory ->|\isasep\isanewline%
+\verb| (term * (string * thm)) * local_theory| \\
+ \indexdef{}{ML}{LocalTheory.note}\verb|LocalTheory.note: string ->|\isasep\isanewline%
+\verb| Attrib.binding * thm list -> local_theory ->|\isasep\isanewline%
+\verb| (string * thm list) * local_theory| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|local_theory| represents local theories. Although
+ this is merely an alias for \verb|Proof.context|, it is
+ semantically a subtype of the same: a \verb|local_theory| holds
+ target information as special context data. Subtyping means that
+ any value \isa{lthy{\isacharcolon}}~\verb|local_theory| can also be used
+ with operations expecting a regular \isa{ctxt{\isacharcolon}}~\verb|Proof.context|.
+
+ \item \verb|TheoryTarget.init|~\isa{NONE\ thy} initializes a
+ trivial local theory from the given background theory.
+ Alternatively, \isa{SOME\ name} may be given to initialize a
+ \hyperlink{command.locale}{\mbox{\isa{\isacommand{locale}}}} or \hyperlink{command.class}{\mbox{\isa{\isacommand{class}}}} context (a fully-qualified
+ internal name is expected here). This is useful for experimentation
+ --- normally the Isar toplevel already takes care to initialize the
+ local theory context.
+
+ \item \verb|LocalTheory.define|~\isa{kind\ {\isacharparenleft}{\isacharparenleft}b{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isacharparenleft}a{\isacharcomma}\ rhs{\isacharparenright}{\isacharparenright}\ lthy} defines a local entity according to the specification that is
+ given relatively to the current \isa{lthy} context. In
+ particular the term of the RHS may refer to earlier local entities
+ from the auxiliary context, or hypothetical parameters from the
+ target context. The result is the newly defined term (which is
+ always a fixed variable with exactly the same name as specified for
+ the LHS), together with an equational theorem that states the
+ definition as a hypothetical fact.
+
+ Unless an explicit name binding is given for the RHS, the resulting
+ fact will be called \isa{b{\isacharunderscore}def}. Any given attributes are
+ applied to that same fact --- immediately in the auxiliary context
+ \emph{and} in any transformed versions stemming from target-specific
+ policies or any later interpretations of results from the target
+ context (think of \hyperlink{command.locale}{\mbox{\isa{\isacommand{locale}}}} and \hyperlink{command.interpretation}{\mbox{\isa{\isacommand{interpretation}}}},
+ for example). This means that attributes should be usually plain
+ declarations such as \hyperlink{attribute.simp}{\mbox{\isa{simp}}}, while non-trivial rules like
+ \hyperlink{attribute.simplified}{\mbox{\isa{simplified}}} are better avoided.
+
+ The \isa{kind} determines the theorem kind tag of the resulting
+ fact. Typical examples are \verb|Thm.definitionK|, \verb|Thm.theoremK|, or \verb|Thm.internalK|.
+
+ \item \verb|LocalTheory.note|~\isa{kind\ {\isacharparenleft}a{\isacharcomma}\ ths{\isacharparenright}\ lthy} is
+ analogous to \verb|LocalTheory.define|, but defines facts instead of
+ terms. There is also a slightly more general variant \verb|LocalTheory.notes| that defines several facts (with attribute
+ expressions) simultaneously.
+
+ This is essentially the internal version of the \hyperlink{command.lemmas}{\mbox{\isa{\isacommand{lemmas}}}}
+ command, or \hyperlink{command.declare}{\mbox{\isa{\isacommand{declare}}}} if an empty name binding is given.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
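+%
+\begin{isamarkuptext}%
+The following ML sketch shows a typical single step of a derived
+definitional package; the binding \verb|b| and the right-hand side
+\verb|rhs| are assumed to be produced by the package beforehand:
+
+\begin{verbatim}
+(* sketch: define a term; the fact is named b_def by default *)
+fun package_define b rhs lthy =
+  lthy |> LocalTheory.define Thm.definitionK
+    ((b, NoSyn), (Attrib.empty_binding, rhs));
+\end{verbatim}%
+\end{isamarkuptext}%
+\isamarkuptrue%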
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Morphisms and declarations%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Logic.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,959 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Logic}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Logic\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Primitive logic \label{ch:logic}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The logical foundations of Isabelle/Isar are that of the Pure logic,
+ which has been introduced as a Natural Deduction framework in
+ \cite{paulson700}. This is essentially the same logic as ``\isa{{\isasymlambda}HOL}'' in the more abstract setting of Pure Type Systems (PTS)
+ \cite{Barendregt-Geuvers:2001}, although there are some key
+ differences in the specific treatment of simple types in
+ Isabelle/Pure.
+
+ Following type-theoretic parlance, the Pure logic consists of three
+ levels of \isa{{\isasymlambda}}-calculus with corresponding arrows, \isa{{\isasymRightarrow}} for syntactic function space (terms depending on terms), \isa{{\isasymAnd}} for universal quantification (proofs depending on terms), and
+ \isa{{\isasymLongrightarrow}} for implication (proofs depending on proofs).
+
+ Derivations are relative to a logical theory, which declares type
+ constructors, constants, and axioms. Theory declarations support
+ schematic polymorphism, which is strictly speaking outside the
+ logic.\footnote{This is the deeper logical reason why the theory
+ context \isa{{\isasymTheta}} is separate from the proof context \isa{{\isasymGamma}}
+ of the core calculus.}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Types \label{sec:types}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The language of types is an uninterpreted order-sorted first-order
+ algebra; types are qualified by ordered type classes.
+
+ \medskip A \emph{type class} is an abstract syntactic entity
+ declared in the theory context. The \emph{subclass relation} \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}} is specified by stating an acyclic
+ generating relation; the transitive closure is maintained
+ internally. The resulting relation is an ordering: reflexive,
+ transitive, and antisymmetric.
+
+ A \emph{sort} is a list of type classes written as \isa{s\ {\isacharequal}\ {\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub m{\isacharbraceright}}, which represents symbolic
+ intersection. Notationally, the curly braces are omitted for
+ singleton intersections, i.e.\ any class \isa{c} may be read as
+ a sort \isa{{\isacharbraceleft}c{\isacharbraceright}}. The ordering on type classes is extended to
+ sorts according to the meaning of intersections: \isa{{\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}\ c\isactrlisub m{\isacharbraceright}\ {\isasymsubseteq}\ {\isacharbraceleft}d\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ d\isactrlisub n{\isacharbraceright}} iff
+ \isa{{\isasymforall}j{\isachardot}\ {\isasymexists}i{\isachardot}\ c\isactrlisub i\ {\isasymsubseteq}\ d\isactrlisub j}. The empty intersection
+ \isa{{\isacharbraceleft}{\isacharbraceright}} refers to the universal sort, which is the largest
+ element wrt.\ the sort order. The intersections of all (finitely
+ many) classes declared in the current theory are the minimal
+ elements wrt.\ the sort order.
+
+ \medskip A \emph{fixed type variable} is a pair of a basic name
+ (starting with a \isa{{\isacharprime}} character) and a sort constraint, e.g.\
+ \isa{{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ s{\isacharparenright}} which is usually printed as \isa{{\isasymalpha}\isactrlisub s}.
+ A \emph{schematic type variable} is a pair of an indexname and a
+ sort constraint, e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ s{\isacharparenright}} which is usually
+ printed as \isa{{\isacharquery}{\isasymalpha}\isactrlisub s}.
+
+ Note that \emph{all} syntactic components contribute to the identity
+ of type variables, including the sort constraint. The core logic
+ handles type variables with the same name but different sorts as
+ different, although some outer layers of the system make it hard to
+ produce anything like this.
+
+ A \emph{type constructor} \isa{{\isasymkappa}} is a \isa{k}-ary operator
+ on types declared in the theory. Type constructor application is
+ written postfix as \isa{{\isacharparenleft}{\isasymalpha}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlisub k{\isacharparenright}{\isasymkappa}}. For
+ \isa{k\ {\isacharequal}\ {\isadigit{0}}} the argument tuple is omitted, e.g.\ \isa{prop}
+ instead of \isa{{\isacharparenleft}{\isacharparenright}prop}. For \isa{k\ {\isacharequal}\ {\isadigit{1}}} the parentheses
+ are omitted, e.g.\ \isa{{\isasymalpha}\ list} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharparenright}list}.
+ Further notation is provided for specific constructors, notably the
+ right-associative infix \isa{{\isasymalpha}\ {\isasymRightarrow}\ {\isasymbeta}} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharcomma}\ {\isasymbeta}{\isacharparenright}fun}.
+
+ A \emph{type} is defined inductively over type variables and type
+ constructors as follows: \isa{{\isasymtau}\ {\isacharequal}\ {\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharquery}{\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharparenleft}{\isasymtau}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlsub k{\isacharparenright}{\isasymkappa}}.
+
+ A \emph{type abbreviation} is a syntactic definition \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} of an arbitrary type expression \isa{{\isasymtau}} over
+ variables \isa{\isactrlvec {\isasymalpha}}. Type abbreviations appear as type
+ constructors in the syntax, but are expanded before entering the
+ logical core.
+
+ A \emph{type arity} declares the image behavior of a type
+ constructor wrt.\ the algebra of sorts: \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}s} means that \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub k{\isacharparenright}{\isasymkappa}} is
+ of sort \isa{s} if every argument type \isa{{\isasymtau}\isactrlisub i} is
+ of sort \isa{s\isactrlisub i}. Arity declarations are implicitly
+ completed, i.e.\ \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c} entails \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c{\isacharprime}} for any \isa{c{\isacharprime}\ {\isasymsupseteq}\ c}.
+
+ \medskip The sort algebra is always maintained as \emph{coregular},
+ which means that type arities are consistent with the subclass
+ relation: for any type constructor \isa{{\isasymkappa}}, and classes \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}, and arities \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{1}}{\isacharparenright}c\isactrlisub {\isadigit{1}}} and \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{2}}{\isacharparenright}c\isactrlisub {\isadigit{2}}} holds \isa{\isactrlvec s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ \isactrlvec s\isactrlisub {\isadigit{2}}} component-wise.
+
+ The key property of a coregular order-sorted algebra is that sort
+ constraints can be solved in a most general fashion: for each type
+ constructor \isa{{\isasymkappa}} and sort \isa{s} there is a most general
+ vector of argument sorts \isa{{\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}} such
+ that a type scheme \isa{{\isacharparenleft}{\isasymalpha}\isactrlbsub s\isactrlisub {\isadigit{1}}\isactrlesub {\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlbsub s\isactrlisub k\isactrlesub {\isacharparenright}{\isasymkappa}} is of sort \isa{s}.
+ Consequently, type unification has most general solutions (modulo
+ equivalence of sorts), so type-inference produces primary types as
+ expected \cite{nipkow-prehofer}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{class}\verb|type class| \\
+ \indexdef{}{ML type}{sort}\verb|type sort| \\
+ \indexdef{}{ML type}{arity}\verb|type arity| \\
+ \indexdef{}{ML type}{typ}\verb|type typ| \\
+ \indexdef{}{ML}{map\_atyps}\verb|map_atyps: (typ -> typ) -> typ -> typ| \\
+ \indexdef{}{ML}{fold\_atyps}\verb|fold_atyps: (typ -> 'a -> 'a) -> typ -> 'a -> 'a| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{Sign.subsort}\verb|Sign.subsort: theory -> sort * sort -> bool| \\
+ \indexdef{}{ML}{Sign.of\_sort}\verb|Sign.of_sort: theory -> typ * sort -> bool| \\
+ \indexdef{}{ML}{Sign.add\_types}\verb|Sign.add_types: (string * int * mixfix) list -> theory -> theory| \\
+ \indexdef{}{ML}{Sign.add\_tyabbrs\_i}\verb|Sign.add_tyabbrs_i: |\isasep\isanewline%
+\verb| (string * string list * typ * mixfix) list -> theory -> theory| \\
+ \indexdef{}{ML}{Sign.primitive\_class}\verb|Sign.primitive_class: string * class list -> theory -> theory| \\
+ \indexdef{}{ML}{Sign.primitive\_classrel}\verb|Sign.primitive_classrel: class * class -> theory -> theory| \\
+ \indexdef{}{ML}{Sign.primitive\_arity}\verb|Sign.primitive_arity: arity -> theory -> theory| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|class| represents type classes; this is an alias for
+ \verb|string|.
+
+ \item \verb|sort| represents sorts; this is an alias for
+ \verb|class list|.
+
+ \item \verb|arity| represents type arities; this is an alias for
+ triples of the form \isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} for \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s} described above.
+
+ \item \verb|typ| represents types; this is a datatype with
+ constructors \verb|TFree|, \verb|TVar|, \verb|Type|.
+
+ \item \verb|map_atyps|~\isa{f\ {\isasymtau}} applies the mapping \isa{f}
+ to all atomic types (\verb|TFree|, \verb|TVar|) occurring in \isa{{\isasymtau}}.
+
+ \item \verb|fold_atyps|~\isa{f\ {\isasymtau}} iterates the operation \isa{f} over all occurrences of atomic types (\verb|TFree|, \verb|TVar|)
+ in \isa{{\isasymtau}}; the type structure is traversed from left to right.
+
+ \item \verb|Sign.subsort|~\isa{thy\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ s\isactrlisub {\isadigit{2}}{\isacharparenright}}
+ tests the subsort relation \isa{s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ s\isactrlisub {\isadigit{2}}}.
+
+ \item \verb|Sign.of_sort|~\isa{thy\ {\isacharparenleft}{\isasymtau}{\isacharcomma}\ s{\isacharparenright}} tests whether type
+ \isa{{\isasymtau}} is of sort \isa{s}.
+
+ \item \verb|Sign.add_types|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ k{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares new
+ type constructors \isa{{\isasymkappa}} with \isa{k} arguments and
+ optional mixfix syntax.
+
+ \item \verb|Sign.add_tyabbrs_i|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec {\isasymalpha}{\isacharcomma}\ {\isasymtau}{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}}
+ defines a new type abbreviation \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} with
+ optional mixfix syntax.
+
+ \item \verb|Sign.primitive_class|~\isa{{\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub n{\isacharbrackright}{\isacharparenright}} declares a new class \isa{c}, together with class
+ relations \isa{c\ {\isasymsubseteq}\ c\isactrlisub i}, for \isa{i\ {\isacharequal}\ {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ n}.
+
+ \item \verb|Sign.primitive_classrel|~\isa{{\isacharparenleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ c\isactrlisub {\isadigit{2}}{\isacharparenright}} declares the class relation \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}.
+
+ \item \verb|Sign.primitive_arity|~\isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} declares
+ the arity \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s}.
+
+ \end{description}%
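+
+ For illustration, a minimal ML sketch of the query operations above.
+ The theory value \verb|thy| and the fully qualified class and type
+ names used here are assumptions for the sake of the example:
+
+\begin{verbatim}
+(* test the subsort relation {linorder} <= {order}
+   (hypothetical class names) *)
+val b1 = Sign.subsort thy
+  (["Orderings.linorder"], ["Orderings.order"]);
+
+(* test whether type nat is of sort {linorder}
+   (hypothetical type/class names) *)
+val b2 = Sign.of_sort thy
+  (Type ("Nat.nat", []), ["Orderings.linorder"]);
+\end{verbatim}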
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Terms \label{sec:terms}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The language of terms is that of simply-typed \isa{{\isasymlambda}}-calculus
+ with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
+ or \cite{paulson-ml2}), with the types being determined by the
+ corresponding binders. In contrast, free variables and constants
+ have an explicit name and type in each occurrence.
+
+ \medskip A \emph{bound variable} is a natural number \isa{b},
+ which accounts for the number of intermediate binders between the
+ variable occurrence in the body and its binding position. For
+ example, the de-Bruijn term \isa{{\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isadigit{1}}\ {\isacharplus}\ {\isadigit{0}}} would
+ correspond to \isa{{\isasymlambda}x\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}y\isactrlbsub nat\isactrlesub {\isachardot}\ x\ {\isacharplus}\ y} in a named
+ representation. Note that a bound variable may be represented by
+ different de-Bruijn indices at different occurrences, depending on
+ the nesting of abstractions.
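+
+ As a concrete sketch, using the ML term constructors introduced
+ below (the type \verb|T| and the name of the addition constant are
+ hypothetical), this de-Bruijn term could be built as follows:
+
+\begin{verbatim}
+(* internal representation of  %x y. x + y  over some type T *)
+val t =
+  Abs ("x", T, Abs ("y", T,
+    Const ("plus", T --> T --> T) $ Bound 1 $ Bound 0));
+\end{verbatim}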
+
+ A \emph{loose variable} is a bound variable that is outside the
+ scope of local binders. The types (and names) for loose variables
+ can be managed as a separate context that is maintained as a stack
+ of hypothetical binders. The core logic operates on closed terms,
+ without any loose variables.
+
+ A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
+ \isa{{\isacharparenleft}x{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed \isa{x\isactrlisub {\isasymtau}}. A
+ \emph{schematic variable} is a pair of an indexname and a type,
+ e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}x{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{{\isacharquery}x\isactrlisub {\isasymtau}}.
+
+ \medskip A \emph{constant} is a pair of a basic name and a type,
+ e.g.\ \isa{{\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{c\isactrlisub {\isasymtau}}. Constants are declared in the context as polymorphic
+ families \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}, meaning that all substitution instances
+ \isa{c\isactrlisub {\isasymtau}} for \isa{{\isasymtau}\ {\isacharequal}\ {\isasymsigma}{\isasymvartheta}} are valid.
+
+ The vector of \emph{type arguments} of constant \isa{c\isactrlisub {\isasymtau}}
+ wrt.\ the declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is defined as the codomain of
+ the matcher \isa{{\isasymvartheta}\ {\isacharequal}\ {\isacharbraceleft}{\isacharquery}{\isasymalpha}\isactrlisub {\isadigit{1}}\ {\isasymmapsto}\ {\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isacharquery}{\isasymalpha}\isactrlisub n\ {\isasymmapsto}\ {\isasymtau}\isactrlisub n{\isacharbraceright}} presented in canonical order \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}}. Within a given theory context,
+ there is a one-to-one correspondence between any constant \isa{c\isactrlisub {\isasymtau}} and the application \isa{c{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}} of its type arguments. For example, with \isa{plus\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}}, the instance \isa{plus\isactrlbsub nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ nat\isactrlesub } corresponds to \isa{plus{\isacharparenleft}nat{\isacharparenright}}.
+
+ Constant declarations \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} may contain sort constraints
+ for type variables in \isa{{\isasymsigma}}. These are observed by
+ type-inference as expected, but \emph{ignored} by the core logic.
+ This means the primitive logic is able to reason with instances of
+ polymorphic constants that the user-level type-checker would reject
+ due to violation of type class restrictions.
+
+ \medskip An \emph{atomic} term is either a variable or constant. A
+ \emph{term} is defined inductively over atomic terms, with
+ abstraction and application as follows: \isa{t\ {\isacharequal}\ b\ {\isacharbar}\ x\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isacharquery}x\isactrlisub {\isasymtau}\ {\isacharbar}\ c\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isasymlambda}\isactrlisub {\isasymtau}{\isachardot}\ t\ {\isacharbar}\ t\isactrlisub {\isadigit{1}}\ t\isactrlisub {\isadigit{2}}}.
+ Parsing and printing take care of converting between the internal
+ de-Bruijn representation and an external representation with named
+ bound variables.  Subsequently, we shall use the latter notation
+ instead of the internal de-Bruijn representation.
+
+ The inductive relation \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} assigns a (unique) type to a
+ term according to the structure of atomic terms, abstractions, and
+ applications:
+ \[
+ \infer{\isa{a\isactrlisub {\isasymtau}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}}{}
+ \qquad
+ \infer{\isa{{\isacharparenleft}{\isasymlambda}x\isactrlsub {\isasymtau}{\isachardot}\ t{\isacharparenright}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}}
+ \qquad
+ \infer{\isa{t\ u\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}} & \isa{u\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}}
+ \]
+ A \emph{well-typed term} is a term that can be typed according to these rules.
+
+ Typing information can be omitted: type-inference is able to
+ reconstruct the most general type of a raw term, while assigning
+ most general types to all of its variables and constants.
+ Type-inference depends on a context of type constraints for fixed
+ variables, and declarations for polymorphic constants.
+
+ The identity of atomic terms consists both of the name and the type
+ component. This means that different variables \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{1}}\isactrlesub } and \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{2}}\isactrlesub } may become the same after type
+ instantiation. Some outer layers of the system make it hard to
+ produce variables of the same name, but different types. In
+ contrast, mixed instances of polymorphic constants occur frequently.
+
+ \medskip The \emph{hidden polymorphism} of a term \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}
+ is the set of type variables occurring in \isa{t}, but not in
+ \isa{{\isasymsigma}}. This means that the term implicitly depends on type
+ arguments that are not accounted for in the result type, i.e.\ there are
+ different type instances \isa{t{\isasymvartheta}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} and \isa{t{\isasymvartheta}{\isacharprime}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with the same type. This slightly
+ pathological situation notoriously demands additional care.
+
+ \medskip A \emph{term abbreviation} is a syntactic definition \isa{c\isactrlisub {\isasymsigma}\ {\isasymequiv}\ t} of a closed term \isa{t} of type \isa{{\isasymsigma}},
+ without any hidden polymorphism. A term abbreviation looks like a
+ constant in the syntax, but is expanded before entering the logical
+ core. Abbreviations are usually reverted when printing terms, using
+ \isa{t\ {\isasymrightarrow}\ c\isactrlisub {\isasymsigma}} as rules for higher-order rewriting.
+
+ \medskip Canonical operations on \isa{{\isasymlambda}}-terms include \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion: \isa{{\isasymalpha}}-conversion refers to capture-free
+ renaming of bound variables; \isa{{\isasymbeta}}-conversion contracts an
+ abstraction applied to an argument term, substituting the argument
+ in the body: \isa{{\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharparenright}a} becomes \isa{b{\isacharbrackleft}a{\isacharslash}x{\isacharbrackright}}; \isa{{\isasymeta}}-conversion contracts vacuous application-abstraction: \isa{{\isasymlambda}x{\isachardot}\ f\ x} becomes \isa{f}, provided that the bound variable
+ does not occur in \isa{f}.
+
+ Terms are normally treated modulo \isa{{\isasymalpha}}-conversion, which is
+ implicit in the de-Bruijn representation. Names for bound variables
+ in abstractions are maintained separately as (meaningless) comments,
+ mostly for parsing and printing. Full \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion is
+ commonplace in various standard operations (\secref{sec:obj-rules})
+ that are based on higher-order unification and matching.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{term}\verb|type term| \\
+ \indexdef{}{ML}{op aconv}\verb|op aconv: term * term -> bool| \\
+ \indexdef{}{ML}{map\_types}\verb|map_types: (typ -> typ) -> term -> term| \\
+ \indexdef{}{ML}{fold\_types}\verb|fold_types: (typ -> 'a -> 'a) -> term -> 'a -> 'a| \\
+ \indexdef{}{ML}{map\_aterms}\verb|map_aterms: (term -> term) -> term -> term| \\
+ \indexdef{}{ML}{fold\_aterms}\verb|fold_aterms: (term -> 'a -> 'a) -> term -> 'a -> 'a| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{fastype\_of}\verb|fastype_of: term -> typ| \\
+ \indexdef{}{ML}{lambda}\verb|lambda: term -> term -> term| \\
+ \indexdef{}{ML}{betapply}\verb|betapply: term * term -> term| \\
+ \indexdef{}{ML}{Sign.declare\_const}\verb|Sign.declare_const: Properties.T -> (binding * typ) * mixfix ->|\isasep\isanewline%
+\verb| theory -> term * theory| \\
+ \indexdef{}{ML}{Sign.add\_abbrev}\verb|Sign.add_abbrev: string -> Properties.T -> binding * term ->|\isasep\isanewline%
+\verb| theory -> (term * term) * theory| \\
+ \indexdef{}{ML}{Sign.const\_typargs}\verb|Sign.const_typargs: theory -> string * typ -> typ list| \\
+ \indexdef{}{ML}{Sign.const\_instance}\verb|Sign.const_instance: theory -> string * typ list -> typ| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|term| represents de-Bruijn terms, with comments in
+ abstractions, and explicitly named free variables and constants;
+ this is a datatype with constructors \verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|, \verb|Abs|, \verb|op $|.
+
+ \item \isa{t}~\verb|aconv|~\isa{u} checks \isa{{\isasymalpha}}-equivalence of two terms. This is the basic equality relation
+ on type \verb|term|; raw datatype equality should only be used
+ for operations related to parsing or printing!
+
+ \item \verb|map_types|~\isa{f\ t} applies the mapping \isa{f} to all types occurring in \isa{t}.
+
+ \item \verb|fold_types|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of types in \isa{t}; the term
+ structure is traversed from left to right.
+
+ \item \verb|map_aterms|~\isa{f\ t} applies the mapping \isa{f}
+ to all atomic terms (\verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|) occurring in \isa{t}.
+
+ \item \verb|fold_aterms|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of atomic terms (\verb|Bound|, \verb|Free|,
+ \verb|Var|, \verb|Const|) in \isa{t}; the term structure is
+ traversed from left to right.
+
+ \item \verb|fastype_of|~\isa{t} determines the type of a
+ well-typed term. This operation is relatively slow, despite the
+ omission of any sanity checks.
+
+ \item \verb|lambda|~\isa{a\ b} produces an abstraction \isa{{\isasymlambda}a{\isachardot}\ b}, where occurrences of the atomic term \isa{a} in the
+ body \isa{b} are replaced by bound variables.
+
+ \item \verb|betapply|~\isa{{\isacharparenleft}t{\isacharcomma}\ u{\isacharparenright}} produces an application \isa{t\ u}, with topmost \isa{{\isasymbeta}}-conversion if \isa{t} is an
+ abstraction.
+
+ \item \verb|Sign.declare_const|~\isa{properties\ {\isacharparenleft}{\isacharparenleft}c{\isacharcomma}\ {\isasymsigma}{\isacharparenright}{\isacharcomma}\ mx{\isacharparenright}}
+ declares a new constant \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with optional mixfix
+ syntax.
+
+ \item \verb|Sign.add_abbrev|~\isa{print{\isacharunderscore}mode\ properties\ {\isacharparenleft}c{\isacharcomma}\ t{\isacharparenright}}
+ introduces a new term abbreviation \isa{c\ {\isasymequiv}\ t}.
+
+ \item \verb|Sign.const_typargs|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} and \verb|Sign.const_instance|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharbrackright}{\isacharparenright}}
+ convert between two representations of polymorphic constants: full
+ type instance vs.\ compact type arguments form.
+
+ \end{description}%
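+
+ A combined sketch of these operations; the theory value \verb|thy|,
+ the type \verb|T|, the term \verb|u|, and the constant name
+ \verb|"plus"| with declaration \isa{{\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}} are
+ assumptions for the sake of the example:
+
+\begin{verbatim}
+(* collect the names of all free variables of a term t *)
+fun add_frees t =
+  fold_aterms (fn Free (x, _) => insert (op =) x | _ => I) t [];
+
+(* beta-contract an application of an abstraction: (%x. x) u --> u *)
+val u' = betapply (Abs ("x", T, Bound 0), u);
+
+(* type arguments of an instance of plus :: 'a => 'a => 'a *)
+val args = Sign.const_typargs thy ("plus", T --> T --> T);  (* [T] *)
+\end{verbatim}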
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Theorems \label{sec:thms}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{proposition} is a well-typed term of type \isa{prop}; a
+ \emph{theorem} is a proven proposition (depending on a context of
+ hypotheses and the background theory). Primitive inferences include
+ plain Natural Deduction rules for the primary connectives \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} of the framework. There is also a builtin
+ notion of equality/equivalence \isa{{\isasymequiv}}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Primitive connectives and rules \label{sec:prim-rules}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The theory \isa{Pure} contains constant declarations for the
+ primitive connectives \isa{{\isasymAnd}}, \isa{{\isasymLongrightarrow}}, and \isa{{\isasymequiv}} of
+ the logical framework, see \figref{fig:pure-connectives}. The
+ derivability judgment \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} is
+ defined inductively by the primitive inferences given in
+ \figref{fig:prim-rules}, with the global restriction that the
+ hypotheses must \emph{not} contain any schematic variables. The
+ builtin equality is conceptually axiomatized as shown in
+ \figref{fig:pure-equality}, although the implementation works
+ directly with derived inferences.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ \isa{all\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}{\isasymalpha}\ {\isasymRightarrow}\ prop{\isacharparenright}\ {\isasymRightarrow}\ prop} & universal quantification (binder \isa{{\isasymAnd}}) \\
+ \isa{{\isasymLongrightarrow}\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & implication (right associative infix) \\
+ \isa{{\isasymequiv}\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & equality relation (infix) \\
+ \end{tabular}
+ \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
+ \end{center}
+ \end{figure}
+
+ \begin{figure}[htb]
+ \begin{center}
+ \[
+ \infer[\isa{{\isacharparenleft}axiom{\isacharparenright}}]{\isa{{\isasymturnstile}\ A}}{\isa{A\ {\isasymin}\ {\isasymTheta}}}
+ \qquad
+ \infer[\isa{{\isacharparenleft}assume{\isacharparenright}}]{\isa{A\ {\isasymturnstile}\ A}}{}
+ \]
+ \[
+ \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
+ \qquad
+ \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}a{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}
+ \]
+ \[
+ \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \qquad
+ \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B}}{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B} & \isa{{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A}}
+ \]
+ \caption{Primitive inferences of Pure}\label{fig:prim-rules}
+ \end{center}
+ \end{figure}
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}{\isacharparenright}\ a\ {\isasymequiv}\ b{\isacharbrackleft}a{\isacharbrackright}} & \isa{{\isasymbeta}}-conversion \\
+ \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ x} & reflexivity \\
+ \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ y\ {\isasymLongrightarrow}\ P\ x\ {\isasymLongrightarrow}\ P\ y} & substitution \\
+ \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ f\ x\ {\isasymequiv}\ g\ x{\isacharparenright}\ {\isasymLongrightarrow}\ f\ {\isasymequiv}\ g} & extensionality \\
+ \isa{{\isasymturnstile}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymequiv}\ B} & logical equivalence \\
+ \end{tabular}
+ \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
+ \end{center}
+ \end{figure}
+
+ The introduction and elimination rules for \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} are analogous to formation of dependently typed \isa{{\isasymlambda}}-terms representing the underlying proof objects. Proof terms
+ are irrelevant in the Pure logic, though; they cannot occur within
+ propositions. The system provides a runtime option to record
+ explicit proof terms for primitive inferences. Thus all three
+ levels of \isa{{\isasymlambda}}-calculus become explicit: \isa{{\isasymRightarrow}} for
+ terms, and \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} for proofs (cf.\
+ \cite{Berghofer-Nipkow:2000:TPHOL}).
+
+ Observe that locally fixed parameters (as in \isa{{\isasymAnd}{\isacharunderscore}intro}) need
+ not be recorded in the hypotheses, because the simple syntactic
+ types of Pure are always inhabitable. ``Assumptions'' \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} for type-membership are only present as long as some \isa{x\isactrlisub {\isasymtau}} occurs in the statement body.\footnote{This is the key
+ difference to ``\isa{{\isasymlambda}HOL}'' in the PTS framework
+ \cite{Barendregt-Geuvers:2001}, where hypotheses \isa{x\ {\isacharcolon}\ A} are
+ treated uniformly for propositions and types.}
+
+ \medskip The axiomatization of a theory is implicitly closed by
+ forming all instances of type and term variables: \isa{{\isasymturnstile}\ A{\isasymvartheta}} holds for any substitution instance of an axiom
+ \isa{{\isasymturnstile}\ A}. By pushing substitutions through derivations
+ inductively, we also get admissible \isa{generalize} and \isa{instance} rules as shown in \figref{fig:subst-rules}.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \[
+ \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} & \isa{{\isasymalpha}\ {\isasymnotin}\ {\isasymGamma}}}
+ \quad
+ \infer[\quad\isa{{\isacharparenleft}generalize{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
+ \]
+ \[
+ \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymtau}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}}
+ \quad
+ \infer[\quad\isa{{\isacharparenleft}instantiate{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}t{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}}
+ \]
+ \caption{Admissible substitution rules}\label{fig:subst-rules}
+ \end{center}
+ \end{figure}
+
+ Note that \isa{instantiate} does not require an explicit
+ side-condition, because \isa{{\isasymGamma}} may never contain schematic
+ variables.
+
+ In principle, variables could be substituted in hypotheses as well,
+ but this would disrupt the monotonicity of reasoning: deriving
+ \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymturnstile}\ B{\isasymvartheta}} from \isa{{\isasymGamma}\ {\isasymturnstile}\ B} is
+ correct, but \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymsupseteq}\ {\isasymGamma}} does not necessarily hold:
+ the result belongs to a different proof context.
+
+ \medskip An \emph{oracle} is a function that produces axioms on the
+ fly. Logically, this is an instance of the \isa{axiom} rule
+ (\figref{fig:prim-rules}), but there is an operational difference.
+ The system always records oracle invocations within derivations of
+ theorems by a unique tag.
+
+ Axiomatizations should be limited to the bare minimum, typically as
+ part of the initial logical basis of an object-logic formalization.
+ Later on, theories are usually developed in a strictly definitional
+ fashion, by stating only certain equalities over new constants.
+
+ A \emph{simple definition} consists of a constant declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} together with an axiom \isa{{\isasymturnstile}\ c\ {\isasymequiv}\ t}, where \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is a closed term without any hidden polymorphism. The RHS
+ may depend on further defined constants, but not on \isa{c} itself.
+ Definitions of functions may be presented as \isa{c\ \isactrlvec x\ {\isasymequiv}\ t} instead of the puristic \isa{c\ {\isasymequiv}\ {\isasymlambda}\isactrlvec x{\isachardot}\ t}.
+
+ An \emph{overloaded definition} consists of a collection of axioms
+ for the same constant, with zero or one equations \isa{c{\isacharparenleft}{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}{\isacharparenright}\ {\isasymequiv}\ t} for each type constructor \isa{{\isasymkappa}} (for
+ distinct variables \isa{\isactrlvec {\isasymalpha}}). The RHS may mention
+ previously defined constants as above, or arbitrary constants \isa{d{\isacharparenleft}{\isasymalpha}\isactrlisub i{\isacharparenright}} for some \isa{{\isasymalpha}\isactrlisub i} projected from \isa{\isactrlvec {\isasymalpha}}. Thus overloaded definitions essentially work by
+ primitive recursion over the syntactic structure of a single type
+ argument.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{ctyp}\verb|type ctyp| \\
+ \indexdef{}{ML type}{cterm}\verb|type cterm| \\
+ \indexdef{}{ML}{Thm.ctyp\_of}\verb|Thm.ctyp_of: theory -> typ -> ctyp| \\
+ \indexdef{}{ML}{Thm.cterm\_of}\verb|Thm.cterm_of: theory -> term -> cterm| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{thm}\verb|type thm| \\
+ \indexdef{}{ML}{proofs}\verb|proofs: int ref| \\
+ \indexdef{}{ML}{Thm.assume}\verb|Thm.assume: cterm -> thm| \\
+ \indexdef{}{ML}{Thm.forall\_intr}\verb|Thm.forall_intr: cterm -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.forall\_elim}\verb|Thm.forall_elim: cterm -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.implies\_intr}\verb|Thm.implies_intr: cterm -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.implies\_elim}\verb|Thm.implies_elim: thm -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.generalize}\verb|Thm.generalize: string list * string list -> int -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.instantiate}\verb|Thm.instantiate: (ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm| \\
+ \indexdef{}{ML}{Thm.axiom}\verb|Thm.axiom: theory -> string -> thm| \\
+ \indexdef{}{ML}{Thm.add\_oracle}\verb|Thm.add_oracle: bstring * ('a -> cterm) -> theory|\isasep\isanewline%
+\verb| -> (string * ('a -> thm)) * theory| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{Theory.add\_axioms\_i}\verb|Theory.add_axioms_i: (binding * term) list -> theory -> theory| \\
+ \indexdef{}{ML}{Theory.add\_deps}\verb|Theory.add_deps: string -> string * typ -> (string * typ) list -> theory -> theory| \\
+ \indexdef{}{ML}{Theory.add\_defs\_i}\verb|Theory.add_defs_i: bool -> bool -> (binding * term) list -> theory -> theory| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|ctyp| and \verb|cterm| represent certified types
+ and terms, respectively. These are abstract datatypes that
+ guarantee that their values have passed the full well-formedness (and
+ well-typedness) checks, relative to the declarations of type
+ constructors, constants etc. in the theory.
+
+ \item \verb|Thm.ctyp_of|~\isa{thy\ {\isasymtau}} and \verb|Thm.cterm_of|~\isa{thy\ t} explicitly check types and terms,
+ respectively.  This also involves some basic normalizations, such
+ as expansion of type and term abbreviations from the theory context.
+
+ Re-certification is relatively slow and should be avoided in tight
+ reasoning loops. There are separate operations to decompose
+ certified entities (including actual theorems).
+
+ \item \verb|thm| represents proven propositions. This is an
+ abstract datatype that guarantees that its values have been
+ constructed by basic principles of the \verb|Thm| module.
+ Every \verb|thm| value contains a sliding back-reference to the
+ enclosing theory, cf.\ \secref{sec:context-theory}.
+
+ \item \verb|proofs| determines the detail of proof recording within
+ \verb|thm| values: \verb|0| records only the names of oracles,
+ \verb|1| records oracle names and propositions, \verb|2| additionally
+ records full proof terms. Officially named theorems that contribute
+ to a result are always recorded.
+
+ \item \verb|Thm.assume|, \verb|Thm.forall_intr|, \verb|Thm.forall_elim|, \verb|Thm.implies_intr|, and \verb|Thm.implies_elim|
+ correspond to the primitive inferences of \figref{fig:prim-rules}.
+
+ \item \verb|Thm.generalize|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharcomma}\ \isactrlvec x{\isacharparenright}}
+ corresponds to the \isa{generalize} rules of
+ \figref{fig:subst-rules}. Here collections of type and term
+ variables are generalized simultaneously, specified by the given
+ basic names.
+
+ \item \verb|Thm.instantiate|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}\isactrlisub s{\isacharcomma}\ \isactrlvec x\isactrlisub {\isasymtau}{\isacharparenright}} corresponds to the \isa{instantiate} rules
+ of \figref{fig:subst-rules}. Type variables are substituted before
+ term variables. Note that the types in \isa{\isactrlvec x\isactrlisub {\isasymtau}}
+ refer to the instantiated versions.
+
+ \item \verb|Thm.axiom|~\isa{thy\ name} retrieves a named
+ axiom, cf.\ \isa{axiom} in \figref{fig:prim-rules}.
+
+ \item \verb|Thm.add_oracle|~\isa{{\isacharparenleft}name{\isacharcomma}\ oracle{\isacharparenright}} produces a named
+ oracle rule, essentially generating arbitrary axioms on the fly,
+ cf.\ \isa{axiom} in \figref{fig:prim-rules}.
+
+ \item \verb|Theory.add_axioms_i|~\isa{{\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ A{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares
+ arbitrary propositions as axioms.
+
+ \item \verb|Theory.add_deps|~\isa{name\ c\isactrlisub {\isasymtau}\ \isactrlvec d\isactrlisub {\isasymsigma}} declares dependencies of a named specification
+ for constant \isa{c\isactrlisub {\isasymtau}}, relative to existing
+ specifications for constants \isa{\isactrlvec d\isactrlisub {\isasymsigma}}.
+
+ \item \verb|Theory.add_defs_i|~\isa{unchecked\ overloaded\ {\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ c\ \isactrlvec x\ {\isasymequiv}\ t{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} states a definitional axiom for an existing
+ constant \isa{c}. Dependencies are recorded (cf.\ \verb|Theory.add_deps|), unless the \isa{unchecked} option is set.
+
+ \end{description}%
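+
+ A minimal sketch of these operations in ML, for a certified
+ proposition \verb|cA|, a theory value \verb|thy|, and terms
+ \verb|c|, \verb|t| that are assumed to be at hand (the binding name
+ is likewise hypothetical):
+
+\begin{verbatim}
+(* assume A, then discharge it again:  |- A ==> A *)
+val triv = Thm.implies_intr cA (Thm.assume cA);
+
+(* state a simple definition  c == t  for an existing constant c *)
+val thy' = Theory.add_defs_i false false
+  [(Binding.name "c_def", Logic.mk_equals (c, t))] thy;
+\end{verbatim}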
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Auxiliary definitions%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Theory \isa{Pure} provides a few auxiliary definitions, see
+ \figref{fig:pure-aux}. These special constants are normally not
+ exposed to the user, but appear in internal encodings.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{ll}
+ \isa{conjunction\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & (infix \isa{{\isacharampersand}}) \\
+ \isa{{\isasymturnstile}\ A\ {\isacharampersand}\ B\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}C{\isachardot}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isacharparenright}} \\[1ex]
+ \isa{prop\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop} & (prefix \isa{{\isacharhash}}, suppressed) \\
+ \isa{{\isacharhash}A\ {\isasymequiv}\ A} \\[1ex]
+ \isa{term\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & (prefix \isa{TERM}) \\
+ \isa{term\ x\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}A{\isachardot}\ A\ {\isasymLongrightarrow}\ A{\isacharparenright}} \\[1ex]
+ \isa{TYPE\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself} & (prefix \isa{TYPE}) \\
+ \isa{{\isacharparenleft}unspecified{\isacharparenright}} \\
+ \end{tabular}
+ \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
+ \end{center}
+ \end{figure}
+
+ Derived conjunction rules include introduction \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isacharampersand}\ B}, and destructions \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ A} and \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ B}.
+ Conjunction allows us to treat simultaneous assumptions and conclusions
+ uniformly. For example, multiple claims are intermediately
+ represented as explicit conjunction, but this is refined into
+ separate sub-goals before the user continues the proof; the final
+ result is projected into a list of theorems (cf.\
+ \secref{sec:tactical-goals}).
+
+ The \isa{prop} marker (\isa{{\isacharhash}}) makes arbitrarily complex
+ propositions appear as atomic, without changing the meaning: \isa{{\isasymGamma}\ {\isasymturnstile}\ A} and \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isacharhash}A} are interchangeable. See
+ \secref{sec:tactical-goals} for specific operations.
+
+ The \isa{term} marker turns any well-typed term into a derivable
+ proposition: \isa{{\isasymturnstile}\ TERM\ t} holds unconditionally. Although
+ this is logically vacuous, it allows terms and proofs to be treated
+ uniformly, similar to a type-theoretic framework.
+
+ The \isa{TYPE} constructor is the canonical representative of
+ the unspecified type \isa{{\isasymalpha}\ itself}; it essentially injects the
+ language of types into that of terms. There is specific notation
+ \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} for \isa{TYPE\isactrlbsub {\isasymtau}\ itself\isactrlesub }.
+ Although devoid of any particular meaning, \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} accounts for the type \isa{{\isasymtau}} within the term
+ language.  In particular, \isa{TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}} may be used as a formal
+ argument in primitive definitions, in order to circumvent hidden
+ polymorphism (cf.\ \secref{sec:terms}). For example, \isa{c\ TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}\ {\isasymequiv}\ A{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} defines \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself\ {\isasymRightarrow}\ prop} in terms of
+ a proposition \isa{A} that depends on an additional type
+ argument, which is essentially a predicate on types.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{Conjunction.intr}\verb|Conjunction.intr: thm -> thm -> thm| \\
+ \indexdef{}{ML}{Conjunction.elim}\verb|Conjunction.elim: thm -> thm * thm| \\
+ \indexdef{}{ML}{Drule.mk\_term}\verb|Drule.mk_term: cterm -> thm| \\
+ \indexdef{}{ML}{Drule.dest\_term}\verb|Drule.dest_term: thm -> cterm| \\
+ \indexdef{}{ML}{Logic.mk\_type}\verb|Logic.mk_type: typ -> term| \\
+ \indexdef{}{ML}{Logic.dest\_type}\verb|Logic.dest_type: term -> typ| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Conjunction.intr| derives \isa{A\ {\isacharampersand}\ B} from \isa{A} and \isa{B}.
+
+ \item \verb|Conjunction.elim| derives \isa{A} and \isa{B}
+ from \isa{A\ {\isacharampersand}\ B}.
+
+ \item \verb|Drule.mk_term| derives \isa{TERM\ t}.
+
+ \item \verb|Drule.dest_term| recovers term \isa{t} from \isa{TERM\ t}.
+
+ \item \verb|Logic.mk_type|~\isa{{\isasymtau}} produces the term \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}}.
+
+ \item \verb|Logic.dest_type|~\isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} recovers the type
+ \isa{{\isasymtau}}.
+
+ \end{description}%
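+
+ A small sketch, for theorems \verb|th_a|, \verb|th_b| and a
+ certified term \verb|ct| that are assumed to be given:
+
+\begin{verbatim}
+(* pair two theorems via A & B and take them apart again *)
+val ab = Conjunction.intr th_a th_b;
+val (th_a', th_b') = Conjunction.elim ab;
+
+(* wrap a term as the (vacuous) theorem TERM t, and recover it *)
+val th_t = Drule.mk_term ct;
+val ct' = Drule.dest_term th_t;
+\end{verbatim}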
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Object-level rules \label{sec:obj-rules}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The primitive inferences covered so far mostly serve foundational
+ purposes. User-level reasoning usually works via object-level rules
+ that are represented as theorems of Pure. Composition of rules
+ involves \emph{backchaining}, \emph{higher-order unification} modulo
+ \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion of \isa{{\isasymlambda}}-terms, and so-called
+ \emph{lifting} of rules into a context of \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} connectives. Thus the full power of higher-order Natural
+ Deduction in Isabelle/Pure becomes readily available.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Hereditary Harrop Formulae%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The idea of object-level rules is to model Natural Deduction
+ inferences in the style of Gentzen \cite{Gentzen:1935}, but we allow
+ arbitrary nesting similar to \cite{extensions91}. The most basic
+ rule format is that of a \emph{Horn Clause}:
+ \[
+ \infer{\isa{A}}{\isa{A\isactrlsub {\isadigit{1}}} & \isa{{\isasymdots}} & \isa{A\isactrlsub n}}
+ \]
+ where \isa{A{\isacharcomma}\ A\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlsub n} are atomic propositions
+ of the framework, usually of the form \isa{Trueprop\ B}, where
+ \isa{B} is a (compound) object-level statement. This
+ object-level inference corresponds to an iterated implication in
+ Pure like this:
+ \[
+ \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ A}
+ \]
+ As an example consider conjunction introduction: \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isasymand}\ B}. Any parameters occurring in such rule statements are
+ conceptually treated as arbitrary:
+ \[
+ \isa{{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ A\isactrlsub {\isadigit{1}}\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymLongrightarrow}\ A\ x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m}
+ \]
+
+ Nesting of rules means that the positions of \isa{A\isactrlsub i} may
+ again hold compound rules, not just atomic propositions.
+ Propositions of this format are called \emph{Hereditary Harrop
+ Formulae} in the literature \cite{Miller:1991}. Here we give an
+ inductive characterization as follows:
+
+ \medskip
+ \begin{tabular}{ll}
+ \isa{\isactrlbold x} & set of variables \\
+ \isa{\isactrlbold A} & set of atomic propositions \\
+ \isa{\isactrlbold H\ \ {\isacharequal}\ \ {\isasymAnd}\isactrlbold x\isactrlsup {\isacharasterisk}{\isachardot}\ \isactrlbold H\isactrlsup {\isacharasterisk}\ {\isasymLongrightarrow}\ \isactrlbold A} & set of Hereditary Harrop Formulae \\
+ \end{tabular}
+ \medskip
+
+ \noindent Thus we essentially impose nesting levels on propositions
+ formed from \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}}. At each level there is a
+ prefix of parameters and compound premises, concluding an atomic
+ proposition. Typical examples are \isa{{\isasymlongrightarrow}}-introduction \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymlongrightarrow}\ B} or mathematical induction \isa{P\ {\isadigit{0}}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}n{\isachardot}\ P\ n\ {\isasymLongrightarrow}\ P\ {\isacharparenleft}Suc\ n{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ P\ n}. Even deeper nesting occurs in well-founded
+ induction \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ {\isacharparenleft}{\isasymAnd}y{\isachardot}\ y\ {\isasymprec}\ x\ {\isasymLongrightarrow}\ P\ y{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x{\isacharparenright}\ {\isasymLongrightarrow}\ P\ x}, but this
+ already marks the limit of rule complexity seen in practice.
+
+ \medskip Regular user-level inferences in Isabelle/Pure always
+ maintain the following canonical form of results:
+
+ \begin{itemize}
+
+ \item Normalization by \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}{\isacharparenright}\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x{\isacharparenright}},
+ which is a theorem of Pure, means that quantifiers are pushed in
+ front of implication at each level of nesting. The normal form is a
+ Hereditary Harrop Formula.
+
+ \item The outermost prefix of parameters is represented via
+ schematic variables: instead of \isa{{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x} we have \isa{\isactrlvec H\ {\isacharquery}\isactrlvec x\ {\isasymLongrightarrow}\ A\ {\isacharquery}\isactrlvec x}.
+ Note that this representation loses information about the order of
+ parameters, and vacuous quantifiers vanish automatically.
+
+ \end{itemize}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{MetaSimplifier.norm\_hhf}\verb|MetaSimplifier.norm_hhf: thm -> thm| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|MetaSimplifier.norm_hhf|~\isa{thm} normalizes the given
+ theorem according to the canonical form specified above. This is
+ occasionally helpful to repair some low-level tools that do not
+ handle Hereditary Harrop Formulae properly.
+
+ \end{description}%
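+
+ For example (with \verb|rule| standing for an arbitrary theorem):
+
+\begin{verbatim}
+(* push quantifiers in front of implications at each nesting level *)
+val rule' = MetaSimplifier.norm_hhf rule;
+\end{verbatim}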
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Rule composition%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The rule calculus of Isabelle/Pure provides two main inferences:
+ \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} (i.e.\ back-chaining of rules) and
+ \hyperlink{inference.assumption}{\mbox{\isa{assumption}}} (i.e.\ closing a branch), both modulo
+ higher-order unification. There are also combined variants, notably
+ \hyperlink{inference.elim-resolution}{\mbox{\isa{elim{\isacharunderscore}resolution}}} and \hyperlink{inference.dest-resolution}{\mbox{\isa{dest{\isacharunderscore}resolution}}}.
+
+ To understand the all-important \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle,
+ we first consider raw \indexdef{}{inference}{composition}\hypertarget{inference.composition}{\hyperlink{inference.composition}{\mbox{\isa{composition}}}} (modulo
+ higher-order unification with substitution \isa{{\isasymvartheta}}):
+ \[
+ \infer[(\indexdef{}{inference}{composition}\hypertarget{inference.composition}{\hyperlink{inference.composition}{\mbox{\isa{composition}}}})]{\isa{\isactrlvec A{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}}
+ {\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B} & \isa{B{\isacharprime}\ {\isasymLongrightarrow}\ C} & \isa{B{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}}}
+ \]
+ Here the conclusion of the first rule is unified with the premise of
+ the second; the resulting rule instance inherits the premises of the
+ first and conclusion of the second. Note that \isa{C} can again
+ consist of iterated implications. We can also permute the premises
+ of the second rule back-and-forth in order to compose with \isa{B{\isacharprime}} in any position (subsequently we shall always refer to
+ position 1 w.l.o.g.).
+
+ In \hyperlink{inference.composition}{\mbox{\isa{composition}}} the internal structure of the common
+ part \isa{B} and \isa{B{\isacharprime}} is not taken into account. For
+ proper \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} we require \isa{B} to be atomic,
+ and explicitly observe the structure \isa{{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x} of the premise of the second rule. The
+ idea is to adapt the first rule by ``lifting'' it into this context,
+ by means of iterated application of the following inferences:
+ \[
+ \infer[(\indexdef{}{inference}{imp\_lift}\hypertarget{inference.imp-lift}{\hyperlink{inference.imp-lift}{\mbox{\isa{imp{\isacharunderscore}lift}}}})]{\isa{{\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ \isactrlvec A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ B{\isacharparenright}}}{\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B}}
+ \]
+ \[
+ \infer[(\indexdef{}{inference}{all\_lift}\hypertarget{inference.all-lift}{\hyperlink{inference.all-lift}{\mbox{\isa{all{\isacharunderscore}lift}}}})]{\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}}}{\isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a}}
+ \]
+ By combining raw composition with lifting, we get full \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} as follows:
+ \[
+ \infer[(\indexdef{}{inference}{resolution}\hypertarget{inference.resolution}{\hyperlink{inference.resolution}{\mbox{\isa{resolution}}}})]
+ {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}}
+ {\begin{tabular}{l}
+ \isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a} \\
+ \isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} \\
+ \isa{{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}} \\
+ \end{tabular}}
+ \]
+
+ Continued resolution of rules allows us to back-chain a problem
+ towards more and more sub-problems.  Branches are closed either by
+ resolving with
+ a rule of 0 premises, or by producing a ``short-circuit'' within a
+ solved situation (again modulo unification):
+ \[
+ \infer[(\indexdef{}{inference}{assumption}\hypertarget{inference.assumption}{\hyperlink{inference.assumption}{\mbox{\isa{assumption}}}})]{\isa{C{\isasymvartheta}}}
+ {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} & \isa{A{\isasymvartheta}\ {\isacharequal}\ H\isactrlsub i{\isasymvartheta}}~~\text{(for some~\isa{i})}}
+ \]
+
+ FIXME \indexdef{}{inference}{elim\_resolution}\hypertarget{inference.elim-resolution}{\hyperlink{inference.elim-resolution}{\mbox{\isa{elim{\isacharunderscore}resolution}}}}, \indexdef{}{inference}{dest\_resolution}\hypertarget{inference.dest-resolution}{\hyperlink{inference.dest-resolution}{\mbox{\isa{dest{\isacharunderscore}resolution}}}}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{op RS}\verb|op RS: thm * thm -> thm| \\
+ \indexdef{}{ML}{op OF}\verb|op OF: thm * thm list -> thm| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}} resolves \isa{rule\isactrlsub {\isadigit{1}}} with \isa{rule\isactrlsub {\isadigit{2}}} according to the
+ \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} principle explained above. Note that the
+ corresponding attribute in the Isar language is called \hyperlink{attribute.THEN}{\mbox{\isa{THEN}}}.
+
+ \item \isa{rule\ OF\ rules} resolves the list of rules \isa{rules}
+ with \isa{rule}, addressing its premises \isa{{\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ length\ rules}
+ (operating from last to first). This means the newly emerging
+ premises are all concatenated, without interfering. Also note that
+ compared to \isa{RS}, the rule argument order is swapped: \isa{rule\isactrlsub {\isadigit{1}}\ RS\ rule\isactrlsub {\isadigit{2}}\ {\isacharequal}\ rule\isactrlsub {\isadigit{2}}\ OF\ {\isacharbrackleft}rule\isactrlsub {\isadigit{1}}{\isacharbrackright}}.
+
+ \end{description}%
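+
+ A sketch in terms of Isabelle/HOL rules; the \verb|@{thm ...}|
+ antiquotations and the particular theorems \verb|conjI| and
+ \verb|conjunct1| are assumed to be available in the ML context:
+
+\begin{verbatim}
+(* resolve  |- ?A & ?B ==> ?A  into the first premise of
+   |- ?A ==> ?B ==> ?A & ?B, yielding (schematically)
+   |- ?A & ?B ==> ?Q ==> ?A & ?Q *)
+val th  = @{thm conjunct1} RS @{thm conjI};
+
+(* the same composition with swapped argument order *)
+val th' = @{thm conjI} OF [@{thm conjunct1}];
+\end{verbatim}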
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- a/doc-src/IsarImplementation/Thy/document/ML.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/document/ML.tex Fri Feb 27 18:50:35 2009 +0100
@@ -3,14 +3,14 @@
\def\isabellecontext{ML}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
\isatagtheory
\isacommand{theory}\isamarkupfalse%
-\ {\isachardoublequoteopen}ML{\isachardoublequoteclose}\ \isakeyword{imports}\ base\ \isakeyword{begin}%
+\ {\isachardoublequoteopen}ML{\isachardoublequoteclose}\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
\endisatagtheory
{\isafoldtheory}%
%
@@ -275,9 +275,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{NAMED\_CRITICAL}\verb|NAMED_CRITICAL: string -> (unit -> 'a) -> 'a| \\
- \indexml{CRITICAL}\verb|CRITICAL: (unit -> 'a) -> 'a| \\
- \indexml{setmp}\verb|setmp: 'a ref -> 'a -> ('b -> 'c) -> 'b -> 'c| \\
+ \indexdef{}{ML}{NAMED\_CRITICAL}\verb|NAMED_CRITICAL: string -> (unit -> 'a) -> 'a| \\
+ \indexdef{}{ML}{CRITICAL}\verb|CRITICAL: (unit -> 'a) -> 'a| \\
+ \indexdef{}{ML}{setmp}\verb|setmp: 'a ref -> 'a -> ('b -> 'c) -> 'b -> 'c| \\
\end{mldecls}
\begin{description}
@@ -331,7 +331,7 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op |$>$ }\verb|op |\verb,|,\verb|> : 'a * ('a -> 'b) -> 'b| \\
+ \indexdef{}{ML}{op $\mid$$>$ }\verb|op |\verb,|,\verb|> : 'a * ('a -> 'b) -> 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -410,10 +410,10 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op |-$>$ }\verb|op |\verb,|,\verb|-> : ('c * 'a) * ('c -> 'a -> 'b) -> 'b| \\
- \indexml{op |$>$$>$ }\verb|op |\verb,|,\verb|>> : ('a * 'c) * ('a -> 'b) -> 'b * 'c| \\
- \indexml{op ||$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|> : ('c * 'a) * ('a -> 'b) -> 'c * 'b| \\
- \indexml{op ||$>$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|>> : ('c * 'a) * ('a -> 'd * 'b) -> ('c * 'd) * 'b| \\
+ \indexdef{}{ML}{op $\mid$-$>$ }\verb|op |\verb,|,\verb|-> : ('c * 'a) * ('c -> 'a -> 'b) -> 'b| \\
+ \indexdef{}{ML}{op $\mid$$>$$>$ }\verb|op |\verb,|,\verb|>> : ('a * 'c) * ('a -> 'b) -> 'b * 'c| \\
+ \indexdef{}{ML}{op $\mid$$\mid$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|> : ('c * 'a) * ('a -> 'b) -> 'c * 'b| \\
+ \indexdef{}{ML}{op $\mid$$\mid$$>$$>$ }\verb|op |\verb,|,\verb||\verb,|,\verb|>> : ('c * 'a) * ('a -> 'd * 'b) -> ('c * 'd) * 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -483,8 +483,8 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{fold}\verb|fold: ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b| \\
- \indexml{fold\_map}\verb|fold_map: ('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b| \\
+ \indexdef{}{ML}{fold}\verb|fold: ('a -> 'b -> 'b) -> 'a list -> 'b -> 'b| \\
+ \indexdef{}{ML}{fold\_map}\verb|fold_map: ('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -545,11 +545,11 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op \#$>$ }\verb|op #> : ('a -> 'b) * ('b -> 'c) -> 'a -> 'c| \\
- \indexml{op \#-$>$ }\verb|op #-> : ('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd| \\
- \indexml{op \#$>$$>$ }\verb|op #>> : ('a -> 'c * 'b) * ('c -> 'd) -> 'a -> 'd * 'b| \\
- \indexml{op \#\#$>$ }\verb|op ##> : ('a -> 'c * 'b) * ('b -> 'd) -> 'a -> 'c * 'd| \\
- \indexml{op \#\#$>$$>$ }\verb|op ##>> : ('a -> 'c * 'b) * ('b -> 'e * 'd) -> 'a -> ('c * 'e) * 'd| \\
+ \indexdef{}{ML}{op \#$>$ }\verb|op #> : ('a -> 'b) * ('b -> 'c) -> 'a -> 'c| \\
+ \indexdef{}{ML}{op \#-$>$ }\verb|op #-> : ('a -> 'c * 'b) * ('c -> 'b -> 'd) -> 'a -> 'd| \\
+ \indexdef{}{ML}{op \#$>$$>$ }\verb|op #>> : ('a -> 'c * 'b) * ('c -> 'd) -> 'a -> 'd * 'b| \\
+ \indexdef{}{ML}{op \#\#$>$ }\verb|op ##> : ('a -> 'c * 'b) * ('b -> 'd) -> 'a -> 'c * 'd| \\
+ \indexdef{}{ML}{op \#\#$>$$>$ }\verb|op ##>> : ('a -> 'c * 'b) * ('b -> 'e * 'd) -> 'a -> ('c * 'e) * 'd| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -576,8 +576,8 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{op ` }\verb|op ` : ('b -> 'a) -> 'b -> 'a * 'b| \\
- \indexml{tap}\verb|tap: ('b -> 'a) -> 'b -> 'b| \\
+ \indexdef{}{ML}{op ` }\verb|op ` : ('b -> 'a) -> 'b -> 'a * 'b| \\
+ \indexdef{}{ML}{tap}\verb|tap: ('b -> 'a) -> 'b -> 'b| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -619,14 +619,14 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{is\_some}\verb|is_some: 'a option -> bool| \\
- \indexml{is\_none}\verb|is_none: 'a option -> bool| \\
- \indexml{the}\verb|the: 'a option -> 'a| \\
- \indexml{these}\verb|these: 'a list option -> 'a list| \\
- \indexml{the\_list}\verb|the_list: 'a option -> 'a list| \\
- \indexml{the\_default}\verb|the_default: 'a -> 'a option -> 'a| \\
- \indexml{try}\verb|try: ('a -> 'b) -> 'a -> 'b option| \\
- \indexml{can}\verb|can: ('a -> 'b) -> 'a -> bool| \\
+ \indexdef{}{ML}{is\_some}\verb|is_some: 'a option -> bool| \\
+ \indexdef{}{ML}{is\_none}\verb|is_none: 'a option -> bool| \\
+ \indexdef{}{ML}{the}\verb|the: 'a option -> 'a| \\
+ \indexdef{}{ML}{these}\verb|these: 'a list option -> 'a list| \\
+ \indexdef{}{ML}{the\_list}\verb|the_list: 'a option -> 'a list| \\
+ \indexdef{}{ML}{the\_default}\verb|the_default: 'a -> 'a option -> 'a| \\
+ \indexdef{}{ML}{try}\verb|try: ('a -> 'b) -> 'a -> 'b option| \\
+ \indexdef{}{ML}{can}\verb|can: ('a -> 'b) -> 'a -> bool| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -659,10 +659,10 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{member}\verb|member: ('b * 'a -> bool) -> 'a list -> 'b -> bool| \\
- \indexml{insert}\verb|insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list| \\
- \indexml{remove}\verb|remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list| \\
- \indexml{merge}\verb|merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list| \\
+ \indexdef{}{ML}{member}\verb|member: ('b * 'a -> bool) -> 'a list -> 'b -> bool| \\
+ \indexdef{}{ML}{insert}\verb|insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list| \\
+ \indexdef{}{ML}{remove}\verb|remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list| \\
+ \indexdef{}{ML}{merge}\verb|merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list| \\
\end{mldecls}%
\end{isamarkuptext}%
\isamarkuptrue%
@@ -690,19 +690,19 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexmlexception{AList.DUP}\verb|exception AList.DUP| \\
- \indexml{AList.lookup}\verb|AList.lookup: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option| \\
- \indexml{AList.defined}\verb|AList.defined: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool| \\
- \indexml{AList.update}\verb|AList.update: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.default}\verb|AList.default: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.delete}\verb|AList.delete: ('a * 'b -> bool) -> 'a -> ('b * 'c) list -> ('b * 'c) list| \\
- \indexml{AList.map\_entry}\verb|AList.map_entry: ('a * 'b -> bool) -> 'a|\isasep\isanewline%
+ \indexdef{}{ML exception}{AList.DUP}\verb|exception AList.DUP| \\
+ \indexdef{}{ML}{AList.lookup}\verb|AList.lookup: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> 'c option| \\
+ \indexdef{}{ML}{AList.defined}\verb|AList.defined: ('a * 'b -> bool) -> ('b * 'c) list -> 'a -> bool| \\
+ \indexdef{}{ML}{AList.update}\verb|AList.update: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
+ \indexdef{}{ML}{AList.default}\verb|AList.default: ('a * 'a -> bool) -> ('a * 'b) -> ('a * 'b) list -> ('a * 'b) list| \\
+ \indexdef{}{ML}{AList.delete}\verb|AList.delete: ('a * 'b -> bool) -> 'a -> ('b * 'c) list -> ('b * 'c) list| \\
+ \indexdef{}{ML}{AList.map\_entry}\verb|AList.map_entry: ('a * 'b -> bool) -> 'a|\isasep\isanewline%
\verb| -> ('c -> 'c) -> ('b * 'c) list -> ('b * 'c) list| \\
- \indexml{AList.map\_default}\verb|AList.map_default: ('a * 'a -> bool) -> 'a * 'b -> ('b -> 'b)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.map\_default}\verb|AList.map_default: ('a * 'a -> bool) -> 'a * 'b -> ('b -> 'b)|\isasep\isanewline%
\verb| -> ('a * 'b) list -> ('a * 'b) list| \\
- \indexml{AList.join}\verb|AList.join: ('a * 'a -> bool) -> ('a -> 'b * 'b -> 'b) (*exception DUP*)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.join}\verb|AList.join: ('a * 'a -> bool) -> ('a -> 'b * 'b -> 'b) (*exception DUP*)|\isasep\isanewline%
\verb| -> ('a * 'b) list * ('a * 'b) list -> ('a * 'b) list (*exception AList.DUP*)| \\
- \indexml{AList.merge}\verb|AList.merge: ('a * 'a -> bool) -> ('b * 'b -> bool)|\isasep\isanewline%
+ \indexdef{}{ML}{AList.merge}\verb|AList.merge: ('a * 'a -> bool) -> ('b * 'b -> bool)|\isasep\isanewline%
\verb| -> ('a * 'b) list * ('a * 'b) list -> ('a * 'b) list (*exception AList.DUP*)|
\end{mldecls}%
\end{isamarkuptext}%
@@ -732,25 +732,25 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexmltype{'a Symtab.table}\verb|type 'a Symtab.table| \\
- \indexmlexception{Symtab.DUP}\verb|exception Symtab.DUP of string| \\
- \indexmlexception{Symtab.SAME}\verb|exception Symtab.SAME| \\
- \indexmlexception{Symtab.UNDEF}\verb|exception Symtab.UNDEF of string| \\
- \indexml{Symtab.empty}\verb|Symtab.empty: 'a Symtab.table| \\
- \indexml{Symtab.lookup}\verb|Symtab.lookup: 'a Symtab.table -> string -> 'a option| \\
- \indexml{Symtab.defined}\verb|Symtab.defined: 'a Symtab.table -> string -> bool| \\
- \indexml{Symtab.update}\verb|Symtab.update: (string * 'a) -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.default}\verb|Symtab.default: string * 'a -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.delete}\verb|Symtab.delete: string|\isasep\isanewline%
+ \indexdef{}{ML type}{'a Symtab.table}\verb|type 'a Symtab.table| \\
+ \indexdef{}{ML exception}{Symtab.DUP}\verb|exception Symtab.DUP of string| \\
+ \indexdef{}{ML exception}{Symtab.SAME}\verb|exception Symtab.SAME| \\
+ \indexdef{}{ML exception}{Symtab.UNDEF}\verb|exception Symtab.UNDEF of string| \\
+ \indexdef{}{ML}{Symtab.empty}\verb|Symtab.empty: 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.lookup}\verb|Symtab.lookup: 'a Symtab.table -> string -> 'a option| \\
+ \indexdef{}{ML}{Symtab.defined}\verb|Symtab.defined: 'a Symtab.table -> string -> bool| \\
+ \indexdef{}{ML}{Symtab.update}\verb|Symtab.update: (string * 'a) -> 'a Symtab.table -> 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.default}\verb|Symtab.default: string * 'a -> 'a Symtab.table -> 'a Symtab.table| \\
+ \indexdef{}{ML}{Symtab.delete}\verb|Symtab.delete: string|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table (*exception Symtab.UNDEF*)| \\
- \indexml{Symtab.map\_entry}\verb|Symtab.map_entry: string -> ('a -> 'a)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.map\_entry}\verb|Symtab.map_entry: string -> ('a -> 'a)|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.map\_default}\verb|Symtab.map_default: (string * 'a) -> ('a -> 'a)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.map\_default}\verb|Symtab.map_default: (string * 'a) -> ('a -> 'a)|\isasep\isanewline%
\verb| -> 'a Symtab.table -> 'a Symtab.table| \\
- \indexml{Symtab.join}\verb|Symtab.join: (string -> 'a * 'a -> 'a) (*exception Symtab.DUP/Symtab.SAME*)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.join}\verb|Symtab.join: (string -> 'a * 'a -> 'a) (*exception Symtab.DUP/Symtab.SAME*)|\isasep\isanewline%
\verb| -> 'a Symtab.table * 'a Symtab.table|\isasep\isanewline%
\verb| -> 'a Symtab.table (*exception Symtab.DUP*)| \\
- \indexml{Symtab.merge}\verb|Symtab.merge: ('a * 'a -> bool)|\isasep\isanewline%
+ \indexdef{}{ML}{Symtab.merge}\verb|Symtab.merge: ('a * 'a -> bool)|\isasep\isanewline%
\verb| -> 'a Symtab.table * 'a Symtab.table|\isasep\isanewline%
\verb| -> 'a Symtab.table (*exception Symtab.DUP*)|
\end{mldecls}%
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Prelim.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,896 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Prelim}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Prelim\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Preliminaries%
+}
+\isamarkuptrue%
+%
+\isamarkupsection{Contexts \label{sec:context}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A logical context represents the background that is required for
+ formulating statements and composing proofs. It acts as a medium to
+ produce formal content, depending on earlier material (declarations,
+ results etc.).
+
+ For example, derivations within the Isabelle/Pure logic can be
+ described as a judgment \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}, which means that a
+ proposition \isa{{\isasymphi}} is derivable from hypotheses \isa{{\isasymGamma}}
+ within the theory \isa{{\isasymTheta}}. There are logical reasons for
+ keeping \isa{{\isasymTheta}} and \isa{{\isasymGamma}} separate: theories can be
+ liberal about supporting type constructors and schematic
+ polymorphism of constants and axioms, while the inner calculus of
+ \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}} is strictly limited to Simple Type Theory (with
+ fixed type variables in the assumptions).
+
+ \medskip Contexts and derivations are linked by the following key
+ principles:
+
+ \begin{itemize}
+
+ \item Transfer: monotonicity of derivations admits results to be
+ transferred into a \emph{larger} context, i.e.\ \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}} implies \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\isactrlsub {\isacharprime}\ {\isasymphi}} for contexts \isa{{\isasymTheta}{\isacharprime}\ {\isasymsupseteq}\ {\isasymTheta}} and \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}}.
+
+ \item Export: discharge of hypotheses admits results to be exported
+ into a \emph{smaller} context, i.e.\ \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}
+ implies \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymDelta}\ {\isasymLongrightarrow}\ {\isasymphi}} where \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}} and
+ \isa{{\isasymDelta}\ {\isacharequal}\ {\isasymGamma}{\isacharprime}\ {\isacharminus}\ {\isasymGamma}}. Note that \isa{{\isasymTheta}} remains unchanged here;
+ only the \isa{{\isasymGamma}} part is affected.
+
+ \end{itemize}
+
+ \medskip By modeling the main characteristics of the primitive
+ \isa{{\isasymTheta}} and \isa{{\isasymGamma}} above, and abstracting over any
+ particular logical content, we arrive at the fundamental notions of
+ \emph{theory context} and \emph{proof context} in Isabelle/Isar.
+ These implement a certain policy to manage arbitrary \emph{context
+ data}. There is a strongly-typed mechanism to declare new kinds of
+ data at compile time.
+
+ The internal bootstrap process of Isabelle/Pure eventually reaches a
+ stage where certain data slots provide the logical content of \isa{{\isasymTheta}} and \isa{{\isasymGamma}} sketched above, but the process does not stop there!
+ Various additional data slots support all kinds of mechanisms that
+ are not necessarily part of the core logic.
+
+ For example, there would be data for canonical introduction and
+ elimination rules for arbitrary operators (depending on the
+ object-logic and application), which enables users to perform
+ standard proof steps implicitly (cf.\ the \isa{rule} method
+ \cite{isabelle-isar-ref}).
+
+ \medskip Thus Isabelle/Isar is able to bring forth more and more
+ concepts successively. In particular, an object-logic like
+ Isabelle/HOL continues the Isabelle/Pure setup by adding specific
+ components for automated reasoning (classical reasoner, tableau
+ prover, structured induction etc.) and derived specification
+ mechanisms (inductive predicates, recursive functions etc.). All of
+ this is ultimately based on the generic data management by theory
+ and proof contexts introduced here.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Theory context \label{sec:context-theory}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{theory} is a data container with explicit name and unique
+ identifier. Theories are related by a (nominal) sub-theory
+ relation, which corresponds to the dependency graph of the original
+ construction; each theory is derived from a certain sub-graph of
+ ancestor theories.
+
+ The \isa{merge} operation produces the least upper bound of two
+ theories, which actually degenerates into absorption of one theory
+ into the other (due to the nominal sub-theory relation).
+
+ The \isa{begin} operation starts a new theory by importing
+ several parent theories and entering a special \isa{draft} mode,
+ which is sustained until the final \isa{end} operation. A draft
+ theory acts like a linear type, where updates invalidate earlier
+ versions. An invalidated draft is called ``stale''.
+
+ The \isa{checkpoint} operation produces an intermediate stepping
+ stone that will survive the next update: both the original and the
+ changed theory remain valid and are related by the sub-theory
+ relation. Checkpointing essentially recovers purely functional
+ theory values, at the expense of some extra internal bookkeeping.
+
+ The \isa{copy} operation produces an auxiliary version that has
+ the same data content, but is unrelated to the original: updates of
+ the copy do not affect the original, neither does the sub-theory
+ relation hold.
+
+ \medskip The example in \figref{fig:ex-theory} below shows a theory
+ graph derived from \isa{Pure}, with theory \isa{Length}
+ importing \isa{Nat} and \isa{List}. The body of \isa{Length} consists of a sequence of updates, working mostly on
+ drafts. Intermediate checkpoints may occur as well, due to the
+ history mechanism provided by the Isar top-level, cf.\
+ \secref{sec:isar-toplevel}.
+
+ \begin{figure}[htb]
+ \begin{center}
+ \begin{tabular}{rcccl}
+ & & \isa{Pure} \\
+ & & \isa{{\isasymdown}} \\
+ & & \isa{FOL} \\
+ & $\swarrow$ & & $\searrow$ & \\
+ \isa{Nat} & & & & \isa{List} \\
+ & $\searrow$ & & $\swarrow$ \\
+ & & \isa{Length} \\
+ & & \multicolumn{3}{l}{~~\hyperlink{keyword.imports}{\mbox{\isa{\isakeyword{imports}}}}} \\
+ & & \multicolumn{3}{l}{~~\hyperlink{keyword.begin}{\mbox{\isa{\isakeyword{begin}}}}} \\
+ & & $\vdots$~~ \\
+ & & \isa{{\isasymbullet}}~~ \\
+ & & $\vdots$~~ \\
+ & & \isa{{\isasymbullet}}~~ \\
+ & & $\vdots$~~ \\
+ & & \multicolumn{3}{l}{~~\hyperlink{command.end}{\mbox{\isa{\isacommand{end}}}}} \\
+ \end{tabular}
+ \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
+ \end{center}
+ \end{figure}
+
+ \medskip There is a separate notion of \emph{theory reference} for
+ maintaining a live link to an evolving theory context: updates on
+ drafts are propagated automatically. Dynamic updating stops after
+ an explicit \isa{end} only.
+
+ Derived entities may store a theory reference in order to indicate
+ the context they belong to. This implicitly assumes monotonic
+ reasoning, because the referenced context may become larger without
+ further notice.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{theory}\verb|type theory| \\
+ \indexdef{}{ML}{Theory.subthy}\verb|Theory.subthy: theory * theory -> bool| \\
+ \indexdef{}{ML}{Theory.merge}\verb|Theory.merge: theory * theory -> theory| \\
+ \indexdef{}{ML}{Theory.checkpoint}\verb|Theory.checkpoint: theory -> theory| \\
+ \indexdef{}{ML}{Theory.copy}\verb|Theory.copy: theory -> theory| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{theory\_ref}\verb|type theory_ref| \\
+ \indexdef{}{ML}{Theory.deref}\verb|Theory.deref: theory_ref -> theory| \\
+ \indexdef{}{ML}{Theory.check\_thy}\verb|Theory.check_thy: theory -> theory_ref| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|theory| represents theory contexts. This is
+ essentially a linear type! Most operations destroy the original
+ version, which then becomes ``stale''.
+
+ \item \verb|Theory.subthy|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
+ compares theories according to the inherent graph structure of the
+ construction. This sub-theory relation is a nominal approximation
+ of inclusion (\isa{{\isasymsubseteq}}) of the corresponding content.
+
+ \item \verb|Theory.merge|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
+ absorbs one theory into the other. This fails for unrelated
+ theories!
+
+ \item \verb|Theory.checkpoint|~\isa{thy} produces a safe
+ stepping stone in the linear development of \isa{thy}. The next
+ update will result in two related, valid theories.
+
+ \item \verb|Theory.copy|~\isa{thy} produces a variant of \isa{thy} that holds a copy of the same data. The result is not
+ related to the original; the original is unchanged.
+
+ \item \verb|theory_ref| represents a sliding reference to an
+ always valid theory; updates on the original are propagated
+ automatically.
+
+ \item \verb|Theory.deref|~\isa{thy{\isacharunderscore}ref} turns a \verb|theory_ref| into a \verb|theory| value. As the referenced
+ theory evolves monotonically over time, later invocations of \verb|Theory.deref| may refer to a larger context.
+
+ \item \verb|Theory.check_thy|~\isa{thy} produces a \verb|theory_ref| from a valid \verb|theory| value.
+
+ \end{description}%
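+
+ \medskip For illustration, a minimal Isabelle/ML sketch of these
+ operations; the binding \verb|thy| is assumed to denote some valid
+ theory value (e.g.\ obtained via the \verb|@{theory}| antiquotation):
+
+\begin{verbatim}
+  val thy' = Theory.checkpoint thy;   (*safe stepping stone*)
+  val b = Theory.subthy (thy, thy');  (*expected: true*)
+  val thy'' = Theory.copy thy';       (*unrelated variant*)
+  val r = Theory.check_thy thy';      (*live reference*)
+  val thy''' = Theory.deref r;        (*back to a theory value*)
+\end{verbatim}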
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Proof context \label{sec:context-proof}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A proof context is a container for pure data with a back-reference
+ to the theory it belongs to. The \isa{init} operation creates a
+ proof context from a given theory. Modifications to draft theories
+ are propagated to the proof context as usual, but there is also an
+ explicit \isa{transfer} operation to force resynchronization
+ with more substantial updates to the underlying theory. The actual
+ context data does not require any special bookkeeping, thanks to the
+ lack of destructive features.
+
+ Entities derived in a proof context need to record inherent logical
+ requirements explicitly, since there is no separate context
+ identification as for theories. For example, hypotheses used in
+ primitive derivations (cf.\ \secref{sec:thms}) are recorded
+ separately within the sequent \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}}, just to make doubly
+ sure. Results could still leak into an alien proof context due to
+ programming errors, but Isabelle/Isar includes some extra validity
+ checks in critical positions, notably at the end of a sub-proof.
+
+ Proof contexts may be manipulated arbitrarily, although the common
+ discipline is to follow block structure as a mental model: a given
+ context is extended consecutively, and results are exported back
+ into the original context. Note that the Isar proof states model
+ block-structured reasoning explicitly, using a stack of proof
+ contexts internally.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{Proof.context}\verb|type Proof.context| \\
+ \indexdef{}{ML}{ProofContext.init}\verb|ProofContext.init: theory -> Proof.context| \\
+ \indexdef{}{ML}{ProofContext.theory\_of}\verb|ProofContext.theory_of: Proof.context -> theory| \\
+ \indexdef{}{ML}{ProofContext.transfer}\verb|ProofContext.transfer: theory -> Proof.context -> Proof.context| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Proof.context| represents proof contexts. Elements
+ of this type are essentially pure values, with a sliding reference
+ to the background theory.
+
+ \item \verb|ProofContext.init|~\isa{thy} produces a proof context
+ derived from \isa{thy}, initializing all data.
+
+ \item \verb|ProofContext.theory_of|~\isa{ctxt} selects the
+ background theory from \isa{ctxt}, dereferencing its internal
+ \verb|theory_ref|.
+
+ \item \verb|ProofContext.transfer|~\isa{thy\ ctxt} promotes the
+ background theory of \isa{ctxt} to the super theory \isa{thy}.
+
+ \end{description}%
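+
+ \medskip A minimal usage sketch, assuming \verb|thy| denotes some
+ theory value:
+
+\begin{verbatim}
+  val ctxt = ProofContext.init thy;        (*fresh proof context*)
+  val thy' = ProofContext.theory_of ctxt;  (*back to the theory*)
+\end{verbatim}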
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Generic contexts \label{sec:generic-context}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A generic context is the disjoint sum of either a theory or proof
+ context. Occasionally, this enables uniform treatment of generic
+ context data, typically extra-logical information. Operations on
+ generic contexts include the usual injections, partial selections,
+ and combinators for lifting operations on either component of the
+ disjoint sum.
+
+ Moreover, there are total operations \isa{theory{\isacharunderscore}of} and \isa{proof{\isacharunderscore}of} to convert a generic context into either kind: a theory
+ can always be selected from the sum, while a proof context might
+ have to be constructed by an ad-hoc \isa{init} operation.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{Context.generic}\verb|type Context.generic| \\
+ \indexdef{}{ML}{Context.theory\_of}\verb|Context.theory_of: Context.generic -> theory| \\
+ \indexdef{}{ML}{Context.proof\_of}\verb|Context.proof_of: Context.generic -> Proof.context| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Context.generic| is the direct sum of \verb|theory| and \verb|Proof.context|, with the datatype
+ constructors \verb|Context.Theory| and \verb|Context.Proof|.
+
+ \item \verb|Context.theory_of|~\isa{context} always produces a
+ theory from the generic \isa{context}, using \verb|ProofContext.theory_of| as required.
+
+ \item \verb|Context.proof_of|~\isa{context} always produces a
+ proof context from the generic \isa{context}, using \verb|ProofContext.init| as required (note that this re-initializes the
+ context data with each invocation).
+
+ \end{description}%
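+
+ \medskip The following sketch shows what \verb|Context.theory_of|
+ amounts to, disregarding the actual implementation:
+
+\begin{verbatim}
+  fun theory_of (Context.Theory thy) = thy
+    | theory_of (Context.Proof ctxt) = ProofContext.theory_of ctxt;
+\end{verbatim}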
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Context data \label{sec:context-data}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The main purpose of theory and proof contexts is to manage arbitrary
+ data. New data types can be declared incrementally at compile time.
+ There are separate declaration mechanisms for any of the three kinds
+ of contexts: theory, proof, generic.
+
+ \paragraph{Theory data} may refer to destructive entities, which are
+ maintained in direct correspondence to the linear evolution of
+ theory values, including explicit copies.\footnote{Most existing
+ instances of destructive theory data are merely historical relics
+ (e.g.\ the destructive theorem storage, and destructive hints for
+ the Simplifier and Classical rules).} A theory data declaration
+ needs to implement the following SML signature:
+
+ \medskip
+ \begin{tabular}{ll}
+ \isa{{\isasymtype}\ T} & representing type \\
+ \isa{{\isasymval}\ empty{\isacharcolon}\ T} & empty default value \\
+ \isa{{\isasymval}\ copy{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & refresh impure data \\
+ \isa{{\isasymval}\ extend{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & re-initialize on import \\
+ \isa{{\isasymval}\ merge{\isacharcolon}\ T\ {\isasymtimes}\ T\ {\isasymrightarrow}\ T} & join on import \\
+ \end{tabular}
+ \medskip
+
+ \noindent The \isa{empty} value acts as initial default for
+ \emph{any} theory that does not declare actual data content; \isa{copy} maintains persistent integrity for impure data (it is just
+ the identity for pure values); \isa{extend} acts like a unitary
+ version of \isa{merge}; both operations should also include the
+ functionality of \isa{copy} for impure data.
+
+ \paragraph{Proof context data} is purely functional. A declaration
+ needs to implement the following SML signature:
+
+ \medskip
+ \begin{tabular}{ll}
+ \isa{{\isasymtype}\ T} & representing type \\
+ \isa{{\isasymval}\ init{\isacharcolon}\ theory\ {\isasymrightarrow}\ T} & produce initial value \\
+ \end{tabular}
+ \medskip
+
+ \noindent The \isa{init} operation is supposed to produce a pure
+ value from the given background theory.
+
+ \paragraph{Generic data} provides a hybrid interface for both theory
+ and proof data. The declaration is essentially the same as for
+ (pure) theory data, without \isa{copy}. The \isa{init}
+ operation for proof contexts merely selects the current data value
+ from the background theory.
+
+ \bigskip A data declaration of type \isa{T} results in the
+ following interface:
+
+ \medskip
+ \begin{tabular}{ll}
+ \isa{init{\isacharcolon}\ theory\ {\isasymrightarrow}\ T} \\
+ \isa{get{\isacharcolon}\ context\ {\isasymrightarrow}\ T} \\
+ \isa{put{\isacharcolon}\ T\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\
+ \isa{map{\isacharcolon}\ {\isacharparenleft}T\ {\isasymrightarrow}\ T{\isacharparenright}\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\
+ \end{tabular}
+ \medskip
+
+ \noindent Here \isa{init} is only applicable to impure theory
+ data, to install a fresh copy persistently (a destructive update on
+ uninitialized data has no permanent effect). The other operations provide
+ access for the particular kind of context (theory, proof, or generic
+ context). Note that this is a safe interface: there is no other way
+ to access the corresponding data slot of a context. By keeping
+ these operations private, a component may maintain abstract values
+ authentically, without other components interfering.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML functor}{TheoryDataFun}\verb|functor TheoryDataFun| \\
+ \indexdef{}{ML functor}{ProofDataFun}\verb|functor ProofDataFun| \\
+ \indexdef{}{ML functor}{GenericDataFun}\verb|functor GenericDataFun| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|TheoryDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} declares data for
+ type \verb|theory| according to the specification provided as
+ argument structure. The resulting structure provides data init and
+ access operations as described above.
+
+ \item \verb|ProofDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to
+ \verb|TheoryDataFun| for type \verb|Proof.context|.
+
+ \item \verb|GenericDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to
+ \verb|TheoryDataFun| for type \verb|Context.generic|.
+
+ \end{description}%
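+
+ \medskip For illustration, a hypothetical data slot maintaining a
+ list of terms as theory data; this is a schematic sketch following
+ the signature table above (the structure name \verb|Terms| and the
+ particular merge policy are merely examples, and the concrete
+ functor interface may differ in detail):
+
+\begin{verbatim}
+  structure Terms = TheoryDataFun
+  (
+    type T = term list;
+    val empty = [];
+    val copy = I;   (*pure data: identity suffices*)
+    val extend = I;
+    fun merge (ts1, ts2) = Library.merge (op aconv) (ts1, ts2);
+  );
+
+  val add_term = Terms.map o cons;   (*update the slot*)
+  val get_terms = Terms.get;         (*access current value*)
+\end{verbatim}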
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Names \label{sec:names}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+In principle, a name is just a string, but there are various
+ conventions for encoding additional structure. For example, ``\isa{Foo{\isachardot}bar{\isachardot}baz}'' is considered a qualified name consisting of
+ three basic name components. The individual constituents of a name
+ may have further substructure, e.g.\ the string
+ ``\verb,\,\verb,<alpha>,'' is encoded as a single symbol.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Strings of symbols%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{symbol} constitutes the smallest textual unit in Isabelle
+ --- raw characters are normally not encountered at all. Isabelle
+ strings consist of a sequence of symbols, represented as a packed
+ string or a list of strings. Each symbol is in itself a small
+ string, which has one of the following forms:
+
+ \begin{enumerate}
+
+ \item a single ASCII character ``\isa{c}'', for example
+ ``\verb,a,'',
+
+ \item a regular symbol ``\verb,\,\verb,<,\isa{ident}\verb,>,'',
+ for example ``\verb,\,\verb,<alpha>,'',
+
+ \item a control symbol ``\verb,\,\verb,<^,\isa{ident}\verb,>,'',
+ for example ``\verb,\,\verb,<^bold>,'',
+
+ \item a raw symbol ``\verb,\,\verb,<^raw:,\isa{text}\verb,>,''
+ where \isa{text} consists of printable characters excluding
+ ``\verb,.,'' and ``\verb,>,'', for example
+ ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
+
+ \item a numbered raw control symbol ``\verb,\,\verb,<^raw,\isa{n}\verb,>,'' where \isa{n} consists of digits, for example
+ ``\verb,\,\verb,<^raw42>,''.
+
+ \end{enumerate}
+
+ \noindent The \isa{ident} syntax for symbol names is \isa{letter\ {\isacharparenleft}letter\ {\isacharbar}\ digit{\isacharparenright}\isactrlsup {\isacharasterisk}}, where \isa{letter\ {\isacharequal}\ A{\isachardot}{\isachardot}Za{\isachardot}{\isachardot}z} and \isa{digit\ {\isacharequal}\ {\isadigit{0}}{\isachardot}{\isachardot}{\isadigit{9}}}. There are infinitely many
+ regular symbols and control symbols, but a fixed collection of
+ standard symbols is treated specifically. For example,
+ ``\verb,\,\verb,<alpha>,'' is classified as a letter, which means it
+ may occur within regular Isabelle identifiers.
+
+ Since the character set underlying Isabelle symbols is 7-bit ASCII
+ and 8-bit characters are passed through transparently, Isabelle may
+ also process Unicode/UCS data in UTF-8 encoding. Unicode provides
+ its own collection of mathematical symbols, but there is no built-in
+ link to the standard collection of Isabelle.
+
+ \medskip Output of Isabelle symbols depends on the print mode
+ (\secref{print-mode}). For example, the standard {\LaTeX} setup of
+ the Isabelle document preparation system would present
+ ``\verb,\,\verb,<alpha>,'' as \isa{{\isasymalpha}}, and
+ ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as \isa{\isactrlbold {\isasymalpha}}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{Symbol.symbol}\verb|type Symbol.symbol| \\
+ \indexdef{}{ML}{Symbol.explode}\verb|Symbol.explode: string -> Symbol.symbol list| \\
+ \indexdef{}{ML}{Symbol.is\_letter}\verb|Symbol.is_letter: Symbol.symbol -> bool| \\
+ \indexdef{}{ML}{Symbol.is\_digit}\verb|Symbol.is_digit: Symbol.symbol -> bool| \\
+ \indexdef{}{ML}{Symbol.is\_quasi}\verb|Symbol.is_quasi: Symbol.symbol -> bool| \\
+ \indexdef{}{ML}{Symbol.is\_blank}\verb|Symbol.is_blank: Symbol.symbol -> bool| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{Symbol.sym}\verb|type Symbol.sym| \\
+ \indexdef{}{ML}{Symbol.decode}\verb|Symbol.decode: Symbol.symbol -> Symbol.sym| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Symbol.symbol| represents individual Isabelle
+ symbols; this is an alias for \verb|string|.
+
+ \item \verb|Symbol.explode|~\isa{str} produces a symbol list
+ from the packed form. This function supersedes \verb|String.explode| for virtually all purposes of manipulating text in
+ Isabelle!
+
+ \item \verb|Symbol.is_letter|, \verb|Symbol.is_digit|, \verb|Symbol.is_quasi|, \verb|Symbol.is_blank| classify standard
+ symbols according to fixed syntactic conventions of Isabelle, cf.\
+ \cite{isabelle-isar-ref}.
+
+ \item \verb|Symbol.sym| is a concrete datatype that represents
+ the different kinds of symbols explicitly, with constructors \verb|Symbol.Char|, \verb|Symbol.Sym|, \verb|Symbol.Ctrl|, \verb|Symbol.Raw|.
+
+ \item \verb|Symbol.decode| converts the string representation of a
+ symbol into the datatype version.
+
+ \end{description}%
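+
+ \medskip For illustration (note that within SML string literals the
+ backslash needs to be escaped, so \verb|"\\<alpha>"| denotes the
+ symbol \verb,\,\verb,<alpha>,):
+
+\begin{verbatim}
+  Symbol.explode "a\\<alpha>\\<^bold>b";
+  (*= ["a", "\\<alpha>", "\\<^bold>", "b"]*)
+  Symbol.is_letter "\\<alpha>";   (*true*)
+  Symbol.is_digit "7";            (*true*)
+\end{verbatim}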
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Basic names \label{sec:basic-names}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{basic name} essentially consists of a single Isabelle
+ identifier. There are conventions to mark separate classes of basic
+ names, by attaching a suffix of underscores: one underscore means
+ \emph{internal name}, two underscores means \emph{Skolem name},
+ three underscores means \emph{internal Skolem name}.
+
+ For example, the basic name \isa{foo} has the internal version
+ \isa{foo{\isacharunderscore}}, with Skolem versions \isa{foo{\isacharunderscore}{\isacharunderscore}} and \isa{foo{\isacharunderscore}{\isacharunderscore}{\isacharunderscore}}, respectively.
+
+ These special versions provide copies of the basic name space, apart
+ from anything that normally appears in the user text. For example,
+ system-generated variables in Isar proof contexts are usually marked
+ as internal, which prevents mysterious name references like \isa{xaa} from appearing in the text.
+
+ \medskip Manipulating binding scopes often requires on-the-fly
+ renamings. A \emph{name context} contains a collection of already
+ used names. The \isa{declare} operation adds names to the
+ context.
+
+ The \isa{invents} operation derives a number of fresh names from
+ a given starting point. For example, the first three names derived
+ from \isa{a} are \isa{a}, \isa{b}, \isa{c}.
+
+ The \isa{variants} operation produces fresh names by
+ incrementing tentative names as base-26 numbers (with digits \isa{a{\isachardot}{\isachardot}z}) until all clashes are resolved. For example, name \isa{foo} results in variants \isa{fooa}, \isa{foob}, \isa{fooc}, \dots, \isa{fooaa}, \isa{fooab} etc.; each renaming
+ step picks the next unused variant from this sequence.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{Name.internal}\verb|Name.internal: string -> string| \\
+ \indexdef{}{ML}{Name.skolem}\verb|Name.skolem: string -> string| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{Name.context}\verb|type Name.context| \\
+ \indexdef{}{ML}{Name.context}\verb|Name.context: Name.context| \\
+ \indexdef{}{ML}{Name.declare}\verb|Name.declare: string -> Name.context -> Name.context| \\
+ \indexdef{}{ML}{Name.invents}\verb|Name.invents: Name.context -> string -> int -> string list| \\
+ \indexdef{}{ML}{Name.variants}\verb|Name.variants: string list -> Name.context -> string list * Name.context| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Name.internal|~\isa{name} produces an internal name
+ by adding one underscore.
+
+ \item \verb|Name.skolem|~\isa{name} produces a Skolem name by
+ adding two underscores.
+
+ \item \verb|Name.context| represents the context of already used
+ names; the initial value is \verb|Name.context|.
+
+ \item \verb|Name.declare|~\isa{name} enters a used name into the
+ context.
+
+ \item \verb|Name.invents|~\isa{context\ name\ n} produces \isa{n} fresh names derived from \isa{name}.
+
+ \item \verb|Name.variants|~\isa{names\ context} produces fresh
+ variants of \isa{names}; the result is entered into the context.
+
+ \end{description}%
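+
+ \medskip Some illustrative applications, with results as suggested
+ by the conventions described above:
+
+\begin{verbatim}
+  Name.internal "foo";   (*= "foo_"*)
+  Name.skolem "foo";     (*= "foo__"*)
+
+  Name.invents Name.context "a" 3;
+  (*= ["a", "b", "c"]*)
+
+  Name.variants ["foo", "foo"] (Name.declare "foo" Name.context);
+  (*first component: ["fooa", "foob"]*)
+\end{verbatim}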
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Indexed names%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+An \emph{indexed name} (or \isa{indexname}) is a pair of a basic
+ name and a natural number. This representation allows efficient
+ renaming by incrementing the second component only. The canonical
+ way to rename two collections of indexnames apart from each other is
+ this: determine the maximum index \isa{maxidx} of the first
+ collection, then increment all indexes of the second collection by
+ \isa{maxidx\ {\isacharplus}\ {\isadigit{1}}}; the maximum index of an empty collection is
+ \isa{{\isacharminus}{\isadigit{1}}}.
+
+ Occasionally, basic names and indexed names are injected into the
+ same pair type: the (improper) indexname \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} is used
+ to encode basic names.
+
+ \medskip Isabelle syntax observes the following rules for
+ representing an indexname \isa{{\isacharparenleft}x{\isacharcomma}\ i{\isacharparenright}} as a packed string:
+
+ \begin{itemize}
+
+ \item \isa{{\isacharquery}x} if \isa{x} does not end with a digit and \isa{i\ {\isacharequal}\ {\isadigit{0}}},
+
+ \item \isa{{\isacharquery}xi} if \isa{x} does not end with a digit,
+
+ \item \isa{{\isacharquery}x{\isachardot}i} otherwise.
+
+ \end{itemize}
+
+ Indexnames may acquire large index numbers over time. Results are
+ normalized towards \isa{{\isadigit{0}}} at certain checkpoints, notably at
+ the end of a proof. This works by producing variants of the
+ corresponding basic name components. For example, the collection
+ \isa{{\isacharquery}x{\isadigit{1}}{\isacharcomma}\ {\isacharquery}x{\isadigit{7}}{\isacharcomma}\ {\isacharquery}x{\isadigit{4}}{\isadigit{2}}} becomes \isa{{\isacharquery}x{\isacharcomma}\ {\isacharquery}xa{\isacharcomma}\ {\isacharquery}xb}.%
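+
+ \medskip The following sketch re-implements the three packing rules
+ literally, for illustration only (the real printer is part of the
+ term syntax machinery):
+
+\begin{verbatim}
+  fun pack (x, i) =
+    let val d = Symbol.is_digit (List.last (Symbol.explode x)) in
+      if not d andalso i = 0 then "?" ^ x
+      else if not d then "?" ^ x ^ string_of_int i
+      else "?" ^ x ^ "." ^ string_of_int i
+    end;
+\end{verbatim}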
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{indexname}\verb|type indexname| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|indexname| represents indexed names. This is an
+ abbreviation for \verb|string * int|. The second component is
+ usually non-negative, except for situations where \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}}
+ is used to embed basic names into this type.
+
+ \end{description}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Qualified names and name spaces%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{qualified name} consists of a non-empty sequence of basic
+ name components. The packed representation uses a dot as separator,
+ as in ``\isa{A{\isachardot}b{\isachardot}c}''. The last component is called \emph{base}
+ name, the remaining prefix \emph{qualifier} (which may be empty).
+ The idea of qualified names is to encode nested structures by
+ recording the access paths as qualifiers. For example, an item
+ named ``\isa{A{\isachardot}b{\isachardot}c}'' may be understood as a local entity \isa{c}, within a local structure \isa{b}, within a global
+ structure \isa{A}. Typically, name space hierarchies consist of
+ 1--2 levels of qualification, but this need not always be so.
+
+ The empty name is commonly used as an indication of unnamed
+ entities, whenever this makes any sense. The basic operations on
+ qualified names are smart enough to pass through such improper names
+ unchanged.
+
+ \medskip A \isa{naming} policy tells how to turn a name
+ specification into a fully qualified internal name (by the \isa{full} operation), and how fully qualified names may be accessed
+ externally. For example, the default naming policy is to prefix an
+ implicit path: \isa{full\ x} produces \isa{path{\isachardot}x}, and the
+ standard accesses for \isa{path{\isachardot}x} include both \isa{x} and
+ \isa{path{\isachardot}x}. Normally, the naming is implicit in the theory or
+ proof context; there are separate versions of the corresponding operations.
+
+ \medskip A \isa{name\ space} manages a collection of fully
+ internalized names, together with a mapping between external names
+ and internal names (in both directions). The corresponding \isa{intern} and \isa{extern} operations are mostly used for
+ parsing and printing only! The \isa{declare} operation augments
+ a name space according to the accesses determined by the naming
+ policy.
+
+ \medskip As a general principle, there is a separate name space for
+ each kind of formal entity, e.g.\ logical constant, type
+ constructor, type class, theorem. It is usually clear from the
+ occurrence in concrete syntax (or from the scope) which kind of
+ entity a name refers to. For example, the very same name \isa{c} may be used uniformly for a constant, type constructor, and
+ type class.
+
+ There are common schemes to name theorems systematically, according
+ to the name of the main logical entity involved, e.g.\ \isa{c{\isachardot}intro} for a canonical theorem related to constant \isa{c}.
+ This technique of mapping names from one space into another requires
+ some care in order to avoid conflicts. In particular, theorem names
+ derived from a type constructor or type class are better suffixed in
+ addition to the usual qualification, e.g.\ \isa{c{\isacharunderscore}type{\isachardot}intro}
+ and \isa{c{\isacharunderscore}class{\isachardot}intro} for theorems related to type \isa{c}
+ and class \isa{c}, respectively.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{NameSpace.base}\verb|NameSpace.base: string -> string| \\
+ \indexdef{}{ML}{NameSpace.qualifier}\verb|NameSpace.qualifier: string -> string| \\
+ \indexdef{}{ML}{NameSpace.append}\verb|NameSpace.append: string -> string -> string| \\
+ \indexdef{}{ML}{NameSpace.implode}\verb|NameSpace.implode: string list -> string| \\
+ \indexdef{}{ML}{NameSpace.explode}\verb|NameSpace.explode: string -> string list| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{NameSpace.naming}\verb|type NameSpace.naming| \\
+ \indexdef{}{ML}{NameSpace.default\_naming}\verb|NameSpace.default_naming: NameSpace.naming| \\
+ \indexdef{}{ML}{NameSpace.add\_path}\verb|NameSpace.add_path: string -> NameSpace.naming -> NameSpace.naming| \\
+ \indexdef{}{ML}{NameSpace.full\_name}\verb|NameSpace.full_name: NameSpace.naming -> binding -> string| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML type}{NameSpace.T}\verb|type NameSpace.T| \\
+ \indexdef{}{ML}{NameSpace.empty}\verb|NameSpace.empty: NameSpace.T| \\
+ \indexdef{}{ML}{NameSpace.merge}\verb|NameSpace.merge: NameSpace.T * NameSpace.T -> NameSpace.T| \\
+ \indexdef{}{ML}{NameSpace.declare}\verb|NameSpace.declare: NameSpace.naming -> binding -> NameSpace.T -> string * NameSpace.T| \\
+ \indexdef{}{ML}{NameSpace.intern}\verb|NameSpace.intern: NameSpace.T -> string -> string| \\
+ \indexdef{}{ML}{NameSpace.extern}\verb|NameSpace.extern: NameSpace.T -> string -> string| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|NameSpace.base|~\isa{name} returns the base name of a
+ qualified name.
+
+ \item \verb|NameSpace.qualifier|~\isa{name} returns the qualifier
+ of a qualified name.
+
+ \item \verb|NameSpace.append|~\isa{name\isactrlisub {\isadigit{1}}\ name\isactrlisub {\isadigit{2}}}
+ appends two qualified names.
+
+ \item \verb|NameSpace.implode|~\isa{name} and \verb|NameSpace.explode|~\isa{names} convert between the packed string
+ representation and the explicit list form of qualified names.
+
+ \item \verb|NameSpace.naming| represents the abstract concept of
+ a naming policy.
+
+ \item \verb|NameSpace.default_naming| is the default naming policy.
+ In a theory context, this is usually augmented by a path prefix
+ consisting of the theory name.
+
+ \item \verb|NameSpace.add_path|~\isa{path\ naming} augments the
+ naming policy by extending its path component.
+
+ \item \verb|NameSpace.full_name|~\isa{naming\ binding} turns a name
+ binding (usually a basic name) into the fully qualified
+ internal name, according to the given naming policy.
+
+ \item \verb|NameSpace.T| represents name spaces.
+
+ \item \verb|NameSpace.empty| and \verb|NameSpace.merge|~\isa{{\isacharparenleft}space\isactrlisub {\isadigit{1}}{\isacharcomma}\ space\isactrlisub {\isadigit{2}}{\isacharparenright}} are the canonical operations for
+ maintaining name spaces according to theory data management
+ (\secref{sec:context-data}).
+
+ \item \verb|NameSpace.declare|~\isa{naming\ binding\ space} enters a
+ name binding as a fully qualified internal name into the name space,
+ with external accesses determined by the naming policy.
+
+ \item \verb|NameSpace.intern|~\isa{space\ name} internalizes a
+ (partially qualified) external name.
+
+ This operation is mostly for parsing! Note that fully qualified
+ names stemming from declarations are produced via \verb|NameSpace.full_name| and \verb|NameSpace.declare|
+ (or their derivatives for \verb|theory| and
+ \verb|Proof.context|).
+
+ \item \verb|NameSpace.extern|~\isa{space\ name} externalizes a
+ (fully qualified) internal name.
+
+ This operation is mostly for printing! Note that unqualified names are
+ produced via \verb|NameSpace.base|.
+
+ \end{description}%
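+
+ \medskip The basic string operations at work:
+
+\begin{verbatim}
+  NameSpace.explode "A.b.c";          (*= ["A", "b", "c"]*)
+  NameSpace.implode ["A", "b", "c"];  (*= "A.b.c"*)
+  NameSpace.base "A.b.c";             (*= "c"*)
+  NameSpace.qualifier "A.b.c";        (*= "A.b"*)
+  NameSpace.append "A.b" "c";         (*= "A.b.c"*)
+\end{verbatim}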
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Proof.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,394 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Proof}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Proof\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Structured proofs%
+}
+\isamarkuptrue%
+%
+\isamarkupsection{Variables \label{sec:variables}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Any variable that is not explicitly bound by \isa{{\isasymlambda}}-abstraction
+ is considered ``free''. Logically, free variables act like
+ outermost universal quantification at the sequent level: \isa{A\isactrlisub {\isadigit{1}}{\isacharparenleft}x{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n{\isacharparenleft}x{\isacharparenright}\ {\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} means that the result
+ holds \emph{for all} values of \isa{x}. Free variables for
+ terms (not types) can be fully internalized into the logic: \isa{{\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} and \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} are interchangeable, provided
+ that \isa{x} does not occur elsewhere in the context.
+ Inspecting \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} more closely, we see that inside the
+ quantifier, \isa{x} is essentially ``arbitrary, but fixed'',
+ while from outside it appears as a place-holder for instantiation
+ (thanks to \isa{{\isasymAnd}} elimination).
+
+ The Pure logic represents the idea of variables being either inside
+ or outside the current scope by providing separate syntactic
+ categories for \emph{fixed variables} (e.g.\ \isa{x}) vs.\
+ \emph{schematic variables} (e.g.\ \isa{{\isacharquery}x}). Incidentally, a
+ universal result \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} has the HHF normal form \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}x{\isacharparenright}}, which represents its generality nicely without requiring
+ an explicit quantifier. The same principle works for type
+ variables: \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}{\isasymalpha}{\isacharparenright}} represents the idea of ``\isa{{\isasymturnstile}\ {\isasymforall}{\isasymalpha}{\isachardot}\ B{\isacharparenleft}{\isasymalpha}{\isacharparenright}}'' without demanding a truly polymorphic framework.
+
+ \medskip Additional care is required to treat type variables in a
+ way that facilitates type-inference. In principle, term variables
+ depend on type variables, which means that type variables would have
+ to be declared first. For example, a raw type-theoretic framework
+ would demand the context to be constructed in stages as follows:
+ \isa{{\isasymGamma}\ {\isacharequal}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ x{\isacharcolon}\ {\isasymalpha}{\isacharcomma}\ a{\isacharcolon}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}.
+
+ We allow a slightly less formalistic mode of operation: term
+ variables \isa{x} are fixed without specifying a type yet
+ (essentially \emph{all} potential occurrences of some instance
+ \isa{x\isactrlisub {\isasymtau}} are fixed); the first occurrence of \isa{x}
+ within a specific term assigns its most general type, which is then
+ maintained consistently in the context. The above example becomes
+ \isa{{\isasymGamma}\ {\isacharequal}\ x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}, where type \isa{{\isasymalpha}} is fixed \emph{after} term \isa{x}, and the constraint
+ \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}} is an implicit consequence of the occurrence of
+ \isa{x\isactrlisub {\isasymalpha}} in the subsequent proposition.
+
+ This twist of dependencies is also accommodated by the reverse
+ operation of exporting results from a context: a type variable
+ \isa{{\isasymalpha}} is considered fixed as long as it occurs in some fixed
+ term variable of the context. For example, exporting \isa{x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} produces in the first step
+ \isa{x{\isacharcolon}\ term\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} for fixed \isa{{\isasymalpha}},
+ and only in the second step \isa{{\isasymturnstile}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}\ {\isacharequal}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}} for schematic \isa{{\isacharquery}x} and \isa{{\isacharquery}{\isasymalpha}}.
+
+ \medskip The Isabelle/Isar proof context manages the gory details of
+ term vs.\ type variables, with high-level principles for moving the
+ frontier between fixed and schematic variables.
+
+ The \isa{add{\isacharunderscore}fixes} operation explicitly declares fixed
+ variables; the \isa{declare{\isacharunderscore}term} operation absorbs a term into
+ a context by fixing new type variables and adding syntactic
+ constraints.
+
+ The \isa{export} operation is able to perform the main work of
+ generalizing term and type variables as sketched above, assuming
+ that fixed variables and terms have been declared properly.
+
+ The \isa{import} operation makes a generalized fact a genuine
+ part of the context, by inventing fixed variables for the schematic
+ ones. The effect can be reversed by using \isa{export} later,
+ potentially with an extended context; the result is equivalent to
+ the original modulo renaming of schematic variables.
+
+ The \isa{focus} operation provides a variant of \isa{import}
+ for nested propositions (with explicit quantification): \isa{{\isasymAnd}x\isactrlisub {\isadigit{1}}\ {\isasymdots}\ x\isactrlisub n{\isachardot}\ B{\isacharparenleft}x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n{\isacharparenright}} is
+ decomposed by inventing fixed variables \isa{x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n} for the body.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{Variable.add\_fixes}\verb|Variable.add_fixes: |\isasep\isanewline%
+\verb| string list -> Proof.context -> string list * Proof.context| \\
+ \indexdef{}{ML}{Variable.variant\_fixes}\verb|Variable.variant_fixes: |\isasep\isanewline%
+\verb| string list -> Proof.context -> string list * Proof.context| \\
+ \indexdef{}{ML}{Variable.declare\_term}\verb|Variable.declare_term: term -> Proof.context -> Proof.context| \\
+ \indexdef{}{ML}{Variable.declare\_constraints}\verb|Variable.declare_constraints: term -> Proof.context -> Proof.context| \\
+ \indexdef{}{ML}{Variable.export}\verb|Variable.export: Proof.context -> Proof.context -> thm list -> thm list| \\
+ \indexdef{}{ML}{Variable.polymorphic}\verb|Variable.polymorphic: Proof.context -> term list -> term list| \\
+ \indexdef{}{ML}{Variable.import\_thms}\verb|Variable.import_thms: bool -> thm list -> Proof.context ->|\isasep\isanewline%
+\verb| ((ctyp list * cterm list) * thm list) * Proof.context| \\
+ \indexdef{}{ML}{Variable.focus}\verb|Variable.focus: cterm -> Proof.context -> (cterm list * cterm) * Proof.context| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Variable.add_fixes|~\isa{xs\ ctxt} fixes term
+ variables \isa{xs}, returning the resulting internal names. By
+ default, the internal representation coincides with the external
+ one, which also means that the given variables must not be fixed
+ already. There is a different policy within a local proof body: the
+ given names are just hints for newly invented Skolem variables.
+
+ \item \verb|Variable.variant_fixes| is similar to \verb|Variable.add_fixes|, but always produces fresh variants of the given
+ names.
+
+ \item \verb|Variable.declare_term|~\isa{t\ ctxt} declares term
+ \isa{t} to belong to the context. This automatically fixes new
+ type variables, but not term variables. Syntactic constraints for
+ type and term variables are declared uniformly, though.
+
+ \item \verb|Variable.declare_constraints|~\isa{t\ ctxt} declares
+ syntactic constraints from term \isa{t}, without making it part
+ of the context yet.
+
+ \item \verb|Variable.export|~\isa{inner\ outer\ thms} generalizes
+ fixed type and term variables in \isa{thms} according to the
+ difference of the \isa{inner} and \isa{outer} context,
+ following the principles sketched above.
+
+ \item \verb|Variable.polymorphic|~\isa{ctxt\ ts} generalizes type
+ variables in \isa{ts} as far as possible, even those occurring
+ in fixed term variables. The default policy of type-inference is to
+ fix newly introduced type variables, which is essentially reversed
+ with \verb|Variable.polymorphic|: here the given terms are detached
+ from the context as far as possible.
+
+ \item \verb|Variable.import_thms|~\isa{open\ thms\ ctxt} invents fixed
+ type and term variables for the schematic ones occurring in \isa{thms}. The \isa{open} flag indicates whether the fixed names
+ should be accessible to the user; otherwise newly introduced names
+ are marked as ``internal'' (\secref{sec:names}).
+
+ \item \verb|Variable.focus|~\isa{B} decomposes the outermost \isa{{\isasymAnd}} prefix of proposition \isa{B}.
+
+ \end{description}%
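+
+ \medskip A minimal sketch, assuming \verb|ctxt0| is a given proof
+ context and \verb|th| a theorem:
+
+\begin{verbatim}
+  val ([x, y], ctxt1) = Variable.add_fixes ["x", "y"] ctxt0;
+  val [th'] = Variable.export ctxt1 ctxt0 [th];
+  (*fixes of ctxt1 that are absent in ctxt0 become schematic*)
+\end{verbatim}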
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Assumptions \label{sec:assumptions}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+An \emph{assumption} is a proposition that is postulated in the
+ current context. Local conclusions may use assumptions as
+ additional facts, but this imposes implicit hypotheses that weaken
+ the overall statement.
+
+ Assumptions are restricted to fixed non-schematic statements, i.e.\
+ all generality needs to be expressed by explicit quantifiers.
+ Nevertheless, the result will be in HHF normal form with outermost
+ quantifiers stripped. For example, by assuming \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x} we get \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x\ {\isasymturnstile}\ P\ {\isacharquery}x} for schematic \isa{{\isacharquery}x}
+ of fixed type \isa{{\isasymalpha}}. Local derivations accumulate more and
+ more explicit references to hypotheses: \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} where \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n} needs to
+ be covered by the assumptions of the current context.
+
+ \medskip The \isa{add{\isacharunderscore}assms} operation augments the context by
+ local assumptions, which are parameterized by an arbitrary \isa{export} rule (see below).
+
+ The \isa{export} operation moves facts from a (larger) inner
+ context into a (smaller) outer context, by discharging the
+ difference of the assumptions as specified by the associated export
+ rules. Note that the discharged portion is determined by the
+ difference of the contexts, not the facts being exported! There is a
+ separate flag to indicate a goal context, where the result is meant
+ to refine an enclosing sub-goal of a structured proof state.
+
+ \medskip The most basic export rule discharges assumptions directly
+ by means of the \isa{{\isasymLongrightarrow}} introduction rule:
+ \[
+ \infer[(\isa{{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \]
+
+ The variant for goal refinements marks the newly introduced
+ premises, which causes the canonical Isar goal refinement scheme to
+ enforce unification with local premises within the goal:
+ \[
+ \infer[(\isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
+ \]
+
+ \medskip Alternative versions of assumptions may perform arbitrary
+ transformations on export, as long as the corresponding portion of
+ hypotheses is removed from the given facts. For example, a local
+ definition works by fixing \isa{x} and assuming \isa{x\ {\isasymequiv}\ t},
+ with the following export rule to reverse the effect:
+ \[
+ \infer[(\isa{{\isasymequiv}{\isacharminus}expand})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ x\ {\isasymequiv}\ t\ {\isasymturnstile}\ B\ t}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B\ x}}
+ \]
+ This works because the assumption \isa{x\ {\isasymequiv}\ t} was introduced in
+ a context with \isa{x} being fresh, so \isa{x} does not
+ occur in \isa{{\isasymGamma}} here.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{Assumption.export}\verb|type Assumption.export| \\
+ \indexdef{}{ML}{Assumption.assume}\verb|Assumption.assume: cterm -> thm| \\
+ \indexdef{}{ML}{Assumption.add\_assms}\verb|Assumption.add_assms: Assumption.export ->|\isasep\isanewline%
+\verb| cterm list -> Proof.context -> thm list * Proof.context| \\
+ \indexdef{}{ML}{Assumption.add\_assumes}\verb|Assumption.add_assumes: |\isasep\isanewline%
+\verb| cterm list -> Proof.context -> thm list * Proof.context| \\
+ \indexdef{}{ML}{Assumption.export}\verb|Assumption.export: bool -> Proof.context -> Proof.context -> thm -> thm| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Assumption.export| represents arbitrary export
+ rules, which are functions of type \verb|bool -> cterm list -> thm -> thm|,
+ where the \verb|bool| indicates goal mode, and the \verb|cterm list| the collection of assumptions to be discharged
+ simultaneously.
+
+ \item \verb|Assumption.assume|~\isa{A} turns proposition \isa{A} into a raw assumption \isa{A\ {\isasymturnstile}\ A{\isacharprime}}, where the conclusion
+ \isa{A{\isacharprime}} is in HHF normal form.
+
+ \item \verb|Assumption.add_assms|~\isa{r\ As} augments the context
+ by assumptions \isa{As} with export rule \isa{r}. The
+ resulting facts are hypothetical theorems as produced by the raw
+ \verb|Assumption.assume|.
+
+ \item \verb|Assumption.add_assumes|~\isa{As} is a special case of
+ \verb|Assumption.add_assms| where the export rule performs \isa{{\isasymLongrightarrow}{\isacharunderscore}intro} or \isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro}, depending on goal mode.
+
+ \item \verb|Assumption.export|~\isa{is{\isacharunderscore}goal\ inner\ outer\ thm}
+ exports result \isa{thm} from the \isa{inner} context
+ back into the \isa{outer} one; \isa{is{\isacharunderscore}goal\ {\isacharequal}\ true} means
+ this is a goal context. The result is in HHF normal form. Note
+ that \verb|ProofContext.export| combines \verb|Variable.export|
+ and \verb|Assumption.export| in the canonical way.
+
+ \end{description}%
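+
+ \medskip For illustration, here is a minimal {\ML} sketch of the
+ assumption protocol, assuming a proof context \verb|ctxt| and a
+ certified proposition \verb|A| (both names are hypothetical):
+
+\begin{verbatim}
+  (*augment the context: th is the hypothetical theorem A |- A'*)
+  val ([th], ctxt') = Assumption.add_assumes [A] ctxt;
+
+  (*... derive further results from th within ctxt' ...*)
+
+  (*discharge the assumption again, not in goal mode:
+    the result is |- A ==> A' in the original context*)
+  val th' = Assumption.export false ctxt' ctxt th;
+\end{verbatim}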
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Results \label{sec:results}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Local results are established by monotonic reasoning from facts
+ within a context. This allows common combinations of theorems,
+ e.g.\ via \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} elimination, resolution rules, or equational
+ reasoning, see \secref{sec:thms}. Unaccounted context manipulations
+ should be avoided, notably raw \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} introduction or ad-hoc
+ references to free variables or assumptions not present in the proof
+ context.
+
+ \medskip The \isa{SUBPROOF} combinator allows one to structure a
+ tactical proof recursively by decomposing a selected sub-goal:
+ \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ A{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ B{\isacharparenleft}x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}} is turned into \isa{B{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}}
+ after fixing \isa{x} and assuming \isa{A{\isacharparenleft}x{\isacharparenright}}. This means
+ the tactic needs to solve the conclusion, but may use the premise as
+ a local fact, for locally fixed variables.
+
+ The \isa{prove} operation provides an interface for structured
+ backwards reasoning under program control, with some explicit sanity
+ checks of the result. The goal context can be augmented by
+ additional fixed variables (cf.\ \secref{sec:variables}) and
+ assumptions (cf.\ \secref{sec:assumptions}), which will be available
+ as local facts during the proof and discharged into implications in
+ the result. Type and term variables are generalized as usual,
+ according to the context.
+
+ The \isa{obtain} operation produces results by eliminating
+ existing facts by means of a given tactic. This acts like a dual
+ conclusion: the proof demonstrates that the context may be augmented
+ by certain fixed variables and assumptions. See also
+ \cite{isabelle-isar-ref} for the user-level \isa{{\isasymOBTAIN}} and
+ \isa{{\isasymGUESS}} elements. Final results, which may not refer to
+ the parameters in the conclusion, need to be exported explicitly into
+ the original context.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{SUBPROOF}\verb|SUBPROOF: ({context: Proof.context, schematics: ctyp list * cterm list,|\isasep\isanewline%
+\verb| params: cterm list, asms: cterm list, concl: cterm,|\isasep\isanewline%
+\verb| prems: thm list} -> tactic) -> Proof.context -> int -> tactic| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{Goal.prove}\verb|Goal.prove: Proof.context -> string list -> term list -> term ->|\isasep\isanewline%
+\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm| \\
+ \indexdef{}{ML}{Goal.prove\_multi}\verb|Goal.prove_multi: Proof.context -> string list -> term list -> term list ->|\isasep\isanewline%
+\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm list| \\
+ \end{mldecls}
+ \begin{mldecls}
+ \indexdef{}{ML}{Obtain.result}\verb|Obtain.result: (Proof.context -> tactic) ->|\isasep\isanewline%
+\verb| thm list -> Proof.context -> (cterm list * thm list) * Proof.context| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|SUBPROOF|~\isa{tac\ ctxt\ i} decomposes the structure
+ of the specified sub-goal, producing an extended context and a
+ reduced goal, which needs to be solved by the given tactic. All
+ schematic parameters of the goal are imported into the context as
+ fixed ones, which may not be instantiated in the sub-proof.
+
+ \item \verb|Goal.prove|~\isa{ctxt\ xs\ As\ C\ tac} states goal \isa{C} in the context augmented by fixed variables \isa{xs} and
+ assumptions \isa{As}, and applies tactic \isa{tac} to solve
+ it. The latter may depend on the local assumptions being presented
+ as facts. The result is in HHF normal form.
+
+ \item \verb|Goal.prove_multi| is similar to \verb|Goal.prove|, but
+ states several conclusions simultaneously. The goal is encoded by
+ means of Pure conjunction; \verb|Goal.conjunction_tac| will turn this
+ into a collection of individual subgoals.
+
+ \item \verb|Obtain.result|~\isa{tac\ thms\ ctxt} eliminates the
+ given facts using a tactic, which results in additional fixed
+ variables and assumptions in the context. Final results need to be
+ exported explicitly.
+
+ \end{description}%
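+
+ \medskip A minimal {\ML} sketch of \verb|Goal.prove|, assuming a
+ context \verb|ctxt| and a term \verb|C| that happens to have the
+ form \verb|A ==> A| (all names hypothetical):
+
+\begin{verbatim}
+  (*state C as goal and solve it by assumption; there are
+    no extra fixes or assumes, so prems is empty here*)
+  val th = Goal.prove ctxt [] [] C
+    (fn _ => assume_tac 1);
+\end{verbatim}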
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Syntax.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,48 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Syntax}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Syntax\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Syntax and type-checking%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarImplementation/Thy/document/Tactic.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,497 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Tactic}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Tactic\isanewline
+\isakeyword{imports}\ Base\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{Tactical reasoning%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Tactical reasoning works by refining the initial claim in a
+ backwards fashion, until a solved form is reached. A \isa{goal}
+ consists of several subgoals that need to be solved in order to
+ achieve the main statement; zero subgoals means that the proof may
+ be finished. A \isa{tactic} is a refinement operation that maps
+ a goal to a lazy sequence of potential successors. A \isa{tactical} is a combinator for composing tactics.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{Goals \label{sec:tactical-goals}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Isabelle/Pure represents a goal as a theorem stating that the
+ subgoals imply the main goal: \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}. The outermost goal structure is that of a Horn clause, i.e.\
+ an iterated implication without any quantifiers\footnote{Recall that
+ outermost \isa{{\isasymAnd}x{\isachardot}\ {\isasymphi}{\isacharbrackleft}x{\isacharbrackright}} is always represented via schematic
+ variables in the body: \isa{{\isasymphi}{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}. These variables may get
+ instantiated during the course of reasoning.}. For \isa{n\ {\isacharequal}\ {\isadigit{0}}}
+ a goal is called ``solved''.
+
+ The structure of each subgoal \isa{A\isactrlsub i} is that of a
+ general Hereditary Harrop Formula \isa{{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ {\isasymAnd}x\isactrlsub k{\isachardot}\ H\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ H\isactrlsub m\ {\isasymLongrightarrow}\ B}. Here \isa{x\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlsub k} are goal parameters, i.e.\
+ arbitrary-but-fixed entities of certain types, and \isa{H\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ H\isactrlsub m} are goal hypotheses, i.e.\ facts that may
+ be assumed locally. Together, this forms the goal context of the
+ conclusion \isa{B} to be established. The goal hypotheses may be
+ again arbitrary Hereditary Harrop Formulas, although the level of
+ nesting rarely exceeds 1--2 in practice.
+
+ The main conclusion \isa{C} is internally marked as a protected
+ proposition, which is represented explicitly by the notation \isa{{\isacharhash}C}. This ensures that the decomposition into subgoals and main
+ conclusion is well-defined for arbitrarily structured claims.
+
+ \medskip Basic goal management is performed via the following
+ Isabelle/Pure rules:
+
+ \[
+ \infer[\isa{{\isacharparenleft}init{\isacharparenright}}]{\isa{C\ {\isasymLongrightarrow}\ {\isacharhash}C}}{} \qquad
+ \infer[\isa{{\isacharparenleft}finish{\isacharparenright}}]{\isa{C}}{\isa{{\isacharhash}C}}
+ \]
+
+ \medskip The following low-level variants admit general reasoning
+ with protected propositions:
+
+ \[
+ \infer[\isa{{\isacharparenleft}protect{\isacharparenright}}]{\isa{{\isacharhash}C}}{\isa{C}} \qquad
+ \infer[\isa{{\isacharparenleft}conclude{\isacharparenright}}]{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}}{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ {\isacharhash}C}}
+ \]%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{Goal.init}\verb|Goal.init: cterm -> thm| \\
+ \indexdef{}{ML}{Goal.finish}\verb|Goal.finish: thm -> thm| \\
+ \indexdef{}{ML}{Goal.protect}\verb|Goal.protect: thm -> thm| \\
+ \indexdef{}{ML}{Goal.conclude}\verb|Goal.conclude: thm -> thm| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|Goal.init|~\isa{C} initializes a tactical goal from
+ the well-formed proposition \isa{C}.
+
+ \item \verb|Goal.finish|~\isa{thm} checks whether theorem
+ \isa{thm} is a solved goal (no subgoals), and concludes the
+ result by removing the goal protection.
+
+ \item \verb|Goal.protect|~\isa{thm} protects the full statement
+ of theorem \isa{thm}.
+
+ \item \verb|Goal.conclude|~\isa{thm} removes the goal
+ protection, even if there are pending subgoals.
+
+ \end{description}%
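+
+ \medskip The overall protocol as a minimal {\ML} sketch, assuming a
+ certified proposition \verb|C| and some \verb|tac| that solves it
+ (both hypothetical):
+
+\begin{verbatim}
+  (*initial goal state C ==> #C*)
+  val st = Goal.init C;
+
+  (*apply the tactic and commit to the first result;
+    Seq.pull inspects the lazy sequence of outcomes*)
+  val st' =
+    (case Seq.pull (tac st) of
+      SOME (res, _) => res
+    | NONE => error "tactic failed");
+
+  (*remove the protection marker: the result is |- C*)
+  val th = Goal.finish st';
+\end{verbatim}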
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Tactics%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \isa{tactic} is a function \isa{goal\ {\isasymrightarrow}\ goal\isactrlsup {\isacharasterisk}\isactrlsup {\isacharasterisk}} that
+ maps a given goal state (represented as a theorem, cf.\
+ \secref{sec:tactical-goals}) to a lazy sequence of potential
+ successor states. The underlying sequence implementation is lazy
+ both in head and tail, and is purely functional in \emph{not}
+ supporting memoing.\footnote{The lack of memoing and the strict
+ nature of SML requires some care when working with low-level
+ sequence operations, to avoid duplicate or premature evaluation of
+ results.}
+
+ An \emph{empty result sequence} means that the tactic has failed: in
+ a compound tactic expression, other tactics might be tried instead,
+ or the whole refinement step might fail outright, producing a
+ toplevel error message. When implementing tactics from scratch, one
+ should take care to observe the basic protocol of mapping regular
+ error conditions to an empty result; only serious faults should
+ emerge as exceptions.
+
+ By enumerating \emph{multiple results}, a tactic can easily express
+ the potential outcome of an internal search process. There are also
+ combinators for building proof tools that involve search
+ systematically; see also \secref{sec:tacticals}.
+
+ \medskip As explained in \secref{sec:tactical-goals}, a goal state
+ essentially consists of a list of subgoals that imply the main goal
+ (conclusion). Tactics may operate on all subgoals or on a
+ specifically addressed subgoal, but must not change the main
+ conclusion (apart from instantiating schematic goal variables).
+
+ Tactics with explicit \emph{subgoal addressing} are of the form
+ \isa{int\ {\isasymrightarrow}\ tactic} and may be applied to a particular subgoal
+ (counting from 1). If the subgoal number is out of range, the
+ tactic should fail with an empty result sequence, but must not raise
+ an exception!
+
+ Operating on a particular subgoal means to replace it by an interval
+ of zero or more subgoals in the same place; other subgoals must not
+ be affected, apart from instantiating schematic variables ranging
+ over the whole goal state.
+
+ A common pattern of composing tactics with subgoal addressing is to
+ try the first one, and then the second one only if the subgoal has
+ not been solved yet. Special care is required here to avoid bumping
+ into unrelated subgoals that happen to come after the original
+ subgoal. Assuming that there is only a single initial subgoal is a
+ very common error when implementing tactics!
+
+ Tactics with internal subgoal addressing should expose the subgoal
+ index as \isa{int} argument in full generality; a hardwired
+ subgoal 1 inappropriate.
+
+ \medskip The main well-formedness conditions for proper tactics are
+ summarized as follows.
+
+ \begin{itemize}
+
+ \item General tactic failure is indicated by an empty result, only
+ serious faults may produce an exception.
+
+ \item The main conclusion must not be changed, apart from
+ instantiating schematic variables.
+
+ \item A tactic operates either uniformly on all subgoals, or
+ specifically on a selected subgoal (without bumping into unrelated
+ subgoals).
+
+ \item Range errors in subgoal addressing produce an empty result.
+
+ \end{itemize}
+
+ Some of these conditions are checked by higher-level goal
+ infrastructure (\secref{sec:results}); others are not checked
+ explicitly, and violating them merely results in ill-behaved tactics
+ experienced by the user (e.g.\ tactics that insist on being
+ applicable only to singleton goals, or disallow composition with
+ basic tacticals).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML type}{tactic}\verb|type tactic = thm -> thm Seq.seq| \\
+ \indexdef{}{ML}{no\_tac}\verb|no_tac: tactic| \\
+ \indexdef{}{ML}{all\_tac}\verb|all_tac: tactic| \\
+ \indexdef{}{ML}{print\_tac}\verb|print_tac: string -> tactic| \\[1ex]
+ \indexdef{}{ML}{PRIMITIVE}\verb|PRIMITIVE: (thm -> thm) -> tactic| \\[1ex]
+ \indexdef{}{ML}{SUBGOAL}\verb|SUBGOAL: (term * int -> tactic) -> int -> tactic| \\
+ \indexdef{}{ML}{CSUBGOAL}\verb|CSUBGOAL: (cterm * int -> tactic) -> int -> tactic| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|tactic| represents tactics. The well-formedness
+ conditions described above need to be observed. See also \hyperlink{file.~~/src/Pure/General/seq.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}General{\isacharslash}seq{\isachardot}ML}}}} for the underlying implementation of
+ lazy sequences.
+
+ \item \verb|int -> tactic| represents tactics with explicit
+ subgoal addressing, with well-formedness conditions as described
+ above.
+
+ \item \verb|no_tac| is a tactic that always fails, returning the
+ empty sequence.
+
+ \item \verb|all_tac| is a tactic that always succeeds, returning a
+ singleton sequence with unchanged goal state.
+
+ \item \verb|print_tac|~\isa{message} is like \verb|all_tac|, but
+ prints a message together with the goal state on the tracing
+ channel.
+
+ \item \verb|PRIMITIVE|~\isa{rule} turns a primitive inference rule
+ into a tactic with unique result. Exception \verb|THM| is considered
+ a regular tactic failure and produces an empty result; other
+ exceptions are passed through.
+
+ \item \verb|SUBGOAL|~\isa{{\isacharparenleft}fn\ {\isacharparenleft}subgoal{\isacharcomma}\ i{\isacharparenright}\ {\isacharequal}{\isachargreater}\ tactic{\isacharparenright}} is the
+ most basic form to produce a tactic with subgoal addressing. The
+ given abstraction over the subgoal term and subgoal number allows one to
+ peek at the relevant information of the full goal state. The
+ subgoal range is checked as required above.
+
+ \item \verb|CSUBGOAL| is similar to \verb|SUBGOAL|, but passes the
+ subgoal as \verb|cterm| instead of raw \verb|term|. This
+ avoids expensive re-certification in situations where the subgoal is
+ used directly for primitive inferences.
+
+ \end{description}%
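+
+ \medskip A minimal {\ML} sketch of a well-behaved tactic with
+ subgoal addressing; the test on the subgoal term is a hypothetical
+ placeholder:
+
+\begin{verbatim}
+  (*succeed (trivially) on subgoals that are plain
+    implications, produce an empty result otherwise;
+    out-of-range subgoal numbers are handled by SUBGOAL*)
+  val implies_tac = SUBGOAL (fn (goal, _) =>
+    if can Logic.dest_implies goal then all_tac else no_tac);
+\end{verbatim}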
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Resolution and assumption tactics \label{sec:resolve-assume-tac}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+\emph{Resolution} is the most basic mechanism for refining a
+ subgoal using a theorem as an object-level rule.
+ \emph{Elim-resolution} is particularly suited for elimination rules:
+ it resolves with a rule, proves its first premise by assumption, and
+ finally deletes that assumption from any new subgoals.
+ \emph{Destruct-resolution} is like elim-resolution, but the given
+ destruction rules are first turned into canonical elimination
+ format. \emph{Forward-resolution} is like destruct-resolution, but
+ without deleting the selected assumption. The \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f}
+ naming convention is maintained for several different kinds of
+ resolution rules and tactics.
+
+ Assumption tactics close a subgoal by unifying some of its premises
+ against its conclusion.
+
+ \medskip All the tactics in this section operate on a subgoal
+ designated by a positive integer. Other subgoals might be affected
+ indirectly, due to instantiation of schematic variables.
+
+ There are various sources of non-determinism; the tactic result
+ sequence enumerates all possibilities of the following choices (if
+ applicable):
+
+ \begin{enumerate}
+
+ \item selecting one of the rules given as argument to the tactic;
+
+ \item selecting a subgoal premise to eliminate, unifying it against
+ the first premise of the rule;
+
+ \item unifying the conclusion of the subgoal to the conclusion of
+ the rule.
+
+ \end{enumerate}
+
+ Recall that higher-order unification may produce multiple results
+ that are enumerated here.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{resolve\_tac}\verb|resolve_tac: thm list -> int -> tactic| \\
+ \indexdef{}{ML}{eresolve\_tac}\verb|eresolve_tac: thm list -> int -> tactic| \\
+ \indexdef{}{ML}{dresolve\_tac}\verb|dresolve_tac: thm list -> int -> tactic| \\
+ \indexdef{}{ML}{forward\_tac}\verb|forward_tac: thm list -> int -> tactic| \\[1ex]
+ \indexdef{}{ML}{assume\_tac}\verb|assume_tac: int -> tactic| \\
+ \indexdef{}{ML}{eq\_assume\_tac}\verb|eq_assume_tac: int -> tactic| \\[1ex]
+ \indexdef{}{ML}{match\_tac}\verb|match_tac: thm list -> int -> tactic| \\
+ \indexdef{}{ML}{ematch\_tac}\verb|ematch_tac: thm list -> int -> tactic| \\
+ \indexdef{}{ML}{dmatch\_tac}\verb|dmatch_tac: thm list -> int -> tactic| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|resolve_tac|~\isa{thms\ i} refines the goal state
+ using the given theorems, which should normally be introduction
+ rules. The tactic resolves a rule's conclusion with subgoal \isa{i}, replacing it by the corresponding versions of the rule's
+ premises.
+
+ \item \verb|eresolve_tac|~\isa{thms\ i} performs elim-resolution
+ with the given theorems, which should normally be elimination rules.
+
+ \item \verb|dresolve_tac|~\isa{thms\ i} performs
+ destruct-resolution with the given theorems, which should normally
+ be destruction rules. This replaces an assumption by the result of
+ applying one of the rules.
+
+ \item \verb|forward_tac| is like \verb|dresolve_tac| except that the
+ selected assumption is not deleted. It applies a rule to an
+ assumption, adding the result as a new assumption.
+
+ \item \verb|assume_tac|~\isa{i} attempts to solve subgoal \isa{i}
+ by assumption (modulo higher-order unification).
+
+ \item \verb|eq_assume_tac| is similar to \verb|assume_tac|, but checks
+ only for immediate \isa{{\isasymalpha}}-convertibility instead of using
+ unification. It succeeds (with a unique next state) if one of the
+ assumptions is equal to the subgoal's conclusion. Since it does not
+ instantiate variables, it cannot make other subgoals unprovable.
+
+ \item \verb|match_tac|, \verb|ematch_tac|, and \verb|dmatch_tac| are
+ similar to \verb|resolve_tac|, \verb|eresolve_tac|, and \verb|dresolve_tac|, respectively, but do not instantiate schematic
+ variables in the goal state.
+
+ Flexible subgoals are not updated at will, but are left alone.
+ Strictly speaking, matching means to treat the unknowns in the goal
+ state as constants; these tactics merely discard unifiers that would
+ update the goal state.
+
+ \end{description}%
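+
+ \medskip A minimal sketch that combines these tactics, assuming a
+ hypothetical introduction rule \verb|my_intro| (\verb|THEN| is the
+ basic sequential tactical, cf.\ \secref{sec:tacticals}):
+
+\begin{verbatim}
+  (*refine subgoal 1 by the rule, then close the first of
+    the resulting subgoals by assumption*)
+  val tac = resolve_tac [my_intro] 1 THEN assume_tac 1;
+\end{verbatim}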
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsubsection{Explicit instantiation within a subgoal context%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The main resolution tactics (\secref{sec:resolve-assume-tac})
+ use higher-order unification, which works well in many practical
+ situations despite its daunting theoretical properties.
+ Nonetheless, there are important problem classes where unguided
+ higher-order unification is not so useful. This typically involves
+ rules like universal elimination, existential introduction, or
+ equational substitution. Here the unification problem involves
+ fully flexible \isa{{\isacharquery}P\ {\isacharquery}x} schemes, which are hard to manage
+ without further hints.
+
+ By providing a (small) rigid term for \isa{{\isacharquery}x} explicitly, the
+ remaining unification problem is to assign a (large) term to \isa{{\isacharquery}P}, according to the shape of the given subgoal. This is
+ sufficiently well-behaved in most practical situations.
+
+ \medskip Isabelle provides separate versions of the standard \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f} resolution tactics that allow one to provide explicit
+ instantiations of unknowns of the given rule, wrt.\ terms that refer
+ to the implicit context of the selected subgoal.
+
+ An instantiation consists of a list of pairs of the form \isa{{\isacharparenleft}{\isacharquery}x{\isacharcomma}\ t{\isacharparenright}}, where \isa{{\isacharquery}x} is a schematic variable occurring in
+ the given rule, and \isa{t} is a term from the current proof
+ context, augmented by the local goal parameters of the selected
+ subgoal; cf.\ the \isa{focus} operation described in
+ \secref{sec:variables}.
+
+ Entering the syntactic context of a subgoal is a brittle operation,
+ because its exact form is somewhat accidental, and the choice of
+ bound variable names depends on the presence of other local and
+ global names. Explicit renaming of subgoal parameters prior to
+ explicit instantiation might help to achieve a bit more robustness.
+
+ Type instantiations may be given as well, via pairs like \isa{{\isacharparenleft}{\isacharquery}{\isacharprime}a{\isacharcomma}\ {\isasymtau}{\isacharparenright}}. Type instantiations are distinguished from term
+ instantiations by the syntactic form of the schematic variable.
+ Types are instantiated before terms are. Since term instantiation
+ already performs type-inference as expected, explicit type
+ instantiations are seldom necessary.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isatagmlref
+%
+\begin{isamarkuptext}%
+\begin{mldecls}
+ \indexdef{}{ML}{res\_inst\_tac}\verb|res_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
+ \indexdef{}{ML}{eres\_inst\_tac}\verb|eres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
+ \indexdef{}{ML}{dres\_inst\_tac}\verb|dres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
+ \indexdef{}{ML}{forw\_inst\_tac}\verb|forw_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\[1ex]
+ \indexdef{}{ML}{rename\_tac}\verb|rename_tac: string list -> int -> tactic| \\
+ \end{mldecls}
+
+ \begin{description}
+
+ \item \verb|res_inst_tac|~\isa{ctxt\ insts\ thm\ i} instantiates the
+ rule \isa{thm} with the instantiations \isa{insts}, as described
+ above, and then performs resolution on subgoal \isa{i}.
+
+ \item \verb|eres_inst_tac| is like \verb|res_inst_tac|, but performs
+ elim-resolution.
+
+ \item \verb|dres_inst_tac| is like \verb|res_inst_tac|, but performs
+ destruct-resolution.
+
+ \item \verb|forw_inst_tac| is like \verb|dres_inst_tac| except that
+ the selected assumption is not deleted.
+
+ \item \verb|rename_tac|~\isa{names\ i} renames the innermost
+ parameters of subgoal \isa{i} according to the provided \isa{names} (which need to be distinct identifiers).
+
+ \end{description}%
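+
+ \medskip A minimal sketch of explicit instantiation, assuming a
+ context \verb|ctxt| and a rule \verb|rule| with schematic variable
+ \verb|?x| (all hypothetical):
+
+\begin{verbatim}
+  (*rename the innermost subgoal parameter for robustness,
+    then resolve with the rule, instantiating ?x by a term
+    that refers to that parameter*)
+  val tac =
+    rename_tac ["a"] 1
+    THEN res_inst_tac ctxt [(("x", 0), "a")] rule 1;
+\end{verbatim}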
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\endisatagmlref
+{\isafoldmlref}%
+%
+\isadelimmlref
+%
+\endisadelimmlref
+%
+\isamarkupsection{Tacticals \label{sec:tacticals}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+A \emph{tactical} is a functional combinator for building up complex
+ tactics from simpler ones. Typical tacticals perform sequential
+ composition, disjunction (choice), iteration, or goal addressing.
+ Various search strategies may be expressed via tacticals.
+
+ \medskip FIXME%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- a/doc-src/IsarImplementation/Thy/document/base.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{base}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ base\isanewline
-\isakeyword{imports}\ Pure\isanewline
-\isakeyword{uses}\ {\isachardoublequoteopen}{\isachardot}{\isachardot}{\isacharslash}{\isachardot}{\isachardot}{\isacharslash}antiquote{\isacharunderscore}setup{\isachardot}ML{\isachardoublequoteclose}\isanewline
-\isakeyword{begin}\isanewline
-\isanewline
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-\isanewline
-%
-\endisadelimtheory
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/integration.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,521 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{integration}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ integration\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{System integration%
-}
-\isamarkuptrue%
-%
-\isamarkupsection{Isar toplevel \label{sec:isar-toplevel}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The Isar toplevel may be considered the centeral hub of the
- Isabelle/Isar system, where all key components and sub-systems are
- integrated into a single read-eval-print loop of Isar commands. We
- shall even incorporate the existing {\ML} toplevel of the compiler
- and run-time system (cf.\ \secref{sec:ML-toplevel}).
-
- Isabelle/Isar departs from the original ``LCF system architecture''
- where {\ML} was really The Meta Language for defining theories and
- conducting proofs. Instead, {\ML} now only serves as the
- implementation language for the system (and user extensions), while
- the specific Isar toplevel supports the concepts of theory and proof
- development natively. This includes the graph structure of theories
- and the block structure of proofs, support for unlimited undo,
- facilities for tracing, debugging, timing, profiling etc.
-
- \medskip The toplevel maintains an implicit state, which is
- transformed by a sequence of transitions -- either interactively or
- in batch-mode. In interactive mode, Isar state transitions are
- encapsulated as safe transactions, such that both failure and undo
- are handled conveniently without destroying the underlying draft
- theory (cf.~\secref{sec:context-theory}). In batch mode,
- transitions operate in a linear (destructive) fashion, such that
- error conditions abort the present attempt to construct a theory or
- proof altogether.
-
- The toplevel state is a disjoint sum of empty \isa{toplevel}, or
- \isa{theory}, or \isa{proof}. On entering the main Isar loop we
- start with an empty toplevel. A theory is commenced by giving a
- \isa{{\isasymTHEORY}} header; within a theory we may issue theory
- commands such as \isa{{\isasymDEFINITION}}, or state a \isa{{\isasymTHEOREM}} to be proven. Now we are within a proof state, with a
- rich collection of Isar proof commands for structured proof
- composition, or unstructured proof scripts. When the proof is
- concluded we get back to the theory, which is then updated by
- storing the resulting fact. Further theory declarations or theorem
- statements with proofs may follow, until we eventually conclude the
- theory development by issuing \isa{{\isasymEND}}. The resulting theory
- is then stored within the theory database and we are back to the
- empty toplevel.
-
- In addition to these proper state transformations, there are also
- some diagnostic commands for peeking at the toplevel state without
- modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
- \isakeyword{print-cases}).%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{Toplevel.state}\verb|type Toplevel.state| \\
- \indexml{Toplevel.UNDEF}\verb|Toplevel.UNDEF: exn| \\
- \indexml{Toplevel.is\_toplevel}\verb|Toplevel.is_toplevel: Toplevel.state -> bool| \\
- \indexml{Toplevel.theory\_of}\verb|Toplevel.theory_of: Toplevel.state -> theory| \\
- \indexml{Toplevel.proof\_of}\verb|Toplevel.proof_of: Toplevel.state -> Proof.state| \\
- \indexml{Toplevel.debug}\verb|Toplevel.debug: bool ref| \\
- \indexml{Toplevel.timing}\verb|Toplevel.timing: bool ref| \\
- \indexml{Toplevel.profiling}\verb|Toplevel.profiling: int ref| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Toplevel.state| represents Isar toplevel states,
- which are normally manipulated through the concept of toplevel
- transitions only (\secref{sec:toplevel-transition}). Also note that
- a raw toplevel state is subject to the same linearity restrictions
- as a theory context (cf.~\secref{sec:context-theory}).
-
- \item \verb|Toplevel.UNDEF| is raised for undefined toplevel
- operations. Many operations work only partially for certain cases,
- since \verb|Toplevel.state| is a sum type.
-
- \item \verb|Toplevel.is_toplevel|~\isa{state} checks for an empty
- toplevel state.
-
- \item \verb|Toplevel.theory_of|~\isa{state} selects the theory of
- a theory or proof (!), otherwise raises \verb|Toplevel.UNDEF|.
-
- \item \verb|Toplevel.proof_of|~\isa{state} selects the Isar proof
- state if available, otherwise raises \verb|Toplevel.UNDEF|.
-
- \item \verb|set Toplevel.debug| makes the toplevel print further
- details about internal error conditions, exceptions being raised
- etc.
-
- \item \verb|set Toplevel.timing| makes the toplevel print timing
- information for each Isar command being executed.
-
- \item \verb|Toplevel.profiling|~\verb|:=|~\isa{n} controls
- low-level profiling of the underlying {\ML} runtime system. For
- Poly/ML, \isa{n\ {\isacharequal}\ {\isadigit{1}}} means time and \isa{n\ {\isacharequal}\ {\isadigit{2}}} space
- profiling.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Toplevel transitions \label{sec:toplevel-transition}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-An Isar toplevel transition consists of a partial function on the
- toplevel state, with additional information for diagnostics and
- error reporting: there are fields for command name, source position,
- optional source text, as well as flags for interactive-only commands
- (which issue a warning in batch-mode), printing of result state,
- etc.
-
- The operational part is represented as the sequential union of a
- list of partial functions, which are tried in turn until the first
- one succeeds. This acts like an outer case-expression for various
- alternative state transitions. For example, \isakeyword{qed} acts
- differently for a local proofs vs.\ the global ending of the main
- proof.
-
- Toplevel transitions are composed via transition transformers.
- Internally, Isar commands are put together from an empty transition
- extended by name and source position (and optional source text). It
- is then left to the individual command parser to turn the given
- concrete syntax into a suitable transition transformer that adjoin
- actual operations on a theory or proof state etc.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{Toplevel.print}\verb|Toplevel.print: Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.no\_timing}\verb|Toplevel.no_timing: Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.keep}\verb|Toplevel.keep: (Toplevel.state -> unit) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.theory}\verb|Toplevel.theory: (theory -> theory) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.theory\_to\_proof}\verb|Toplevel.theory_to_proof: (theory -> Proof.state) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.proof}\verb|Toplevel.proof: (Proof.state -> Proof.state) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.proofs}\verb|Toplevel.proofs: (Proof.state -> Proof.state Seq.seq) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \indexml{Toplevel.end\_proof}\verb|Toplevel.end_proof: (bool -> Proof.state -> Proof.context) ->|\isasep\isanewline%
-\verb| Toplevel.transition -> Toplevel.transition| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Toplevel.print|~\isa{tr} sets the print flag, which
- causes the toplevel loop to echo the result state (in interactive
- mode).
-
- \item \verb|Toplevel.no_timing|~\isa{tr} indicates that the
- transition should never show timing information, e.g.\ because it is
- a diagnostic command.
-
- \item \verb|Toplevel.keep|~\isa{tr} adjoins a diagnostic
- function.
-
- \item \verb|Toplevel.theory|~\isa{tr} adjoins a theory
- transformer.
-
- \item \verb|Toplevel.theory_to_proof|~\isa{tr} adjoins a global
- goal function, which turns a theory into a proof state. The theory
- may be changed before entering the proof; the generic Isar goal
- setup includes an argument that specifies how to apply the proven
- result to the theory, when the proof is finished.
-
- \item \verb|Toplevel.proof|~\isa{tr} adjoins a deterministic
- proof command, with a singleton result.
-
- \item \verb|Toplevel.proofs|~\isa{tr} adjoins a general proof
- command, with zero or more result states (represented as a lazy
- list).
-
- \item \verb|Toplevel.end_proof|~\isa{tr} adjoins a concluding
- proof command, that returns the resulting theory, after storing the
- resulting facts in the context etc.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Toplevel control%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-There are a few special control commands that modify the behavior
- the toplevel itself, and only make sense in interactive mode. Under
- normal circumstances, the user encounters these only implicitly as
- part of the protocol between the Isabelle/Isar system and a
- user-interface such as ProofGeneral.
-
- \begin{description}
-
- \item \isacommand{undo} follows the three-level hierarchy of empty
- toplevel vs.\ theory vs.\ proof: undo within a proof reverts to the
- previous proof context, undo after a proof reverts to the theory
- before the initial goal statement, undo of a theory command reverts
- to the previous theory value, undo of a theory header discontinues
- the current theory development and removes it from the theory
- database (\secref{sec:theory-database}).
-
- \item \isacommand{kill} aborts the current level of development:
- kill in a proof context reverts to the theory before the initial
- goal statement, kill in a theory context aborts the current theory
- development, removing it from the database.
-
- \item \isacommand{exit} drops out of the Isar toplevel into the
- underlying {\ML} toplevel (\secref{sec:ML-toplevel}). The Isar
- toplevel state is preserved and may be continued later.
-
- \item \isacommand{quit} terminates the Isabelle/Isar process without
- saving.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{ML toplevel \label{sec:ML-toplevel}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The {\ML} toplevel provides a read-compile-eval-print loop for {\ML}
- values, types, structures, and functors. {\ML} declarations operate
- on the global system state, which consists of the compiler
- environment plus the values of {\ML} reference variables. There is
- no clean way to undo {\ML} declarations, except for reverting to a
- previously saved state of the whole Isabelle process. {\ML} input
- is either read interactively from a TTY, or from a string (usually
- within a theory text), or from a source file (usually loaded from a
- theory).
-
- Whenever the {\ML} toplevel is active, the current Isabelle theory
- context is passed as an internal reference variable. Thus {\ML}
- code may access the theory context during compilation, it may even
- change the value of a theory being under construction --- while
- observing the usual linearity restrictions
- (cf.~\secref{sec:context-theory}).%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{the\_context}\verb|the_context: unit -> theory| \\
- \indexml{Context.$>$$>$ }\verb|Context.>> : (Context.generic -> Context.generic) -> unit| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|the_context ()| refers to the theory context of the
- {\ML} toplevel --- at compile time! {\ML} code needs to take care
- to refer to \verb|the_context ()| correctly. Recall that
- evaluation of a function body is delayed until actual runtime.
- Moreover, persistent {\ML} toplevel bindings to an unfinished theory
- should be avoided: code should either project out the desired
- information immediately, or produce an explicit \verb|theory_ref| (cf.\ \secref{sec:context-theory}).
-
- \item \verb|Context.>>|~\isa{f} applies context transformation
- \isa{f} to the implicit context of the {\ML} toplevel.
-
- \end{description}
-
- It is very important to note that the above functions are really
- restricted to the compile time, even though the {\ML} compiler is
- invoked at runtime! The majority of {\ML} code uses explicit
- functional arguments of a theory or proof context instead. Thus it
- may be invoked for an arbitrary context later on, without having to
- worry about any operational details.
-
- \bigskip
-
- \begin{mldecls}
- \indexml{Isar.main}\verb|Isar.main: unit -> unit| \\
- \indexml{Isar.loop}\verb|Isar.loop: unit -> unit| \\
- \indexml{Isar.state}\verb|Isar.state: unit -> Toplevel.state| \\
- \indexml{Isar.exn}\verb|Isar.exn: unit -> (exn * string) option| \\
- \indexml{Isar.context}\verb|Isar.context: unit -> Proof.context| \\
- \indexml{Isar.goal}\verb|Isar.goal: unit -> thm| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Isar.main ()| invokes the Isar toplevel from {\ML},
- initializing an empty toplevel state.
-
- \item \verb|Isar.loop ()| continues the Isar toplevel with the
- current state, after having dropped out of the Isar toplevel loop.
-
- \item \verb|Isar.state ()| and \verb|Isar.exn ()| get current
- toplevel state and error condition, respectively. This only works
- after having dropped out of the Isar toplevel loop.
-
- \item \verb|Isar.context ()| produces the proof context from \verb|Isar.state ()|, analogous to \verb|Context.proof_of|
- (\secref{sec:generic-context}).
-
- \item \verb|Isar.goal ()| picks the tactical goal from \verb|Isar.state ()|, represented as a theorem according to
- \secref{sec:tactical-goals}.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Theory database \label{sec:theory-database}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The theory database maintains a collection of theories, together
- with some administrative information about their original sources,
- which are held in an external store (i.e.\ some directory within the
- regular file system).
-
- The theory database is organized as a directed acyclic graph;
- entries are referenced by theory name. Although some additional
- interfaces allow to include a directory specification as well, this
- is only a hint to the underlying theory loader. The internal theory
- name space is flat!
-
- Theory \isa{A} is associated with the main theory file \isa{A}\verb,.thy,, which needs to be accessible through the theory
- loader path. Any number of additional {\ML} source files may be
- associated with each theory, by declaring these dependencies in the
- theory header as \isa{{\isasymUSES}}, and loading them consecutively
- within the theory context. The system keeps track of incoming {\ML}
- sources and associates them with the current theory. The file
- \isa{A}\verb,.ML, is loaded after a theory has been concluded, in
- order to support legacy proof {\ML} proof scripts.
-
- The basic internal actions of the theory database are \isa{update}, \isa{outdate}, and \isa{remove}:
-
- \begin{itemize}
-
- \item \isa{update\ A} introduces a link of \isa{A} with a
- \isa{theory} value of the same name; it asserts that the theory
- sources are now consistent with that value;
-
- \item \isa{outdate\ A} invalidates the link of a theory database
- entry to its sources, but retains the present theory value;
-
- \item \isa{remove\ A} deletes entry \isa{A} from the theory
- database.
-
- \end{itemize}
-
- These actions are propagated to sub- or super-graphs of a theory
- entry as expected, in order to preserve global consistency of the
- state of all loaded theories with the sources of the external store.
- This implies certain causalities between actions: \isa{update}
- or \isa{outdate} of an entry will \isa{outdate} all
- descendants; \isa{remove} will \isa{remove} all descendants.
-
- \medskip There are separate user-level interfaces to operate on the
- theory database directly or indirectly. The primitive actions then
- just happen automatically while working with the system. In
- particular, processing a theory header \isa{{\isasymTHEORY}\ A\ {\isasymIMPORTS}\ B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n\ {\isasymBEGIN}} ensures that the
- sub-graph of the collective imports \isa{B\isactrlsub {\isadigit{1}}\ {\isasymdots}\ B\isactrlsub n}
- is up-to-date, too. Earlier theories are reloaded as required, with
- \isa{update} actions proceeding in topological order according to
- theory dependencies. There may be also a wave of implied \isa{outdate} actions for derived theory nodes until a stable situation
- is achieved eventually.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{theory}\verb|theory: string -> theory| \\
- \indexml{use\_thy}\verb|use_thy: string -> unit| \\
- \indexml{use\_thys}\verb|use_thys: string list -> unit| \\
- \indexml{ThyInfo.touch\_thy}\verb|ThyInfo.touch_thy: string -> unit| \\
- \indexml{ThyInfo.remove\_thy}\verb|ThyInfo.remove_thy: string -> unit| \\[1ex]
- \indexml{ThyInfo.begin\_theory}\verb|ThyInfo.begin_theory|\verb|: ... -> bool -> theory| \\
- \indexml{ThyInfo.end\_theory}\verb|ThyInfo.end_theory: theory -> unit| \\
- \indexml{ThyInfo.register\_theory}\verb|ThyInfo.register_theory: theory -> unit| \\[1ex]
- \verb|datatype action = Update |\verb,|,\verb| Outdate |\verb,|,\verb| Remove| \\
- \indexml{ThyInfo.add\_hook}\verb|ThyInfo.add_hook: (ThyInfo.action -> string -> unit) -> unit| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|theory|~\isa{A} retrieves the theory value presently
- associated with name \isa{A}. Note that the result might be
- outdated.
-
- \item \verb|use_thy|~\isa{A} ensures that theory \isa{A} is fully
- up-to-date wrt.\ the external file store, reloading outdated
- ancestors as required.
-
- \item \verb|use_thys| is similar to \verb|use_thy|, but handles
- several theories simultaneously. Thus it acts like processing the
- import header of a theory, without performing the merge of the
- result, though.
-
- \item \verb|ThyInfo.touch_thy|~\isa{A} performs and \isa{outdate} action
- on theory \isa{A} and all descendants.
-
- \item \verb|ThyInfo.remove_thy|~\isa{A} deletes theory \isa{A} and all
- descendants from the theory database.
-
- \item \verb|ThyInfo.begin_theory| is the basic operation behind a
- \isa{{\isasymTHEORY}} header declaration. This is {\ML} functions is
- normally not invoked directly.
-
- \item \verb|ThyInfo.end_theory| concludes the loading of a theory
- proper and stores the result in the theory database.
-
- \item \verb|ThyInfo.register_theory|~\isa{text\ thy} registers an
- existing theory value with the theory loader database. There is no
- management of associated sources.
-
- \item \verb|ThyInfo.add_hook|~\isa{f} registers function \isa{f} as a hook for theory database actions. The function will be
- invoked with the action and theory name being involved; thus derived
- actions may be performed in associated system components, e.g.\
- maintaining the state of an editor for the theory sources.
-
- The kind and order of actions occurring in practice depends both on
- user interactions and the internal process of resolving theory
- imports. Hooks should not rely on a particular policy here! Any
- exceptions raised by the hook are ignored.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/isar.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{isar}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ isar\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Isar proof texts%
-}
-\isamarkuptrue%
-%
-\isamarkupsection{Proof context%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Proof state \label{sec:isar-proof-state}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME
-
-\glossary{Proof state}{The whole configuration of a structured proof,
-consisting of a \seeglossary{proof context} and an optional
-\seeglossary{structured goal}. Internally, an Isar proof state is
-organized as a stack to accomodate block structure of proof texts.
-For historical reasons, a low-level \seeglossary{tactical goal} is
-occasionally called ``proof state'' as well.}
-
-\glossary{Structured goal}{FIXME}
-
-\glossary{Goal}{See \seeglossary{tactical goal} or \seeglossary{structured goal}. \norefpage}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Proof methods%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Attributes%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME ?!%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/locale.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{locale}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ {\isachardoublequoteopen}locale{\isachardoublequoteclose}\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Structured specifications%
-}
-\isamarkuptrue%
-%
-\isamarkupsection{Specification elements%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Type-inference%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Local theories%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME
-
- \glossary{Local theory}{FIXME}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/logic.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,886 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{logic}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ logic\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Primitive logic \label{ch:logic}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The logical foundations of Isabelle/Isar are that of the Pure logic,
- which has been introduced as a natural-deduction framework in
- \cite{paulson700}. This is essentially the same logic as ``\isa{{\isasymlambda}HOL}'' in the more abstract setting of Pure Type Systems (PTS)
- \cite{Barendregt-Geuvers:2001}, although there are some key
- differences in the specific treatment of simple types in
- Isabelle/Pure.
-
- Following type-theoretic parlance, the Pure logic consists of three
- levels of \isa{{\isasymlambda}}-calculus with corresponding arrows, \isa{{\isasymRightarrow}} for syntactic function space (terms depending on terms), \isa{{\isasymAnd}} for universal quantification (proofs depending on terms), and
- \isa{{\isasymLongrightarrow}} for implication (proofs depending on proofs).
-
- Derivations are relative to a logical theory, which declares type
- constructors, constants, and axioms. Theory declarations support
- schematic polymorphism, which is strictly speaking outside the
- logic.\footnote{This is the deeper logical reason, why the theory
- context \isa{{\isasymTheta}} is separate from the proof context \isa{{\isasymGamma}}
- of the core calculus.}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Types \label{sec:types}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The language of types is an uninterpreted order-sorted first-order
- algebra; types are qualified by ordered type classes.
-
- \medskip A \emph{type class} is an abstract syntactic entity
- declared in the theory context. The \emph{subclass relation} \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}} is specified by stating an acyclic
- generating relation; the transitive closure is maintained
- internally. The resulting relation is an ordering: reflexive,
- transitive, and antisymmetric.
-
- A \emph{sort} is a list of type classes written as \isa{s\ {\isacharequal}\ {\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub m{\isacharbraceright}}, which represents symbolic
- intersection. Notationally, the curly braces are omitted for
- singleton intersections, i.e.\ any class \isa{c} may be read as
- a sort \isa{{\isacharbraceleft}c{\isacharbraceright}}. The ordering on type classes is extended to
- sorts according to the meaning of intersections: \isa{{\isacharbraceleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}\ c\isactrlisub m{\isacharbraceright}\ {\isasymsubseteq}\ {\isacharbraceleft}d\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ d\isactrlisub n{\isacharbraceright}} iff
- \isa{{\isasymforall}j{\isachardot}\ {\isasymexists}i{\isachardot}\ c\isactrlisub i\ {\isasymsubseteq}\ d\isactrlisub j}. The empty intersection
- \isa{{\isacharbraceleft}{\isacharbraceright}} refers to the universal sort, which is the largest
- element wrt.\ the sort order. The intersections of all (finitely
- many) classes declared in the current theory are the minimal
- elements wrt.\ the sort order.
-
- \medskip A \emph{fixed type variable} is a pair of a basic name
- (starting with a \isa{{\isacharprime}} character) and a sort constraint, e.g.\
- \isa{{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ s{\isacharparenright}} which is usually printed as \isa{{\isasymalpha}\isactrlisub s}.
- A \emph{schematic type variable} is a pair of an indexname and a
- sort constraint, e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}{\isacharprime}a{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ s{\isacharparenright}} which is usually
- printed as \isa{{\isacharquery}{\isasymalpha}\isactrlisub s}.
-
- Note that \emph{all} syntactic components contribute to the identity
- of type variables, including the sort constraint. The core logic
- handles type variables with the same name but different sorts as
- different, although some outer layers of the system make it hard to
- produce anything like this.
-
- A \emph{type constructor} \isa{{\isasymkappa}} is a \isa{k}-ary operator
- on types declared in the theory. Type constructor application is
- written postfix as \isa{{\isacharparenleft}{\isasymalpha}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlisub k{\isacharparenright}{\isasymkappa}}. For
- \isa{k\ {\isacharequal}\ {\isadigit{0}}} the argument tuple is omitted, e.g.\ \isa{prop}
- instead of \isa{{\isacharparenleft}{\isacharparenright}prop}. For \isa{k\ {\isacharequal}\ {\isadigit{1}}} the parentheses
- are omitted, e.g.\ \isa{{\isasymalpha}\ list} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharparenright}list}.
- Further notation is provided for specific constructors, notably the
- right-associative infix \isa{{\isasymalpha}\ {\isasymRightarrow}\ {\isasymbeta}} instead of \isa{{\isacharparenleft}{\isasymalpha}{\isacharcomma}\ {\isasymbeta}{\isacharparenright}fun}.
-
- A \emph{type} is defined inductively over type variables and type
- constructors as follows: \isa{{\isasymtau}\ {\isacharequal}\ {\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharquery}{\isasymalpha}\isactrlisub s\ {\isacharbar}\ {\isacharparenleft}{\isasymtau}\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlsub k{\isacharparenright}{\isasymkappa}}.
-
- A \emph{type abbreviation} is a syntactic definition \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} of an arbitrary type expression \isa{{\isasymtau}} over
- variables \isa{\isactrlvec {\isasymalpha}}. Type abbreviations appear as type
- constructors in the syntax, but are expanded before entering the
- logical core.
-
- A \emph{type arity} declares the image behavior of a type
- constructor wrt.\ the algebra of sorts: \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}s} means that \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub k{\isacharparenright}{\isasymkappa}} is
- of sort \isa{s} if every argument type \isa{{\isasymtau}\isactrlisub i} is
- of sort \isa{s\isactrlisub i}. Arity declarations are implicitly
- completed, i.e.\ \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c} entails \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}c{\isacharprime}} for any \isa{c{\isacharprime}\ {\isasymsupseteq}\ c}.
-
- \medskip The sort algebra is always maintained as \emph{coregular},
- which means that type arities are consistent with the subclass
- relation: for any type constructor \isa{{\isasymkappa}}, classes \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}, and arities \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{1}}{\isacharparenright}c\isactrlisub {\isadigit{1}}} and \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s\isactrlisub {\isadigit{2}}{\isacharparenright}c\isactrlisub {\isadigit{2}}}, the relation \isa{\isactrlvec s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ \isactrlvec s\isactrlisub {\isadigit{2}}} holds component-wise.
-
- The key property of a coregular order-sorted algebra is that sort
- constraints can be solved in a most general fashion: for each type
- constructor \isa{{\isasymkappa}} and sort \isa{s} there is a most general
- vector of argument sorts \isa{{\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ s\isactrlisub k{\isacharparenright}} such
- that a type scheme \isa{{\isacharparenleft}{\isasymalpha}\isactrlbsub s\isactrlisub {\isadigit{1}}\isactrlesub {\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymalpha}\isactrlbsub s\isactrlisub k\isactrlesub {\isacharparenright}{\isasymkappa}} is of sort \isa{s}.
- Consequently, type unification has most general solutions (modulo
- equivalence of sorts), so type-inference produces primary types as
- expected \cite{nipkow-prehofer}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{class}\verb|type class| \\
- \indexmltype{sort}\verb|type sort| \\
- \indexmltype{arity}\verb|type arity| \\
- \indexmltype{typ}\verb|type typ| \\
- \indexml{map\_atyps}\verb|map_atyps: (typ -> typ) -> typ -> typ| \\
- \indexml{fold\_atyps}\verb|fold_atyps: (typ -> 'a -> 'a) -> typ -> 'a -> 'a| \\
- \end{mldecls}
- \begin{mldecls}
- \indexml{Sign.subsort}\verb|Sign.subsort: theory -> sort * sort -> bool| \\
- \indexml{Sign.of\_sort}\verb|Sign.of_sort: theory -> typ * sort -> bool| \\
- \indexml{Sign.add\_types}\verb|Sign.add_types: (string * int * mixfix) list -> theory -> theory| \\
- \indexml{Sign.add\_tyabbrs\_i}\verb|Sign.add_tyabbrs_i: |\isasep\isanewline%
-\verb| (string * string list * typ * mixfix) list -> theory -> theory| \\
- \indexml{Sign.primitive\_class}\verb|Sign.primitive_class: string * class list -> theory -> theory| \\
- \indexml{Sign.primitive\_classrel}\verb|Sign.primitive_classrel: class * class -> theory -> theory| \\
- \indexml{Sign.primitive\_arity}\verb|Sign.primitive_arity: arity -> theory -> theory| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|class| represents type classes; this is an alias for
- \verb|string|.
-
- \item \verb|sort| represents sorts; this is an alias for
- \verb|class list|.
-
- \item \verb|arity| represents type arities; this is an alias for
- triples of the form \isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} for \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s} described above.
-
- \item \verb|typ| represents types; this is a datatype with
- constructors \verb|TFree|, \verb|TVar|, \verb|Type|.
-
- \item \verb|map_atyps|~\isa{f\ {\isasymtau}} applies the mapping \isa{f}
- to all atomic types (\verb|TFree|, \verb|TVar|) occurring in \isa{{\isasymtau}}.
-
- \item \verb|fold_atyps|~\isa{f\ {\isasymtau}} iterates the operation \isa{f} over all occurrences of atomic types (\verb|TFree|, \verb|TVar|)
- in \isa{{\isasymtau}}; the type structure is traversed from left to right.
-
- \item \verb|Sign.subsort|~\isa{thy\ {\isacharparenleft}s\isactrlisub {\isadigit{1}}{\isacharcomma}\ s\isactrlisub {\isadigit{2}}{\isacharparenright}}
- tests the subsort relation \isa{s\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ s\isactrlisub {\isadigit{2}}}.
-
- \item \verb|Sign.of_sort|~\isa{thy\ {\isacharparenleft}{\isasymtau}{\isacharcomma}\ s{\isacharparenright}} tests whether type
- \isa{{\isasymtau}} is of sort \isa{s}.
-
- \item \verb|Sign.add_types|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ k{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares new
- type constructors \isa{{\isasymkappa}} with \isa{k} arguments and
- optional mixfix syntax.
-
- \item \verb|Sign.add_tyabbrs_i|~\isa{{\isacharbrackleft}{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec {\isasymalpha}{\isacharcomma}\ {\isasymtau}{\isacharcomma}\ mx{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}}
- defines a new type abbreviation \isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}\ {\isacharequal}\ {\isasymtau}} with
- optional mixfix syntax.
-
- \item \verb|Sign.primitive_class|~\isa{{\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ c\isactrlisub n{\isacharbrackright}{\isacharparenright}} declares a new class \isa{c}, together with class
- relations \isa{c\ {\isasymsubseteq}\ c\isactrlisub i}, for \isa{i\ {\isacharequal}\ {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ n}.
-
- \item \verb|Sign.primitive_classrel|~\isa{{\isacharparenleft}c\isactrlisub {\isadigit{1}}{\isacharcomma}\ c\isactrlisub {\isadigit{2}}{\isacharparenright}} declares the class relation \isa{c\isactrlisub {\isadigit{1}}\ {\isasymsubseteq}\ c\isactrlisub {\isadigit{2}}}.
-
- \item \verb|Sign.primitive_arity|~\isa{{\isacharparenleft}{\isasymkappa}{\isacharcomma}\ \isactrlvec s{\isacharcomma}\ s{\isacharparenright}} declares
- the arity \isa{{\isasymkappa}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}\isactrlvec s{\isacharparenright}s}.
-
- \end{description}%
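-
- As a minimal sketch (assuming some theory value \verb|thy| and
- hypothetical class names \verb|"order"| and \verb|"ord"|), these
- operations compose in Isabelle/ML as follows:
-
-\begin{verbatim}
-(* declare a binary type constructor without concrete syntax *)
-val thy' = Sign.add_types [("pair", 2, NoSyn)] thy;
-
-(* test the sort ordering for the hypothetical classes *)
-val b1 = Sign.subsort thy' (["order"], ["ord"]);
-
-(* check that a type variable of sort ord is of sort ord *)
-val b2 = Sign.of_sort thy' (TFree ("'a", ["ord"]), ["ord"]);
-\end{verbatim}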
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Terms \label{sec:terms}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\glossary{Term}{FIXME}
-
- The language of terms is that of simply-typed \isa{{\isasymlambda}}-calculus
- with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
- or \cite{paulson-ml2}), with the types being determined by the
- corresponding binders. In contrast, free variables and constants
- have an explicit name and type in each occurrence.
-
- \medskip A \emph{bound variable} is a natural number \isa{b},
- which accounts for the number of intermediate binders between the
- variable occurrence in the body and its binding position. For
- example, the de-Bruijn term \isa{{\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}\isactrlbsub nat\isactrlesub {\isachardot}\ {\isadigit{1}}\ {\isacharplus}\ {\isadigit{0}}} would
- correspond to \isa{{\isasymlambda}x\isactrlbsub nat\isactrlesub {\isachardot}\ {\isasymlambda}y\isactrlbsub nat\isactrlesub {\isachardot}\ x\ {\isacharplus}\ y} in a named
- representation. Note that a bound variable may be represented by
- different de-Bruijn indices at different occurrences, depending on
- the nesting of abstractions.
-
- A \emph{loose variable} is a bound variable that is outside the
- scope of local binders. The types (and names) for loose variables
- can be managed as a separate context that is maintained as a stack
- of hypothetical binders. The core logic operates on closed terms,
- without any loose variables.
-
- A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
- \isa{{\isacharparenleft}x{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed \isa{x\isactrlisub {\isasymtau}}. A
- \emph{schematic variable} is a pair of an indexname and a type,
- e.g.\ \isa{{\isacharparenleft}{\isacharparenleft}x{\isacharcomma}\ {\isadigit{0}}{\isacharparenright}{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{{\isacharquery}x\isactrlisub {\isasymtau}}.
-
- \medskip A \emph{constant} is a pair of a basic name and a type,
- e.g.\ \isa{{\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} which is usually printed as \isa{c\isactrlisub {\isasymtau}}. Constants are declared in the context as polymorphic
- families \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}, meaning that all substitution instances
- \isa{c\isactrlisub {\isasymtau}} for \isa{{\isasymtau}\ {\isacharequal}\ {\isasymsigma}{\isasymvartheta}} are valid.
-
- The vector of \emph{type arguments} of constant \isa{c\isactrlisub {\isasymtau}}
- wrt.\ the declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is defined as the codomain of
- the matcher \isa{{\isasymvartheta}\ {\isacharequal}\ {\isacharbraceleft}{\isacharquery}{\isasymalpha}\isactrlisub {\isadigit{1}}\ {\isasymmapsto}\ {\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isacharquery}{\isasymalpha}\isactrlisub n\ {\isasymmapsto}\ {\isasymtau}\isactrlisub n{\isacharbraceright}} presented in canonical order \isa{{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}}. Within a given theory context,
- there is a one-to-one correspondence between any constant \isa{c\isactrlisub {\isasymtau}} and the application \isa{c{\isacharparenleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharparenright}} of its type arguments. For example, with \isa{plus\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}}, the instance \isa{plus\isactrlbsub nat\ {\isasymRightarrow}\ nat\ {\isasymRightarrow}\ nat\isactrlesub } corresponds to \isa{plus{\isacharparenleft}nat{\isacharparenright}}.
-
- Constant declarations \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} may contain sort constraints
- for type variables in \isa{{\isasymsigma}}. These are observed by
- type-inference as expected, but \emph{ignored} by the core logic.
- This means the primitive logic is able to reason with instances of
- polymorphic constants that the user-level type-checker would reject
- due to violation of type class restrictions.
-
- \medskip An \emph{atomic} term is either a variable or constant. A
- \emph{term} is defined inductively over atomic terms, with
- abstraction and application as follows: \isa{t\ {\isacharequal}\ b\ {\isacharbar}\ x\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isacharquery}x\isactrlisub {\isasymtau}\ {\isacharbar}\ c\isactrlisub {\isasymtau}\ {\isacharbar}\ {\isasymlambda}\isactrlisub {\isasymtau}{\isachardot}\ t\ {\isacharbar}\ t\isactrlisub {\isadigit{1}}\ t\isactrlisub {\isadigit{2}}}.
- Parsing and printing take care of converting between the external
- representation with named bound variables and the internal
- de-Bruijn representation. Subsequently, we shall use the named
- notation instead of the internal de-Bruijn representation.
-
- The inductive relation \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} assigns a (unique) type to a
- term according to the structure of atomic terms, abstractions, and
- applications:
- \[
- \infer{\isa{a\isactrlisub {\isasymtau}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}}{}
- \qquad
- \infer{\isa{{\isacharparenleft}{\isasymlambda}x\isactrlsub {\isasymtau}{\isachardot}\ t{\isacharparenright}\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}}
- \qquad
- \infer{\isa{t\ u\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}}{\isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}\ {\isasymRightarrow}\ {\isasymsigma}} & \isa{u\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}}}
- \]
- A \emph{well-typed term} is a term that can be typed according to these rules.
-
- Typing information can be omitted: type-inference is able to
- reconstruct the most general type of a raw term, while assigning
- most general types to all of its variables and constants.
- Type-inference depends on a context of type constraints for fixed
- variables, and declarations for polymorphic constants.
-
- The identity of atomic terms consists both of the name and the type
- component. This means that different variables \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{1}}\isactrlesub } and \isa{x\isactrlbsub {\isasymtau}\isactrlisub {\isadigit{2}}\isactrlesub } may become the same after type
- instantiation. Some outer layers of the system make it hard to
- produce variables of the same name, but different types. In
- contrast, mixed instances of polymorphic constants occur frequently.
-
- \medskip The \emph{hidden polymorphism} of a term \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}}
- is the set of type variables occurring in \isa{t}, but not in
- \isa{{\isasymsigma}}. This means that the term implicitly depends on type
- arguments that are not accounted for in the result type, i.e.\ there are
- different type instances \isa{t{\isasymvartheta}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} and \isa{t{\isasymvartheta}{\isacharprime}\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with the same type. This slightly
- pathological situation notoriously demands additional care.
-
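- For example, given a hypothetical constant \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ nat},
- the application \isa{c\ x} for some \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymbeta}} has result type
- \isa{nat}, although the term still depends on the type argument
- \isa{{\isasymbeta}}, which does not occur in \isa{nat}.
-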
- \medskip A \emph{term abbreviation} is a syntactic definition \isa{c\isactrlisub {\isasymsigma}\ {\isasymequiv}\ t} of a closed term \isa{t} of type \isa{{\isasymsigma}},
- without any hidden polymorphism. A term abbreviation looks like a
- constant in the syntax, but is expanded before entering the logical
- core. Abbreviations are usually reverted when printing terms, using
- \isa{t\ {\isasymrightarrow}\ c\isactrlisub {\isasymsigma}} as rules for higher-order rewriting.
-
- \medskip Canonical operations on \isa{{\isasymlambda}}-terms include \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion: \isa{{\isasymalpha}}-conversion refers to capture-free
- renaming of bound variables; \isa{{\isasymbeta}}-conversion contracts an
- abstraction applied to an argument term, substituting the argument
- in the body: \isa{{\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharparenright}a} becomes \isa{b{\isacharbrackleft}a{\isacharslash}x{\isacharbrackright}}; \isa{{\isasymeta}}-conversion contracts vacuous application-abstraction: \isa{{\isasymlambda}x{\isachardot}\ f\ x} becomes \isa{f}, provided that the bound variable
- does not occur in \isa{f}.
-
- Terms are normally treated modulo \isa{{\isasymalpha}}-conversion, which is
- implicit in the de-Bruijn representation. Names for bound variables
- in abstractions are maintained separately as (meaningless) comments,
- mostly for parsing and printing. Full \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion is
- commonplace in various standard operations (\secref{sec:obj-rules})
- that are based on higher-order unification and matching.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{term}\verb|type term| \\
- \indexml{op aconv}\verb|op aconv: term * term -> bool| \\
- \indexml{map\_types}\verb|map_types: (typ -> typ) -> term -> term| \\
- \indexml{fold\_types}\verb|fold_types: (typ -> 'a -> 'a) -> term -> 'a -> 'a| \\
- \indexml{map\_aterms}\verb|map_aterms: (term -> term) -> term -> term| \\
- \indexml{fold\_aterms}\verb|fold_aterms: (term -> 'a -> 'a) -> term -> 'a -> 'a| \\
- \end{mldecls}
- \begin{mldecls}
- \indexml{fastype\_of}\verb|fastype_of: term -> typ| \\
- \indexml{lambda}\verb|lambda: term -> term -> term| \\
- \indexml{betapply}\verb|betapply: term * term -> term| \\
- \indexml{Sign.declare\_const}\verb|Sign.declare_const: Properties.T -> (binding * typ) * mixfix ->|\isasep\isanewline%
-\verb| theory -> term * theory| \\
- \indexml{Sign.add\_abbrev}\verb|Sign.add_abbrev: string -> Properties.T -> binding * term ->|\isasep\isanewline%
-\verb| theory -> (term * term) * theory| \\
- \indexml{Sign.const\_typargs}\verb|Sign.const_typargs: theory -> string * typ -> typ list| \\
- \indexml{Sign.const\_instance}\verb|Sign.const_instance: theory -> string * typ list -> typ| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|term| represents de-Bruijn terms, with comments in
- abstractions, and explicitly named free variables and constants;
- this is a datatype with constructors \verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|, \verb|Abs|, \verb|op $|.
-
- \item \isa{t}~\verb|aconv|~\isa{u} checks \isa{{\isasymalpha}}-equivalence of two terms. This is the basic equality relation
- on type \verb|term|; raw datatype equality should only be used
- for operations related to parsing or printing!
-
- \item \verb|map_types|~\isa{f\ t} applies the mapping \isa{f} to all types occurring in \isa{t}.
-
- \item \verb|fold_types|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of types in \isa{t}; the term
- structure is traversed from left to right.
-
- \item \verb|map_aterms|~\isa{f\ t} applies the mapping \isa{f}
- to all atomic terms (\verb|Bound|, \verb|Free|, \verb|Var|, \verb|Const|) occurring in \isa{t}.
-
- \item \verb|fold_aterms|~\isa{f\ t} iterates the operation \isa{f} over all occurrences of atomic terms (\verb|Bound|, \verb|Free|,
- \verb|Var|, \verb|Const|) in \isa{t}; the term structure is
- traversed from left to right.
-
- \item \verb|fastype_of|~\isa{t} determines the type of a
- well-typed term. This operation is relatively slow, despite the
- omission of any sanity checks.
-
- \item \verb|lambda|~\isa{a\ b} produces an abstraction \isa{{\isasymlambda}a{\isachardot}\ b}, where occurrences of the atomic term \isa{a} in the
- body \isa{b} are replaced by bound variables.
-
- \item \verb|betapply|~\isa{{\isacharparenleft}t{\isacharcomma}\ u{\isacharparenright}} produces an application \isa{t\ u}, with topmost \isa{{\isasymbeta}}-conversion if \isa{t} is an
- abstraction.
-
- \item \verb|Sign.declare_const|~\isa{properties\ {\isacharparenleft}{\isacharparenleft}c{\isacharcomma}\ {\isasymsigma}{\isacharparenright}{\isacharcomma}\ mx{\isacharparenright}}
- declares a new constant \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} with optional mixfix
- syntax.
-
- \item \verb|Sign.add_abbrev|~\isa{print{\isacharunderscore}mode\ properties\ {\isacharparenleft}c{\isacharcomma}\ t{\isacharparenright}}
- introduces a new term abbreviation \isa{c\ {\isasymequiv}\ t}.
-
- \item \verb|Sign.const_typargs|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isasymtau}{\isacharparenright}} and \verb|Sign.const_instance|~\isa{thy\ {\isacharparenleft}c{\isacharcomma}\ {\isacharbrackleft}{\isasymtau}\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ {\isasymtau}\isactrlisub n{\isacharbrackright}{\isacharparenright}}
- convert between two representations of polymorphic constants: full
- type instance vs.\ compact type arguments form.
-
- \end{description}%
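-
- As a minimal sketch (assuming a hypothetical base type name
- \verb|"nat"|), the basic term operations compose as follows:
-
-\begin{verbatim}
-val T = Type ("nat", []);      (* hypothetical type name *)
-val x = Free ("x", T);
-val a = Free ("a", T);
-
-val id = lambda x x;           (* Abs ("x", T, Bound 0) *)
-val t = betapply (id, a);      (* beta-reduces to a *)
-val U = fastype_of t;          (* yields T *)
-val eq = t aconv a;            (* true *)
-\end{verbatim}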
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Theorems \label{sec:thms}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\glossary{Proposition}{FIXME A \seeglossary{term} of
- \seeglossary{type} \isa{prop}. Internally, there is nothing
- special about propositions apart from their type, but the concrete
- syntax enforces a clear distinction. Propositions are structured
- via implication \isa{A\ {\isasymLongrightarrow}\ B} or universal quantification \isa{{\isasymAnd}x{\isachardot}\ B\ x} --- anything else is considered atomic. The canonical
- form for propositions is that of a \seeglossary{Hereditary Harrop
- Formula}. FIXME}
-
- \glossary{Theorem}{A proven proposition within a certain theory and
- proof context, formally \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}; both contexts are
- rarely spelled out explicitly. Theorems are usually normalized
- according to the \seeglossary{HHF} format. FIXME}
-
- \glossary{Fact}{Sometimes used interchangeably for
- \seeglossary{theorem}. Strictly speaking, a list of theorems,
- essentially an extra-logical conjunction. Facts emerge either as
- local assumptions, or as results of local goal statements --- both
- may be simultaneous, hence the list representation. FIXME}
-
- \glossary{Schematic variable}{FIXME}
-
- \glossary{Fixed variable}{A variable that is bound within a certain
- proof context; an arbitrary-but-fixed entity within a portion of
- proof text. FIXME}
-
- \glossary{Free variable}{Synonymous for \seeglossary{fixed
- variable}. FIXME}
-
- \glossary{Bound variable}{FIXME}
-
- \glossary{Variable}{See \seeglossary{schematic variable},
- \seeglossary{fixed variable}, \seeglossary{bound variable}, or
- \seeglossary{type variable}. The distinguishing feature of
- different variables is their binding scope. FIXME}
-
- A \emph{proposition} is a well-typed term of type \isa{prop}, a
- \emph{theorem} is a proven proposition (depending on a context of
- hypotheses and the background theory). Primitive inferences include
- plain natural deduction rules for the primary connectives \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} of the framework. There is also a built-in
- notion of equality/equivalence \isa{{\isasymequiv}}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Primitive connectives and rules \label{sec:prim-rules}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The theory \isa{Pure} contains constant declarations for the
- primitive connectives \isa{{\isasymAnd}}, \isa{{\isasymLongrightarrow}}, and \isa{{\isasymequiv}} of
- the logical framework, see \figref{fig:pure-connectives}. The
- derivability judgment \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} is
- defined inductively by the primitive inferences given in
- \figref{fig:prim-rules}, with the global restriction that the
- hypotheses must \emph{not} contain any schematic variables. The
- built-in equality is conceptually axiomatized as shown in
- \figref{fig:pure-equality}, although the implementation works
- directly with derived inferences.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- \isa{all\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}{\isasymalpha}\ {\isasymRightarrow}\ prop{\isacharparenright}\ {\isasymRightarrow}\ prop} & universal quantification (binder \isa{{\isasymAnd}}) \\
- \isa{{\isasymLongrightarrow}\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & implication (right associative infix) \\
- \isa{{\isasymequiv}\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & equality relation (infix) \\
- \end{tabular}
- \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
- \end{center}
- \end{figure}
-
- \begin{figure}[htb]
- \begin{center}
- \[
- \infer[\isa{{\isacharparenleft}axiom{\isacharparenright}}]{\isa{{\isasymturnstile}\ A}}{\isa{A\ {\isasymin}\ {\isasymTheta}}}
- \qquad
- \infer[\isa{{\isacharparenleft}assume{\isacharparenright}}]{\isa{A\ {\isasymturnstile}\ A}}{}
- \]
- \[
- \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
- \qquad
- \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ b{\isacharbrackleft}a{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}}}
- \]
- \[
- \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}intro{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
- \qquad
- \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}elim{\isacharparenright}}]{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B}}{\isa{{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B} & \isa{{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A}}
- \]
- \caption{Primitive inferences of Pure}\label{fig:prim-rules}
- \end{center}
- \end{figure}
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymlambda}x{\isachardot}\ b{\isacharbrackleft}x{\isacharbrackright}{\isacharparenright}\ a\ {\isasymequiv}\ b{\isacharbrackleft}a{\isacharbrackright}} & \isa{{\isasymbeta}}-conversion \\
- \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ x} & reflexivity \\
- \isa{{\isasymturnstile}\ x\ {\isasymequiv}\ y\ {\isasymLongrightarrow}\ P\ x\ {\isasymLongrightarrow}\ P\ y} & substitution \\
- \isa{{\isasymturnstile}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ f\ x\ {\isasymequiv}\ g\ x{\isacharparenright}\ {\isasymLongrightarrow}\ f\ {\isasymequiv}\ g} & extensionality \\
- \isa{{\isasymturnstile}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymequiv}\ B} & logical equivalence \\
- \end{tabular}
- \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
- \end{center}
- \end{figure}
-
- The introduction and elimination rules for \isa{{\isasymAnd}} and \isa{{\isasymLongrightarrow}} are analogous to formation of dependently typed \isa{{\isasymlambda}}-terms representing the underlying proof objects. Proof terms
- are irrelevant in the Pure logic, though; they cannot occur within
- propositions. The system provides a runtime option to record
- explicit proof terms for primitive inferences. Thus all three
- levels of \isa{{\isasymlambda}}-calculus become explicit: \isa{{\isasymRightarrow}} for
- terms, and \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} for proofs (cf.\
- \cite{Berghofer-Nipkow:2000:TPHOL}).
-
- Observe that locally fixed parameters (as in \isa{{\isasymAnd}{\isacharunderscore}intro}) need
- not be recorded in the hypotheses, because the simple syntactic
- types of Pure are always inhabitable. ``Assumptions'' \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymtau}} for type-membership are only present as long as some \isa{x\isactrlisub {\isasymtau}} occurs in the statement body.\footnote{This is the key
- difference to ``\isa{{\isasymlambda}HOL}'' in the PTS framework
- \cite{Barendregt-Geuvers:2001}, where hypotheses \isa{x\ {\isacharcolon}\ A} are
- treated uniformly for propositions and types.}
-
- \medskip The axiomatization of a theory is implicitly closed by
- forming all instances of type and term variables: \isa{{\isasymturnstile}\ A{\isasymvartheta}} holds for any substitution instance of an axiom
- \isa{{\isasymturnstile}\ A}. By pushing substitutions through derivations
- inductively, we also get admissible \isa{generalize} and \isa{instance} rules as shown in \figref{fig:subst-rules}.
-
- \begin{figure}[htb]
- \begin{center}
- \[
- \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} & \isa{{\isasymalpha}\ {\isasymnotin}\ {\isasymGamma}}}
- \quad
- \infer[\quad\isa{{\isacharparenleft}generalize{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}x{\isacharbrackright}} & \isa{x\ {\isasymnotin}\ {\isasymGamma}}}
- \]
- \[
- \infer{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isasymtau}{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}{\isasymalpha}{\isacharbrackright}}}
- \quad
- \infer[\quad\isa{{\isacharparenleft}instantiate{\isacharparenright}}]{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}t{\isacharbrackright}}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}}
- \]
- \caption{Admissible substitution rules}\label{fig:subst-rules}
- \end{center}
- \end{figure}
-
- Note that \isa{instantiate} does not require an explicit
- side-condition, because \isa{{\isasymGamma}} may never contain schematic
- variables.
-
- In principle, variables could be substituted in hypotheses as well,
- but this would disrupt the monotonicity of reasoning: deriving
- \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymturnstile}\ B{\isasymvartheta}} from \isa{{\isasymGamma}\ {\isasymturnstile}\ B} is
- correct, but \isa{{\isasymGamma}{\isasymvartheta}\ {\isasymsupseteq}\ {\isasymGamma}} does not necessarily hold:
- the result belongs to a different proof context.
-
- \medskip An \emph{oracle} is a function that produces axioms on the
- fly. Logically, this is an instance of the \isa{axiom} rule
- (\figref{fig:prim-rules}), but there is an operational difference.
- The system always records oracle invocations within derivations of
- theorems. Tracing plain axioms (and named theorems) is optional.
-
- Axiomatizations should be limited to the bare minimum, typically as
- part of the initial logical basis of an object-logic formalization.
- Later on, theories are usually developed in a strictly definitional
- fashion, by stating only certain equalities over new constants.
-
- A \emph{simple definition} consists of a constant declaration \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} together with an axiom \isa{{\isasymturnstile}\ c\ {\isasymequiv}\ t}, where \isa{t\ {\isacharcolon}{\isacharcolon}\ {\isasymsigma}} is a closed term without any hidden polymorphism. The RHS
- may depend on further defined constants, but not \isa{c} itself.
- Definitions of functions may be presented as \isa{c\ \isactrlvec x\ {\isasymequiv}\ t} instead of the puristic \isa{c\ {\isasymequiv}\ {\isasymlambda}\isactrlvec x{\isachardot}\ t}.
-
- An \emph{overloaded definition} consists of a collection of axioms
- for the same constant, with zero or one equations \isa{c{\isacharparenleft}{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharparenright}{\isasymkappa}{\isacharparenright}\ {\isasymequiv}\ t} for each type constructor \isa{{\isasymkappa}} (for
- distinct variables \isa{\isactrlvec {\isasymalpha}}). The RHS may mention
- previously defined constants as above, or arbitrary constants \isa{d{\isacharparenleft}{\isasymalpha}\isactrlisub i{\isacharparenright}} for some \isa{{\isasymalpha}\isactrlisub i} projected from \isa{\isactrlvec {\isasymalpha}}. Thus overloaded definitions essentially work by
- primitive recursion over the syntactic structure of a single type
- argument.%
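-
-
- For example, a hypothetical overloaded constant \isa{size\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ nat} might be specified by one equation per type constructor,
- say \isa{size{\isacharparenleft}nat{\isacharparenright}\ {\isasymequiv}\ {\isasymdots}} and \isa{size{\isacharparenleft}{\isacharparenleft}{\isasymalpha}{\isacharparenright}list{\isacharparenright}\ {\isasymequiv}\ {\isasymdots}},
- where the list equation may recursively refer to \isa{size{\isacharparenleft}{\isasymalpha}{\isacharparenright}}
- on the argument type.%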
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{ctyp}\verb|type ctyp| \\
- \indexmltype{cterm}\verb|type cterm| \\
- \indexml{Thm.ctyp\_of}\verb|Thm.ctyp_of: theory -> typ -> ctyp| \\
- \indexml{Thm.cterm\_of}\verb|Thm.cterm_of: theory -> term -> cterm| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{thm}\verb|type thm| \\
- \indexml{proofs}\verb|proofs: int ref| \\
- \indexml{Thm.assume}\verb|Thm.assume: cterm -> thm| \\
- \indexml{Thm.forall\_intr}\verb|Thm.forall_intr: cterm -> thm -> thm| \\
- \indexml{Thm.forall\_elim}\verb|Thm.forall_elim: cterm -> thm -> thm| \\
- \indexml{Thm.implies\_intr}\verb|Thm.implies_intr: cterm -> thm -> thm| \\
- \indexml{Thm.implies\_elim}\verb|Thm.implies_elim: thm -> thm -> thm| \\
- \indexml{Thm.generalize}\verb|Thm.generalize: string list * string list -> int -> thm -> thm| \\
- \indexml{Thm.instantiate}\verb|Thm.instantiate: (ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm| \\
- \indexml{Thm.axiom}\verb|Thm.axiom: theory -> string -> thm| \\
- \indexml{Thm.add\_oracle}\verb|Thm.add_oracle: bstring * ('a -> cterm) -> theory|\isasep\isanewline%
-\verb| -> (string * ('a -> thm)) * theory| \\
- \end{mldecls}
- \begin{mldecls}
- \indexml{Theory.add\_axioms\_i}\verb|Theory.add_axioms_i: (binding * term) list -> theory -> theory| \\
- \indexml{Theory.add\_deps}\verb|Theory.add_deps: string -> string * typ -> (string * typ) list -> theory -> theory| \\
- \indexml{Theory.add\_defs\_i}\verb|Theory.add_defs_i: bool -> bool -> (binding * term) list -> theory -> theory| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|ctyp| and \verb|cterm| represent certified types
- and terms, respectively. These are abstract datatypes that
- guarantee that their values have passed the full well-formedness (and
- well-typedness) checks, relative to the declarations of type
- constructors, constants etc. in the theory.
-
- \item \verb|ctyp_of|~\isa{thy\ {\isasymtau}} and \verb|cterm_of|~\isa{thy\ t} explicitly check types and terms, respectively. This also
- involves some basic normalizations, such as expansion of type and
- term abbreviations from the theory context.
-
- Re-certification is relatively slow and should be avoided in tight
- reasoning loops. There are separate operations to decompose
- certified entities (including actual theorems).
-
- \item \verb|thm| represents proven propositions. This is an
- abstract datatype that guarantees that its values have been
- constructed by basic principles of the \verb|Thm| module.
- Every \verb|thm| value contains a sliding back-reference to the
- enclosing theory, cf.\ \secref{sec:context-theory}.
-
- \item \verb|proofs| determines the detail of proof recording within
- \verb|thm| values: \verb|0| records only oracles, \verb|1| records
- oracles, axioms and named theorems, \verb|2| records full proof
- terms.
-
- \item \verb|Thm.assume|, \verb|Thm.forall_intr|, \verb|Thm.forall_elim|, \verb|Thm.implies_intr|, and \verb|Thm.implies_elim|
- correspond to the primitive inferences of \figref{fig:prim-rules}.
-
- \item \verb|Thm.generalize|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}{\isacharcomma}\ \isactrlvec x{\isacharparenright}}
- corresponds to the \isa{generalize} rules of
- \figref{fig:subst-rules}. Here collections of type and term
- variables are generalized simultaneously, specified by the given
- basic names.
-
- \item \verb|Thm.instantiate|~\isa{{\isacharparenleft}\isactrlvec {\isasymalpha}\isactrlisub s{\isacharcomma}\ \isactrlvec x\isactrlisub {\isasymtau}{\isacharparenright}} corresponds to the \isa{instantiate} rules
- of \figref{fig:subst-rules}. Type variables are substituted before
- term variables. Note that the types in \isa{\isactrlvec x\isactrlisub {\isasymtau}}
- refer to the instantiated versions.
-
- \item \verb|Thm.axiom|~\isa{thy\ name} retrieves a named
- axiom, cf.\ \isa{axiom} in \figref{fig:prim-rules}.
-
- \item \verb|Thm.add_oracle|~\isa{{\isacharparenleft}name{\isacharcomma}\ oracle{\isacharparenright}} produces a named
- oracle rule, essentially generating arbitrary axioms on the fly,
- cf.\ \isa{axiom} in \figref{fig:prim-rules}.
-
- \item \verb|Theory.add_axioms_i|~\isa{{\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ A{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} declares
- arbitrary propositions as axioms.
-
- \item \verb|Theory.add_deps|~\isa{name\ c\isactrlisub {\isasymtau}\ \isactrlvec d\isactrlisub {\isasymsigma}} declares dependencies of a named specification
- for constant \isa{c\isactrlisub {\isasymtau}}, relative to existing
- specifications for constants \isa{\isactrlvec d\isactrlisub {\isasymsigma}}.
-
- \item \verb|Theory.add_defs_i|~\isa{unchecked\ overloaded\ {\isacharbrackleft}{\isacharparenleft}name{\isacharcomma}\ c\ \isactrlvec x\ {\isasymequiv}\ t{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharbrackright}} states a definitional axiom for an existing
- constant \isa{c}. Dependencies are recorded (cf.\ \verb|Theory.add_deps|), unless the \isa{unchecked} option is set.
-
- \end{description}%
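-
- As a minimal sketch (assuming some theory value \verb|thy|), the
- primitive inferences can be composed to derive the trivial rule
- \isa{A\ {\isasymLongrightarrow}\ A}:
-
-\begin{verbatim}
-(* certify the proposition A :: prop as a hypothetical free variable *)
-val cA = Thm.cterm_of thy (Free ("A", propT));
-
-(* assume A, then discharge it:  A |- A  becomes  |- A ==> A *)
-val th = Thm.implies_intr cA (Thm.assume cA);
-\end{verbatim}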
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Auxiliary definitions%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Theory \isa{Pure} provides a few auxiliary definitions, see
- \figref{fig:pure-aux}. These special constants are normally not
- exposed to the user, but appear in internal encodings.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- \isa{conjunction\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop\ {\isasymRightarrow}\ prop} & (infix \isa{{\isacharampersand}}) \\
- \isa{{\isasymturnstile}\ A\ {\isacharampersand}\ B\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}C{\isachardot}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isacharparenright}} \\[1ex]
- \isa{prop\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop} & (prefix \isa{{\isacharhash}}, suppressed) \\
- \isa{{\isacharhash}A\ {\isasymequiv}\ A} \\[1ex]
- \isa{term\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ prop} & (prefix \isa{TERM}) \\
- \isa{term\ x\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}A{\isachardot}\ A\ {\isasymLongrightarrow}\ A{\isacharparenright}} \\[1ex]
- \isa{TYPE\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself} & (prefix \isa{TYPE}) \\
- \isa{{\isacharparenleft}unspecified{\isacharparenright}} \\
- \end{tabular}
- \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
- \end{center}
- \end{figure}
-
- Derived conjunction rules include introduction \isa{A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isacharampersand}\ B}, and destructions \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ A} and \isa{A\ {\isacharampersand}\ B\ {\isasymLongrightarrow}\ B}.
- Conjunction makes it possible to treat simultaneous assumptions and conclusions
- uniformly. For example, multiple claims are intermediately
- represented as explicit conjunction, but this is refined into
- separate sub-goals before the user continues the proof; the final
- result is projected into a list of theorems (cf.\
- \secref{sec:tactical-goals}).
-
- The \isa{prop} marker (\isa{{\isacharhash}}) makes arbitrarily complex
- propositions appear as atomic, without changing the meaning: \isa{{\isasymGamma}\ {\isasymturnstile}\ A} and \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isacharhash}A} are interchangeable. See
- \secref{sec:tactical-goals} for specific operations.
-
- The \isa{term} marker turns any well-typed term into a derivable
- proposition: \isa{{\isasymturnstile}\ TERM\ t} holds unconditionally. Although
- this is logically vacuous, it allows terms and proofs to be treated
- uniformly, similar to a type-theoretic framework.
-
- The \isa{TYPE} constructor is the canonical representative of
- the unspecified type \isa{{\isasymalpha}\ itself}; it essentially injects the
- language of types into that of terms. There is specific notation
- \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} for \isa{TYPE\isactrlbsub {\isasymtau}\ itself\isactrlesub }.
- Although devoid of any particular meaning, \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} accounts for the type \isa{{\isasymtau}} within the term
- language. In particular, \isa{TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}} may be used as formal
- argument in primitive definitions, in order to circumvent hidden
- polymorphism (cf.\ \secref{sec:terms}). For example, \isa{c\ TYPE{\isacharparenleft}{\isasymalpha}{\isacharparenright}\ {\isasymequiv}\ A{\isacharbrackleft}{\isasymalpha}{\isacharbrackright}} defines \isa{c\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ itself\ {\isasymRightarrow}\ prop} in terms of
- a proposition \isa{A} that depends on an additional type
- argument, which is essentially a predicate on types.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{Conjunction.intr}\verb|Conjunction.intr: thm -> thm -> thm| \\
- \indexml{Conjunction.elim}\verb|Conjunction.elim: thm -> thm * thm| \\
- \indexml{Drule.mk\_term}\verb|Drule.mk_term: cterm -> thm| \\
- \indexml{Drule.dest\_term}\verb|Drule.dest_term: thm -> cterm| \\
- \indexml{Logic.mk\_type}\verb|Logic.mk_type: typ -> term| \\
- \indexml{Logic.dest\_type}\verb|Logic.dest_type: term -> typ| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Conjunction.intr| derives \isa{A\ {\isacharampersand}\ B} from \isa{A} and \isa{B}.
-
- \item \verb|Conjunction.elim| derives \isa{A} and \isa{B}
- from \isa{A\ {\isacharampersand}\ B}.
-
- \item \verb|Drule.mk_term| derives \isa{TERM\ t}.
-
- \item \verb|Drule.dest_term| recovers term \isa{t} from \isa{TERM\ t}.
-
- \item \verb|Logic.mk_type|~\isa{{\isasymtau}} produces the term \isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}}.
-
- \item \verb|Logic.dest_type|~\isa{TYPE{\isacharparenleft}{\isasymtau}{\isacharparenright}} recovers the type
- \isa{{\isasymtau}}.
-
- \end{description}%
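-
- A minimal sketch of these operations (assuming some theory value
- \verb|thy| and a well-typed term \verb|t|):
-
-\begin{verbatim}
-val th = Drule.mk_term (Thm.cterm_of thy t);   (* |- TERM t *)
-val ct = Drule.dest_term th;                   (* recovers t, certified *)
-
-val ty = Logic.mk_type (TFree ("'a", []));     (* term TYPE('a) *)
-val T = Logic.dest_type ty;                    (* back to the typ *)
-\end{verbatim}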
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Object-level rules \label{sec:obj-rules}%
-}
-\isamarkuptrue%
-%
-\isadelimFIXME
-%
-\endisadelimFIXME
-%
-\isatagFIXME
-%
-\begin{isamarkuptext}%
-FIXME
-
- A \emph{rule} is any Pure theorem in HHF normal form; there is a
- separate calculus for rule composition, which is modeled after
- Gentzen's Natural Deduction \cite{Gentzen:1935}, but allows
- rules to be nested arbitrarily, similar to \cite{extensions91}.
-
- Normally, all theorems accessible to the user are proper rules.
- Low-level inferences are occasionally required internally, but the
- result should always be presented in canonical form. The higher
- interfaces of Isabelle/Isar will always produce proper rules. It is
- important to maintain this invariant in add-on applications!
-
- There are two main principles of rule composition: \isa{resolution} (i.e.\ backchaining of rules) and \isa{by{\isacharminus}assumption} (i.e.\ closing a branch); both principles are
- combined in the variants of \isa{elim{\isacharminus}resolution} and \isa{dest{\isacharminus}resolution}. Raw \isa{composition} is occasionally
- useful as well, although it is, strictly speaking, outside the proper
- rule calculus.
-
- Rules are treated modulo general higher-order unification, which is
- unification modulo the equational theory of \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-conversion
- on \isa{{\isasymlambda}}-terms. Moreover, propositions are understood modulo
- the (derived) equivalence \isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}{\isacharparenright}\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x{\isacharparenright}}.
-
- This means that any operations within the rule calculus may be
- subject to spontaneous \isa{{\isasymalpha}{\isasymbeta}{\isasymeta}}-HHF conversions. It is common
- practice not to contract or expand unnecessarily. Some mechanisms
- prefer one form, others the opposite, so there is a potential
- danger of oscillating between the two!
-
- Only a few operations really work \emph{modulo} HHF conversion;
- most expect a normal form: quantifiers \isa{{\isasymAnd}} before implications
- \isa{{\isasymLongrightarrow}} at each level of nesting.
-
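- For example, \isa{A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}} normalizes to \isa{{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x} by the equivalence above, moving the quantifier in
- front of the implication.
-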
-\glossary{Hereditary Harrop Formula}{The set of propositions in HHF
-format is defined inductively as \isa{H\ {\isacharequal}\ {\isacharparenleft}{\isasymAnd}x\isactrlsup {\isacharasterisk}{\isachardot}\ H\isactrlsup {\isacharasterisk}\ {\isasymLongrightarrow}\ A{\isacharparenright}}, for variables \isa{x} and atomic propositions \isa{A}.
-Any proposition may be put into HHF form by normalizing with the rule
-\isa{{\isacharparenleft}A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}{\isacharparenright}\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x{\isacharparenright}}. In Isabelle, the outermost
-quantifier prefix is represented via \seeglossary{schematic
-variables}, such that the top-level structure is merely that of a
-\seeglossary{Horn Clause}.}
-
-\glossary{HHF}{See \seeglossary{Hereditary Harrop Formula}.}
-
-
- \[
- \infer[\isa{{\isacharparenleft}assumption{\isacharparenright}}]{\isa{C{\isasymvartheta}}}
- {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} & \isa{A{\isasymvartheta}\ {\isacharequal}\ H\isactrlsub i{\isasymvartheta}}~~\text{(for some~\isa{i})}}
- \]
-
-
- \[
- \infer[\isa{{\isacharparenleft}compose{\isacharparenright}}]{\isa{\isactrlvec A{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}}
- {\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B} & \isa{B{\isacharprime}\ {\isasymLongrightarrow}\ C} & \isa{B{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}}}
- \]
-
-
- \[
- \infer[\isa{{\isacharparenleft}{\isasymAnd}{\isacharunderscore}lift{\isacharparenright}}]{\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}}}{\isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a}}
- \]
- \[
- \infer[\isa{{\isacharparenleft}{\isasymLongrightarrow}{\isacharunderscore}lift{\isacharparenright}}]{\isa{{\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ \isactrlvec A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}\isactrlvec H\ {\isasymLongrightarrow}\ B{\isacharparenright}}}{\isa{\isactrlvec A\ {\isasymLongrightarrow}\ B}}
- \]
-
- The \isa{resolve} scheme is now acquired from \isa{{\isasymAnd}{\isacharunderscore}lift},
- \isa{{\isasymLongrightarrow}{\isacharunderscore}lift}, and \isa{compose}.
-
- \[
- \infer[\isa{{\isacharparenleft}resolution{\isacharparenright}}]
- {\isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ \isactrlvec A\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}}}
- {\begin{tabular}{l}
- \isa{\isactrlvec A\ {\isacharquery}\isactrlvec a\ {\isasymLongrightarrow}\ B\ {\isacharquery}\isactrlvec a} \\
- \isa{{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C} \\
- \isa{{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}{\isacharquery}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}} \\
- \end{tabular}}
- \]
-
-
- FIXME \isa{elim{\isacharunderscore}resolution}, \isa{dest{\isacharunderscore}resolution}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagFIXME
-{\isafoldFIXME}%
-%
-\isadelimFIXME
-%
-\endisadelimFIXME
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/prelim.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,911 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{prelim}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ prelim\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Preliminaries%
-}
-\isamarkuptrue%
-%
-\isamarkupsection{Contexts \label{sec:context}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-A logical context represents the background that is required for
- formulating statements and composing proofs. It acts as a medium to
- produce formal content, depending on earlier material (declarations,
- results etc.).
-
- For example, derivations within the Isabelle/Pure logic can be
- described as a judgment \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}, which means that a
- proposition \isa{{\isasymphi}} is derivable from hypotheses \isa{{\isasymGamma}}
- within the theory \isa{{\isasymTheta}}. There are logical reasons for
- keeping \isa{{\isasymTheta}} and \isa{{\isasymGamma}} separate: theories can be
- liberal about supporting type constructors and schematic
- polymorphism of constants and axioms, while the inner calculus of
- \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}} is strictly limited to Simple Type Theory (with
- fixed type variables in the assumptions).
-
- \medskip Contexts and derivations are linked by the following key
- principles:
-
- \begin{itemize}
-
- \item Transfer: monotonicity of derivations allows results to be
- transferred into a \emph{larger} context, i.e.\ \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}} implies \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\isactrlsub {\isacharprime}\ {\isasymphi}} for contexts \isa{{\isasymTheta}{\isacharprime}\ {\isasymsupseteq}\ {\isasymTheta}} and \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}}.
-
- \item Export: discharge of hypotheses allows results to be exported
- into a \emph{smaller} context, i.e.\ \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}}
- implies \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymDelta}\ {\isasymLongrightarrow}\ {\isasymphi}} where \isa{{\isasymGamma}{\isacharprime}\ {\isasymsupseteq}\ {\isasymGamma}} and
- \isa{{\isasymDelta}\ {\isacharequal}\ {\isasymGamma}{\isacharprime}\ {\isacharminus}\ {\isasymGamma}} (see the example after this list). Note that
- \isa{{\isasymTheta}} remains unchanged here, only the \isa{{\isasymGamma}} part is affected.
-
- \end{itemize}
-
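- For example, with \isa{{\isasymGamma}{\isacharprime}\ {\isacharequal}\ {\isasymGamma}\ {\isasymunion}\ {\isacharbraceleft}A{\isacharbraceright}}, exporting \isa{{\isasymGamma}{\isacharprime}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ {\isasymphi}} into the smaller context \isa{{\isasymGamma}} yields
- \isa{{\isasymGamma}\ {\isasymturnstile}\isactrlsub {\isasymTheta}\ A\ {\isasymLongrightarrow}\ {\isasymphi}}.
-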
- \medskip By modeling the main characteristics of the primitive
- \isa{{\isasymTheta}} and \isa{{\isasymGamma}} above, and abstracting over any
- particular logical content, we arrive at the fundamental notions of
- \emph{theory context} and \emph{proof context} in Isabelle/Isar.
- These implement a certain policy to manage arbitrary \emph{context
- data}. There is a strongly-typed mechanism to declare new kinds of
- data at compile time.
-
- The internal bootstrap process of Isabelle/Pure eventually reaches a
- stage where certain data slots provide the logical content of \isa{{\isasymTheta}} and \isa{{\isasymGamma}} sketched above, but the development does not stop there!
- Various additional data slots support all kinds of mechanisms that
- are not necessarily part of the core logic.
-
- For example, there would be data for canonical introduction and
- elimination rules for arbitrary operators (depending on the
- object-logic and application), which enables users to perform
- standard proof steps implicitly (cf.\ the \isa{rule} method
- \cite{isabelle-isar-ref}).
-
- \medskip Thus Isabelle/Isar is able to bring forth more and more
- concepts successively. In particular, an object-logic like
- Isabelle/HOL continues the Isabelle/Pure setup by adding specific
- components for automated reasoning (classical reasoner, tableau
- prover, structured induction etc.) and derived specification
- mechanisms (inductive predicates, recursive functions etc.). All of
- this is ultimately based on the generic data management by theory
- and proof contexts introduced here.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Theory context \label{sec:context-theory}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\glossary{Theory}{FIXME}
-
- A \emph{theory} is a data container with an explicit name and a
- unique identifier. Theories are related by a (nominal) sub-theory
- relation, which corresponds to the dependency graph of the original
- construction; each theory is derived from a certain sub-graph of
- ancestor theories.
-
- The \isa{merge} operation produces the least upper bound of two
- theories, which actually degenerates into absorption of one theory
- into the other (due to the nominal sub-theory relation).
-
- The \isa{begin} operation starts a new theory by importing
- several parent theories and entering a special \isa{draft} mode,
- which is sustained until the final \isa{end} operation. A draft
- theory acts like a linear type, where updates invalidate earlier
- versions. An invalidated draft is called ``stale''.
-
- The \isa{checkpoint} operation produces an intermediate stepping
- stone that will survive the next update: both the original and the
- changed theory remain valid and are related by the sub-theory
- relation. Checkpointing essentially recovers purely functional
- theory values, at the expense of some extra internal bookkeeping.
-
- The \isa{copy} operation produces an auxiliary version that has
- the same data content, but is unrelated to the original: updates of
- the copy do not affect the original, nor does the sub-theory
- relation hold.
-
- \medskip The example in \figref{fig:ex-theory} below shows a theory
- graph derived from \isa{Pure}, with theory \isa{Length}
- importing \isa{Nat} and \isa{List}. The body of \isa{Length} consists of a sequence of updates, working mostly on
- drafts. Intermediate checkpoints may occur as well, due to the
- history mechanism provided by the Isar top-level, cf.\
- \secref{sec:isar-toplevel}.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{rcccl}
- & & \isa{Pure} \\
- & & \isa{{\isasymdown}} \\
- & & \isa{FOL} \\
- & $\swarrow$ & & $\searrow$ & \\
- \isa{Nat} & & & & \isa{List} \\
- & $\searrow$ & & $\swarrow$ \\
- & & \isa{Length} \\
- & & \multicolumn{3}{l}{~~\hyperlink{keyword.imports}{\mbox{\isa{\isakeyword{imports}}}}} \\
- & & \multicolumn{3}{l}{~~\hyperlink{keyword.begin}{\mbox{\isa{\isakeyword{begin}}}}} \\
- & & $\vdots$~~ \\
- & & \isa{{\isasymbullet}}~~ \\
- & & $\vdots$~~ \\
- & & \isa{{\isasymbullet}}~~ \\
- & & $\vdots$~~ \\
- & & \multicolumn{3}{l}{~~\hyperlink{command.end}{\mbox{\isa{\isacommand{end}}}}} \\
- \end{tabular}
- \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
- \end{center}
- \end{figure}
-
- \medskip There is a separate notion of \emph{theory reference} for
- maintaining a live link to an evolving theory context: updates on
- drafts are propagated automatically. Dynamic updating stops after
- an explicit \isa{end} only.
-
- Derived entities may store a theory reference in order to indicate
- the context they belong to. This implicitly assumes monotonic
- reasoning, because the referenced context may become larger without
- further notice.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{theory}\verb|type theory| \\
- \indexml{Theory.subthy}\verb|Theory.subthy: theory * theory -> bool| \\
- \indexml{Theory.merge}\verb|Theory.merge: theory * theory -> theory| \\
- \indexml{Theory.checkpoint}\verb|Theory.checkpoint: theory -> theory| \\
- \indexml{Theory.copy}\verb|Theory.copy: theory -> theory| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{theory\_ref}\verb|type theory_ref| \\
- \indexml{Theory.deref}\verb|Theory.deref: theory_ref -> theory| \\
- \indexml{Theory.check\_thy}\verb|Theory.check_thy: theory -> theory_ref| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|theory| represents theory contexts. This is
- essentially a linear type! Most operations destroy the original
- version, which then becomes ``stale''.
-
- \item \verb|Theory.subthy|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
- compares theories according to the inherent graph structure of the
- construction. This sub-theory relation is a nominal approximation
- of inclusion (\isa{{\isasymsubseteq}}) of the corresponding content.
-
- \item \verb|Theory.merge|~\isa{{\isacharparenleft}thy\isactrlsub {\isadigit{1}}{\isacharcomma}\ thy\isactrlsub {\isadigit{2}}{\isacharparenright}}
- absorbs one theory into the other. This fails for unrelated
- theories!
-
- \item \verb|Theory.checkpoint|~\isa{thy} produces a safe
- stepping stone in the linear development of \isa{thy}. The next
- update will result in two related, valid theories.
-
- \item \verb|Theory.copy|~\isa{thy} produces a variant of \isa{thy} that holds a copy of the same data. The result is not
- related to the original; the original is unchanged.
-
- \item \verb|theory_ref| represents a sliding reference to an
- always valid theory; updates on the original are propagated
- automatically.
-
- \item \verb|Theory.deref|~\isa{thy{\isacharunderscore}ref} turns a \verb|theory_ref| into a \verb|theory| value. As the referenced
- theory evolves monotonically over time, later invocations of \verb|Theory.deref| may refer to a larger context.
-
- \item \verb|Theory.check_thy|~\isa{thy} produces a \verb|theory_ref| from a valid \verb|theory| value.
-
- \end{description}%
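-
- \medskip For illustration, the following hypothetical ML fragment
- exercises these operations; \verb|thy0| stands for an arbitrary
- valid theory value, and the snippet is a sketch, not part of the
- interface description:
-
-\begin{verbatim}
-  val thy1 = Theory.checkpoint thy0;    (*thy0 stays valid afterwards*)
-  val b = Theory.subthy (thy0, thy1);   (*true: thy1 includes thy0*)
-  val thy2 = Theory.copy thy1;          (*same data, unrelated theory*)
-  val thy3 = Theory.merge (thy0, thy1); (*absorption of related theories*)
-\end{verbatim}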
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Proof context \label{sec:context-proof}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\glossary{Proof context}{The static context of a structured proof,
- acts like a local ``theory'' of the current portion of Isar proof
- text, generalizes the idea of local hypotheses \isa{{\isasymGamma}} in
- judgments \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}} of natural deduction calculi. There is a
- generic notion of introducing and discharging hypotheses.
- Arbitrary auxiliary context data may be adjoined.}
-
- A proof context is a container for pure data with a back-reference
- to the theory it belongs to. The \isa{init} operation creates a
- proof context from a given theory. Modifications to draft theories
- are propagated to the proof context as usual, but there is also an
- explicit \isa{transfer} operation to force resynchronization
- with more substantial updates to the underlying theory. The actual
- context data does not require any special bookkeeping, thanks to the
- lack of destructive features.
-
- Entities derived in a proof context need to record inherent logical
- requirements explicitly, since there is no separate context
- identification as for theories. For example, hypotheses used in
- primitive derivations (cf.\ \secref{sec:thms}) are recorded
- separately within the sequent \isa{{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}}, just to make double
- sure. Results could still leak into an alien proof context due to
- programming errors, but Isabelle/Isar includes some extra validity
- checks in critical positions, notably at the end of a sub-proof.
-
- Proof contexts may be manipulated arbitrarily, although the common
- discipline is to follow block structure as a mental model: a given
- context is extended consecutively, and results are exported back
- into the original context. Note that the Isar proof states model
- block-structured reasoning explicitly, using a stack of proof
- contexts internally, cf.\ \secref{sec:isar-proof-state}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{Proof.context}\verb|type Proof.context| \\
- \indexml{ProofContext.init}\verb|ProofContext.init: theory -> Proof.context| \\
- \indexml{ProofContext.theory\_of}\verb|ProofContext.theory_of: Proof.context -> theory| \\
- \indexml{ProofContext.transfer}\verb|ProofContext.transfer: theory -> Proof.context -> Proof.context| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Proof.context| represents proof contexts. Elements
- of this type are essentially pure values, with a sliding reference
- to the background theory.
-
- \item \verb|ProofContext.init|~\isa{thy} produces a proof context
- derived from \isa{thy}, initializing all data.
-
- \item \verb|ProofContext.theory_of|~\isa{ctxt} selects the
- background theory from \isa{ctxt}, dereferencing its internal
- \verb|theory_ref|.
-
- \item \verb|ProofContext.transfer|~\isa{thy\ ctxt} promotes the
- background theory of \isa{ctxt} to the super theory \isa{thy}.
-
- \end{description}%
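-
- \medskip A minimal sketch, assuming a theory value \verb|thy| and a
- super theory \verb|thy'| of it:
-
-\begin{verbatim}
-  val ctxt = ProofContext.init thy;            (*fresh proof context*)
-  val thy0 = ProofContext.theory_of ctxt;      (*background theory*)
-  val ctxt' = ProofContext.transfer thy' ctxt; (*resynchronize with thy'*)
-\end{verbatim}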
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Generic contexts \label{sec:generic-context}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-A generic context is the disjoint sum of either a theory or proof
- context. Occasionally, this enables uniform treatment of generic
- context data, typically extra-logical information. Operations on
- generic contexts include the usual injections, partial selections,
- and combinators for lifting operations on either component of the
- disjoint sum.
-
- Moreover, there are total operations \isa{theory{\isacharunderscore}of} and \isa{proof{\isacharunderscore}of} to convert a generic context into either kind: a theory
- can always be selected from the sum, while a proof context might
- have to be constructed by an ad-hoc \isa{init} operation.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{Context.generic}\verb|type Context.generic| \\
- \indexml{Context.theory\_of}\verb|Context.theory_of: Context.generic -> theory| \\
- \indexml{Context.proof\_of}\verb|Context.proof_of: Context.generic -> Proof.context| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Context.generic| is the direct sum of \verb|theory| and \verb|Proof.context|, with the datatype
- constructors \verb|Context.Theory| and \verb|Context.Proof|.
-
- \item \verb|Context.theory_of|~\isa{context} always produces a
- theory from the generic \isa{context}, using \verb|ProofContext.theory_of| as required.
-
- \item \verb|Context.proof_of|~\isa{context} always produces a
- proof context from the generic \isa{context}, using \verb|ProofContext.init| as required (note that this re-initializes the
- context data with each invocation).
-
- \end{description}%
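-
- \medskip A small sketch of moving between the three kinds of
- context, with hypothetical values \verb|thy| and \verb|ctxt|:
-
-\begin{verbatim}
-  val gthy = Context.Theory thy;       (*inject a theory*)
-  val gctxt = Context.Proof ctxt;      (*inject a proof context*)
-  val thy' = Context.theory_of gctxt;  (*via ProofContext.theory_of*)
-  val ctxt' = Context.proof_of gthy;   (*via ProofContext.init*)
-\end{verbatim}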
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Context data \label{sec:context-data}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The main purpose of theory and proof contexts is to manage arbitrary
- data. New data types can be declared incrementally at compile time.
- There are separate declaration mechanisms for each of the three kinds
- of contexts: theory, proof, generic.
-
- \paragraph{Theory data} may refer to destructive entities, which are
- maintained in direct correspondence to the linear evolution of
- theory values, including explicit copies.\footnote{Most existing
- instances of destructive theory data are merely historical relics
- (e.g.\ the destructive theorem storage, and destructive hints for
- the Simplifier and Classical rules).} A theory data declaration
- needs to implement the following SML signature:
-
- \medskip
- \begin{tabular}{ll}
- \isa{{\isasymtype}\ T} & representing type \\
- \isa{{\isasymval}\ empty{\isacharcolon}\ T} & empty default value \\
- \isa{{\isasymval}\ copy{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & refresh impure data \\
- \isa{{\isasymval}\ extend{\isacharcolon}\ T\ {\isasymrightarrow}\ T} & re-initialize on import \\
- \isa{{\isasymval}\ merge{\isacharcolon}\ T\ {\isasymtimes}\ T\ {\isasymrightarrow}\ T} & join on import \\
- \end{tabular}
- \medskip
-
- \noindent The \isa{empty} value acts as initial default for
- \emph{any} theory that does not declare actual data content; \isa{copy} maintains persistent integrity for impure data and is
- just the identity for pure values; \isa{extend} acts like a
- unitary version of \isa{merge}; both operations should also
- include the functionality of \isa{copy} for impure data.
-
- \paragraph{Proof context data} is purely functional. A declaration
- needs to implement the following SML signature:
-
- \medskip
- \begin{tabular}{ll}
- \isa{{\isasymtype}\ T} & representing type \\
- \isa{{\isasymval}\ init{\isacharcolon}\ theory\ {\isasymrightarrow}\ T} & produce initial value \\
- \end{tabular}
- \medskip
-
- \noindent The \isa{init} operation is supposed to produce a pure
- value from the given background theory.
-
- \paragraph{Generic data} provides a hybrid interface for both theory
- and proof data. The declaration is essentially the same as for
- (pure) theory data, without \isa{copy}. The \isa{init}
- operation for proof contexts merely selects the current data value
- from the background theory.
-
- \bigskip A data declaration of type \isa{T} results in the
- following interface:
-
- \medskip
- \begin{tabular}{ll}
- \isa{init{\isacharcolon}\ theory\ {\isasymrightarrow}\ theory} \\
- \isa{get{\isacharcolon}\ context\ {\isasymrightarrow}\ T} \\
- \isa{put{\isacharcolon}\ T\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\
- \isa{map{\isacharcolon}\ {\isacharparenleft}T\ {\isasymrightarrow}\ T{\isacharparenright}\ {\isasymrightarrow}\ context\ {\isasymrightarrow}\ context} \\
- \end{tabular}
- \medskip
-
- \noindent Here \isa{init} is only applicable to impure theory
- data to install a fresh copy persistently (a destructive update on
- uninitialized data has no permanent effect). The other operations provide
- access for the particular kind of context (theory, proof, or generic
- context). Note that this is a safe interface: there is no other way
- to access the corresponding data slot of a context. By keeping
- these operations private, a component may maintain abstract values
- authentically, without other components interfering.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmlfunctor{TheoryDataFun}\verb|functor TheoryDataFun| \\
- \indexmlfunctor{ProofDataFun}\verb|functor ProofDataFun| \\
- \indexmlfunctor{GenericDataFun}\verb|functor GenericDataFun| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|TheoryDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} declares data for
- type \verb|theory| according to the specification provided as
- argument structure. The resulting structure provides data init and
- access operations as described above.
-
- \item \verb|ProofDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to
- \verb|TheoryDataFun| for type \verb|Proof.context|.
-
- \item \verb|GenericDataFun|\isa{{\isacharparenleft}spec{\isacharparenright}} is analogous to
- \verb|TheoryDataFun| for type \verb|Context.generic|.
-
- \end{description}%
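-
- \medskip As a sketch, a (pure) theory data slot holding a list of
- terms might be declared as follows, following the argument signature
- given above; the append-based \verb|merge| and the values \verb|thy|
- and \verb|t| are illustrative assumptions only:
-
-\begin{verbatim}
-  structure Terms = TheoryDataFun
-  (
-    type T = term list;
-    val empty = [];
-    val copy = I;                      (*pure data: identity*)
-    val extend = I;
-    fun merge (ts1, ts2) = ts1 @ ts2;  (*simplistic join*)
-  );
-
-  val ts = Terms.get thy;                       (*access the slot*)
-  val thy' = Terms.map (fn ts => t :: ts) thy;  (*update the slot*)
-\end{verbatim}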
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Names \label{sec:names}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-In principle, a name is just a string, but there are various
- conventions for encoding additional structure. For example, ``\isa{Foo{\isachardot}bar{\isachardot}baz}'' is considered a qualified name consisting of
- three basic name components. The individual constituents of a name
- may have further substructure, e.g.\ the string
- ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Strings of symbols%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\glossary{Symbol}{The smallest unit of text in Isabelle, subsumes
- plain ASCII characters as well as an infinite collection of named
- symbols (for Greek, math etc.).}
-
- A \emph{symbol} constitutes the smallest textual unit in Isabelle
- --- raw characters are normally not encountered at all. Isabelle
- strings consist of a sequence of symbols, represented as a packed
- string or a list of strings. Each symbol is in itself a small
- string, which has one of the following forms:
-
- \begin{enumerate}
-
- \item a single ASCII character ``\isa{c}'', for example
- ``\verb,a,'',
-
- \item a regular symbol ``\verb,\,\verb,<,\isa{ident}\verb,>,'',
- for example ``\verb,\,\verb,<alpha>,'',
-
- \item a control symbol ``\verb,\,\verb,<^,\isa{ident}\verb,>,'',
- for example ``\verb,\,\verb,<^bold>,'',
-
- \item a raw symbol ``\verb,\,\verb,<^raw:,\isa{text}\verb,>,''
- where \isa{text} consists of printable characters excluding
- ``\verb,.,'' and ``\verb,>,'', for example
- ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
-
- \item a numbered raw control symbol ``\verb,\,\verb,<^raw,\isa{n}\verb,>,'' where \isa{n} consists of digits, for example
- ``\verb,\,\verb,<^raw42>,''.
-
- \end{enumerate}
-
- \noindent The \isa{ident} syntax for symbol names is \isa{letter\ {\isacharparenleft}letter\ {\isacharbar}\ digit{\isacharparenright}\isactrlsup {\isacharasterisk}}, where \isa{letter\ {\isacharequal}\ A{\isachardot}{\isachardot}Za{\isachardot}{\isachardot}z} and \isa{digit\ {\isacharequal}\ {\isadigit{0}}{\isachardot}{\isachardot}{\isadigit{9}}}. There are infinitely many
- regular symbols and control symbols, but a fixed collection of
- standard symbols is treated specifically. For example,
- ``\verb,\,\verb,<alpha>,'' is classified as a letter, which means it
- may occur within regular Isabelle identifiers.
-
- Since the character set underlying Isabelle symbols is 7-bit ASCII
- and 8-bit characters are passed through transparently, Isabelle may
- also process Unicode/UCS data in UTF-8 encoding. Unicode provides
- its own collection of mathematical symbols, but there is no built-in
- link to the standard collection of Isabelle.
-
- \medskip Output of Isabelle symbols depends on the print mode
- (\secref{FIXME}). For example, the standard {\LaTeX} setup of the
- Isabelle document preparation system would present
- ``\verb,\,\verb,<alpha>,'' as \isa{{\isasymalpha}}, and
- ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as \isa{\isactrlbold {\isasymalpha}}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{Symbol.symbol}\verb|type Symbol.symbol| \\
- \indexml{Symbol.explode}\verb|Symbol.explode: string -> Symbol.symbol list| \\
- \indexml{Symbol.is\_letter}\verb|Symbol.is_letter: Symbol.symbol -> bool| \\
- \indexml{Symbol.is\_digit}\verb|Symbol.is_digit: Symbol.symbol -> bool| \\
- \indexml{Symbol.is\_quasi}\verb|Symbol.is_quasi: Symbol.symbol -> bool| \\
- \indexml{Symbol.is\_blank}\verb|Symbol.is_blank: Symbol.symbol -> bool| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{Symbol.sym}\verb|type Symbol.sym| \\
- \indexml{Symbol.decode}\verb|Symbol.decode: Symbol.symbol -> Symbol.sym| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Symbol.symbol| represents individual Isabelle
- symbols; this is an alias for \verb|string|.
-
- \item \verb|Symbol.explode|~\isa{str} produces a symbol list
- from the packed form. This function supersedes \verb|String.explode| for virtually all purposes of manipulating text in
- Isabelle!
-
- \item \verb|Symbol.is_letter|, \verb|Symbol.is_digit|, \verb|Symbol.is_quasi|, \verb|Symbol.is_blank| classify standard
- symbols according to fixed syntactic conventions of Isabelle, cf.\
- \cite{isabelle-isar-ref}.
-
- \item \verb|Symbol.sym| is a concrete datatype that represents
- the different kinds of symbols explicitly, with constructors \verb|Symbol.Char|, \verb|Symbol.Sym|, \verb|Symbol.Ctrl|, \verb|Symbol.Raw|.
-
- \item \verb|Symbol.decode| converts the string representation of a
- symbol into the datatype version.
-
- \end{description}%
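-
- \medskip For illustration, a hypothetical snippet with expected
- results indicated in comments:
-
-\begin{verbatim}
-  val syms = Symbol.explode "x\<alpha>\<^bold>y";
-    (*["x", "\<alpha>", "\<^bold>", "y"] -- four symbols*)
-  val b = Symbol.is_letter "\<alpha>";  (*true*)
-\end{verbatim}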
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Basic names \label{sec:basic-names}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-A \emph{basic name} essentially consists of a single Isabelle
- identifier. There are conventions to mark separate classes of basic
- names, by attaching a suffix of underscores (\isa{{\isacharunderscore}}): one
- underscore means \emph{internal name}, two underscores means
- \emph{Skolem name}, three underscores means \emph{internal Skolem
- name}.
-
- For example, the basic name \isa{foo} has the internal version
- \isa{foo{\isacharunderscore}}, with Skolem versions \isa{foo{\isacharunderscore}{\isacharunderscore}} and \isa{foo{\isacharunderscore}{\isacharunderscore}{\isacharunderscore}}, respectively.
-
- These special versions provide copies of the basic name space, apart
- from anything that normally appears in the user text. For example,
- system-generated variables in Isar proof contexts are usually marked
- as internal, which prevents mysterious name references like \isa{xaa} from appearing in the text.
-
- \medskip Manipulating binding scopes often requires on-the-fly
- renamings. A \emph{name context} contains a collection of already
- used names. The \isa{declare} operation adds names to the
- context.
-
- The \isa{invents} operation derives a number of fresh names from
- a given starting point. For example, the first three names derived
- from \isa{a} are \isa{a}, \isa{b}, \isa{c}.
-
- The \isa{variants} operation produces fresh names by
- incrementing tentative names as base-26 numbers (with digits \isa{a{\isachardot}{\isachardot}z}) until all clashes are resolved. For example, name \isa{foo} results in variants \isa{fooa}, \isa{foob}, \isa{fooc}, \dots, \isa{fooaa}, \isa{fooab} etc.; each renaming
- step picks the next unused variant from this sequence.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{Name.internal}\verb|Name.internal: string -> string| \\
- \indexml{Name.skolem}\verb|Name.skolem: string -> string| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{Name.context}\verb|type Name.context| \\
- \indexml{Name.context}\verb|Name.context: Name.context| \\
- \indexml{Name.declare}\verb|Name.declare: string -> Name.context -> Name.context| \\
- \indexml{Name.invents}\verb|Name.invents: Name.context -> string -> int -> string list| \\
- \indexml{Name.variants}\verb|Name.variants: string list -> Name.context -> string list * Name.context| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Name.internal|~\isa{name} produces an internal name
- by adding one underscore.
-
- \item \verb|Name.skolem|~\isa{name} produces a Skolem name by
- adding two underscores.
-
- \item \verb|Name.context| represents the context of already used
- names; the initial value is \verb|Name.context|.
-
- \item \verb|Name.declare|~\isa{name} enters a used name into the
- context.
-
- \item \verb|Name.invents|~\isa{context\ name\ n} produces \isa{n} fresh names derived from \isa{name}.
-
- \item \verb|Name.variants|~\isa{names\ context} produces fresh
- variants of \isa{names}; the result is entered into the context.
-
- \end{description}%
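-
- \medskip A sketch of generating fresh names; the results in the
- comments are the expected values, not verified output:
-
-\begin{verbatim}
-  val used = Name.context |> Name.declare "a" |> Name.declare "b";
-  val fresh = Name.invents used "a" 3;  (*e.g. ["c", "d", "e"]*)
-  val (names, used') = Name.variants ["foo", "foo"] used;
-    (*e.g. ["foo", "fooa"], with both entered into the context*)
-\end{verbatim}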
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Indexed names%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-An \emph{indexed name} (or \isa{indexname}) is a pair of a basic
- name and a natural number. This representation allows efficient
- renaming by incrementing the second component only. The canonical
- way to rename two collections of indexnames apart from each other is
- this: determine the maximum index \isa{maxidx} of the first
- collection, then increment all indexes of the second collection by
- \isa{maxidx\ {\isacharplus}\ {\isadigit{1}}}; the maximum index of an empty collection is
- \isa{{\isacharminus}{\isadigit{1}}}.
-
- Occasionally, basic names and indexed names are injected into the
- same pair type: the (improper) indexname \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}} is used
- to encode basic names.
-
- \medskip Isabelle syntax observes the following rules for
- representing an indexname \isa{{\isacharparenleft}x{\isacharcomma}\ i{\isacharparenright}} as a packed string:
-
- \begin{itemize}
-
- \item \isa{{\isacharquery}x} if \isa{x} does not end with a digit and \isa{i\ {\isacharequal}\ {\isadigit{0}}},
-
- \item \isa{{\isacharquery}xi} if \isa{x} does not end with a digit,
-
- \item \isa{{\isacharquery}x{\isachardot}i} otherwise.
-
- \end{itemize}
-
- Indexnames may acquire large index numbers over time. Results are
- normalized towards \isa{{\isadigit{0}}} at certain checkpoints, notably at
- the end of a proof. This works by producing variants of the
- corresponding basic name components. For example, the collection
- \isa{{\isacharquery}x{\isadigit{1}}{\isacharcomma}\ {\isacharquery}x{\isadigit{7}}{\isacharcomma}\ {\isacharquery}x{\isadigit{4}}{\isadigit{2}}} becomes \isa{{\isacharquery}x{\isacharcomma}\ {\isacharquery}xa{\isacharcomma}\ {\isacharquery}xb}.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{indexname}\verb|type indexname| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|indexname| represents indexed names. This is an
- abbreviation for \verb|string * int|. The second component is
- usually non-negative, except for situations where \isa{{\isacharparenleft}x{\isacharcomma}\ {\isacharminus}{\isadigit{1}}{\isacharparenright}}
- is used to embed basic names into this type.
-
- \end{description}%
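-
- \medskip For theorems, this renaming-apart scheme may be sketched as
- follows; the operations \verb|Thm.maxidx_of| and
- \verb|Thm.incr_indexes| are assumed here, as they are not part of
- the present interface description:
-
-\begin{verbatim}
-  (*rename the schematic variables of thm2 apart from thm1*)
-  val thm2' = Thm.incr_indexes (Thm.maxidx_of thm1 + 1) thm2;
-\end{verbatim}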
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Qualified names and name spaces%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-A \emph{qualified name} consists of a non-empty sequence of basic
- name components. The packed representation uses a dot as separator,
- as in ``\isa{A{\isachardot}b{\isachardot}c}''. The last component is called \emph{base}
- name, the remaining prefix \emph{qualifier} (which may be empty).
- The idea of qualified names is to encode nested structures by
- recording the access paths as qualifiers. For example, an item
- named ``\isa{A{\isachardot}b{\isachardot}c}'' may be understood as a local entity \isa{c}, within a local structure \isa{b}, within a global
- structure \isa{A}. Typically, name space hierarchies consist of
- 1--2 levels of qualification, but this need not always be so.
-
- The empty name is commonly used as an indication of unnamed
- entities, whenever this makes any sense. The basic operations on
- qualified names are smart enough to pass through such improper names
- unchanged.
-
- \medskip A \isa{naming} policy tells how to turn a name
- specification into a fully qualified internal name (by the \isa{full} operation), and how fully qualified names may be accessed
- externally. For example, the default naming policy is to prefix an
- implicit path: \isa{full\ x} produces \isa{path{\isachardot}x}, and the
- standard accesses for \isa{path{\isachardot}x} include both \isa{x} and
- \isa{path{\isachardot}x}. Normally, the naming is implicit in the theory or
- proof context; there are separate versions of the corresponding operations for theory and proof contexts.
-
- \medskip A \isa{name\ space} manages a collection of fully
- internalized names, together with a mapping between external names
- and internal names (in both directions). The corresponding \isa{intern} and \isa{extern} operations are mostly used for
- parsing and printing only! The \isa{declare} operation augments
- a name space according to the accesses determined by the naming
- policy.
-
- \medskip As a general principle, there is a separate name space for
- each kind of formal entity, e.g.\ logical constant, type
- constructor, type class, theorem. It is usually clear from the
- occurrence in concrete syntax (or from the scope) which kind of
- entity a name refers to. For example, the very same name \isa{c} may be used uniformly for a constant, type constructor, and
- type class.
-
- There are common schemes to name theorems systematically, according
- to the name of the main logical entity involved, e.g.\ \isa{c{\isachardot}intro} for a canonical theorem related to constant \isa{c}.
- This technique of mapping names from one space into another requires
- some care in order to avoid conflicts. In particular, theorem names
- derived from a type constructor or type class are better suffixed in
- addition to the usual qualification, e.g.\ \isa{c{\isacharunderscore}type{\isachardot}intro}
- and \isa{c{\isacharunderscore}class{\isachardot}intro} for theorems related to type \isa{c}
- and class \isa{c}, respectively.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{NameSpace.base}\verb|NameSpace.base: string -> string| \\
- \indexml{NameSpace.qualifier}\verb|NameSpace.qualifier: string -> string| \\
- \indexml{NameSpace.append}\verb|NameSpace.append: string -> string -> string| \\
- \indexml{NameSpace.implode}\verb|NameSpace.implode: string list -> string| \\
- \indexml{NameSpace.explode}\verb|NameSpace.explode: string -> string list| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{NameSpace.naming}\verb|type NameSpace.naming| \\
- \indexml{NameSpace.default\_naming}\verb|NameSpace.default_naming: NameSpace.naming| \\
- \indexml{NameSpace.add\_path}\verb|NameSpace.add_path: string -> NameSpace.naming -> NameSpace.naming| \\
- \indexml{NameSpace.full\_name}\verb|NameSpace.full_name: NameSpace.naming -> binding -> string| \\
- \end{mldecls}
- \begin{mldecls}
- \indexmltype{NameSpace.T}\verb|type NameSpace.T| \\
- \indexml{NameSpace.empty}\verb|NameSpace.empty: NameSpace.T| \\
- \indexml{NameSpace.merge}\verb|NameSpace.merge: NameSpace.T * NameSpace.T -> NameSpace.T| \\
- \indexml{NameSpace.declare}\verb|NameSpace.declare: NameSpace.naming -> binding -> NameSpace.T -> string * NameSpace.T| \\
- \indexml{NameSpace.intern}\verb|NameSpace.intern: NameSpace.T -> string -> string| \\
- \indexml{NameSpace.extern}\verb|NameSpace.extern: NameSpace.T -> string -> string| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|NameSpace.base|~\isa{name} returns the base name of a
- qualified name.
-
- \item \verb|NameSpace.qualifier|~\isa{name} returns the qualifier
- of a qualified name.
-
- \item \verb|NameSpace.append|~\isa{name\isactrlisub {\isadigit{1}}\ name\isactrlisub {\isadigit{2}}}
- appends two qualified names.
-
- \item \verb|NameSpace.implode|~\isa{name} and \verb|NameSpace.explode|~\isa{names} convert between the packed string
- representation and the explicit list form of qualified names.
-
- \item \verb|NameSpace.naming| represents the abstract concept of
- a naming policy.
-
- \item \verb|NameSpace.default_naming| is the default naming policy.
- In a theory context, this is usually augmented by a path prefix
- consisting of the theory name.
-
- \item \verb|NameSpace.add_path|~\isa{path\ naming} augments the
- naming policy by extending its path component.
-
- \item \verb|NameSpace.full_name|~\isa{naming\ binding} turns a name
- binding (usually a basic name) into the fully qualified
- internal name, according to the given naming policy.
-
- \item \verb|NameSpace.T| represents name spaces.
-
- \item \verb|NameSpace.empty| and \verb|NameSpace.merge|~\isa{{\isacharparenleft}space\isactrlisub {\isadigit{1}}{\isacharcomma}\ space\isactrlisub {\isadigit{2}}{\isacharparenright}} are the canonical operations for
- maintaining name spaces according to theory data management
- (\secref{sec:context-data}).
-
- \item \verb|NameSpace.declare|~\isa{naming\ binding\ space} enters a
- name binding as fully qualified internal name into the name space,
- with external accesses determined by the naming policy.
-
- \item \verb|NameSpace.intern|~\isa{space\ name} internalizes a
- (partially qualified) external name.
-
- This operation is mostly for parsing! Note that fully qualified
- names stemming from declarations are produced via \verb|NameSpace.full_name| and \verb|NameSpace.declare|
- (or their derivatives for \verb|theory| and
- \verb|Proof.context|).
-
- \item \verb|NameSpace.extern|~\isa{space\ name} externalizes a
- (fully qualified) internal name.
-
- This operation is mostly for printing! Note that unqualified names are
- produced via \verb|NameSpace.base|.
-
- \end{description}%
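-
- \medskip A sketch with path prefix \isa{A} and binding \isa{b};
- \verb|Binding.name| is assumed as the canonical way to produce a
- \verb|binding| value:
-
-\begin{verbatim}
-  val naming = NameSpace.add_path "A" NameSpace.default_naming;
-  val full = NameSpace.full_name naming (Binding.name "b");  (*"A.b"*)
-  val (_, space) =
-    NameSpace.declare naming (Binding.name "b") NameSpace.empty;
-  val x = NameSpace.intern space "b";    (*"A.b"*)
-  val y = NameSpace.extern space "A.b";  (*"b"*)
-\end{verbatim}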
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/proof.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,396 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{proof}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ {\isachardoublequoteopen}proof{\isachardoublequoteclose}\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Structured proofs%
-}
-\isamarkuptrue%
-%
-\isamarkupsection{Variables \label{sec:variables}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Any variable that is not explicitly bound by \isa{{\isasymlambda}}-abstraction
- is considered as ``free''. Logically, free variables act like
- outermost universal quantification at the sequent level: \isa{A\isactrlisub {\isadigit{1}}{\isacharparenleft}x{\isacharparenright}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n{\isacharparenleft}x{\isacharparenright}\ {\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} means that the result
- holds \emph{for all} values of \isa{x}. Free variables for
- terms (not types) can be fully internalized into the logic: \isa{{\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}} and \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} are interchangeable, provided
- that \isa{x} does not occur elsewhere in the context.
- Inspecting \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} more closely, we see that inside the
- quantifier, \isa{x} is essentially ``arbitrary, but fixed'',
- while from outside it appears as a place-holder for instantiation
- (thanks to \isa{{\isasymAnd}} elimination).
-
- The Pure logic represents the idea of variables being either inside
- or outside the current scope by providing separate syntactic
- categories for \emph{fixed variables} (e.g.\ \isa{x}) vs.\
- \emph{schematic variables} (e.g.\ \isa{{\isacharquery}x}). Incidentally, a
- universal result \isa{{\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}} has the HHF normal form \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}x{\isacharparenright}}, which represents its generality nicely without requiring
- an explicit quantifier. The same principle works for type
- variables: \isa{{\isasymturnstile}\ B{\isacharparenleft}{\isacharquery}{\isasymalpha}{\isacharparenright}} represents the idea of ``\isa{{\isasymturnstile}\ {\isasymforall}{\isasymalpha}{\isachardot}\ B{\isacharparenleft}{\isasymalpha}{\isacharparenright}}'' without demanding a truly polymorphic framework.
-
- \medskip Additional care is required to treat type variables in a
- way that facilitates type-inference. In principle, term variables
- depend on type variables, which means that type variables would have
- to be declared first. For example, a raw type-theoretic framework
- would demand the context to be constructed in stages as follows:
- \isa{{\isasymGamma}\ {\isacharequal}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ x{\isacharcolon}\ {\isasymalpha}{\isacharcomma}\ a{\isacharcolon}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}.
-
- We allow a slightly less formalistic mode of operation: term
- variables \isa{x} are fixed without specifying a type yet
- (essentially \emph{all} potential occurrences of some instance
- \isa{x\isactrlisub {\isasymtau}} are fixed); the first occurrence of \isa{x}
- within a specific term assigns its most general type, which is then
- maintained consistently in the context. The above example becomes
- \isa{{\isasymGamma}\ {\isacharequal}\ x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type{\isacharcomma}\ A{\isacharparenleft}x\isactrlisub {\isasymalpha}{\isacharparenright}}, where type \isa{{\isasymalpha}} is fixed \emph{after} term \isa{x}, and the constraint
- \isa{x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}} is an implicit consequence of the occurrence of
- \isa{x\isactrlisub {\isasymalpha}} in the subsequent proposition.
-
- This twist of dependencies is also accommodated by the reverse
- operation of exporting results from a context: a type variable
- \isa{{\isasymalpha}} is considered fixed as long as it occurs in some fixed
- term variable of the context. For example, exporting \isa{x{\isacharcolon}\ term{\isacharcomma}\ {\isasymalpha}{\isacharcolon}\ type\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} produces in the first step
- \isa{x{\isacharcolon}\ term\ {\isasymturnstile}\ x\isactrlisub {\isasymalpha}\ {\isacharequal}\ x\isactrlisub {\isasymalpha}} for fixed \isa{{\isasymalpha}},
- and only in the second step \isa{{\isasymturnstile}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}\ {\isacharequal}\ {\isacharquery}x\isactrlisub {\isacharquery}\isactrlisub {\isasymalpha}} for schematic \isa{{\isacharquery}x} and \isa{{\isacharquery}{\isasymalpha}}.
-
- \medskip The Isabelle/Isar proof context manages the gory details of
- term vs.\ type variables, with high-level principles for moving the
- frontier between fixed and schematic variables.
-
- The \isa{add{\isacharunderscore}fixes} operation explicitly declares fixed
- variables; the \isa{declare{\isacharunderscore}term} operation absorbs a term into
- a context by fixing new type variables and adding syntactic
- constraints.
-
- The \isa{export} operation is able to perform the main work of
- generalizing term and type variables as sketched above, assuming
- that fixed variables and terms have been declared properly.
-
- The \isa{import} operation makes a generalized fact a genuine
- part of the context, by inventing fixed variables for the schematic
- ones. The effect can be reversed by using \isa{export} later,
- potentially with an extended context; the result is equivalent to
- the original modulo renaming of schematic variables.
-
- The \isa{focus} operation provides a variant of \isa{import}
- for nested propositions (with explicit quantification): \isa{{\isasymAnd}x\isactrlisub {\isadigit{1}}\ {\isasymdots}\ x\isactrlisub n{\isachardot}\ B{\isacharparenleft}x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n{\isacharparenright}} is
- decomposed by inventing fixed variables \isa{x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n} for the body.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{Variable.add\_fixes}\verb|Variable.add_fixes: |\isasep\isanewline%
-\verb| string list -> Proof.context -> string list * Proof.context| \\
- \indexml{Variable.variant\_fixes}\verb|Variable.variant_fixes: |\isasep\isanewline%
-\verb| string list -> Proof.context -> string list * Proof.context| \\
- \indexml{Variable.declare\_term}\verb|Variable.declare_term: term -> Proof.context -> Proof.context| \\
- \indexml{Variable.declare\_constraints}\verb|Variable.declare_constraints: term -> Proof.context -> Proof.context| \\
- \indexml{Variable.export}\verb|Variable.export: Proof.context -> Proof.context -> thm list -> thm list| \\
- \indexml{Variable.polymorphic}\verb|Variable.polymorphic: Proof.context -> term list -> term list| \\
- \indexml{Variable.import\_thms}\verb|Variable.import_thms: bool -> thm list -> Proof.context ->|\isasep\isanewline%
-\verb| ((ctyp list * cterm list) * thm list) * Proof.context| \\
- \indexml{Variable.focus}\verb|Variable.focus: cterm -> Proof.context -> (cterm list * cterm) * Proof.context| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Variable.add_fixes|~\isa{xs\ ctxt} fixes term
- variables \isa{xs}, returning the resulting internal names. By
- default, the internal representation coincides with the external
- one, which also means that the given variables must not be fixed
- already. There is a different policy within a local proof body: the
- given names are just hints for newly invented Skolem variables.
-
- \item \verb|Variable.variant_fixes| is similar to \verb|Variable.add_fixes|, but always produces fresh variants of the given
- names.
-
- \item \verb|Variable.declare_term|~\isa{t\ ctxt} declares term
- \isa{t} to belong to the context. This automatically fixes new
- type variables, but not term variables. Syntactic constraints for
- type and term variables are declared uniformly, though.
-
- \item \verb|Variable.declare_constraints|~\isa{t\ ctxt} declares
- syntactic constraints from term \isa{t}, without making it part
- of the context yet.
-
- \item \verb|Variable.export|~\isa{inner\ outer\ thms} generalizes
- fixed type and term variables in \isa{thms} according to the
- difference of the \isa{inner} and \isa{outer} context,
- following the principles sketched above.
-
- \item \verb|Variable.polymorphic|~\isa{ctxt\ ts} generalizes type
- variables in \isa{ts} as far as possible, even those occurring
- in fixed term variables. The default policy of type-inference is to
- fix newly introduced type variables, which is essentially reversed
- with \verb|Variable.polymorphic|: here the given terms are detached
- from the context as far as possible.
-
- \item \verb|Variable.import_thms|~\isa{open\ thms\ ctxt} invents fixed
- type and term variables for the schematic ones occurring in \isa{thms}. The \isa{open} flag indicates whether the fixed names
- should be accessible to the user; otherwise newly introduced names
- are marked as ``internal'' (\secref{sec:names}).
-
- \item \verb|Variable.focus|~\isa{B} decomposes the outermost \isa{{\isasymAnd}} prefix of proposition \isa{B}.
-
- \end{description}%
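-
- \medskip The standard round-trip may be sketched as follows, with
- hypothetical context \verb|ctxt|, term \verb|t|, and theorem
- \verb|thm| of the inner context:
-
-\begin{verbatim}
-  val ([x], inner) = ctxt |> Variable.add_fixes ["x"];
-  val inner' = Variable.declare_term t inner;
-  val [thm'] = Variable.export inner' ctxt [thm];
-    (*variables fixed in inner' but not in ctxt become schematic*)
-\end{verbatim}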
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Assumptions \label{sec:assumptions}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-An \emph{assumption} is a proposition that is postulated in the
- current context. Local conclusions may use assumptions as
- additional facts, but this imposes implicit hypotheses that weaken
- the overall statement.
-
- Assumptions are restricted to fixed non-schematic statements, i.e.\
- all generality needs to be expressed by explicit quantifiers.
- Nevertheless, the result will be in HHF normal form with outermost
- quantifiers stripped. For example, by assuming \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x} we get \isa{{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ P\ x\ {\isasymturnstile}\ P\ {\isacharquery}x} for schematic \isa{{\isacharquery}x}
- of fixed type \isa{{\isasymalpha}}. Local derivations accumulate more and
- more explicit references to hypotheses: \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n\ {\isasymturnstile}\ B} where \isa{A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n} needs to
- be covered by the assumptions of the current context.
-
- \medskip The \isa{add{\isacharunderscore}assms} operation augments the context by
- local assumptions, which are parameterized by an arbitrary \isa{export} rule (see below).
-
- The \isa{export} operation moves facts from a (larger) inner
- context into a (smaller) outer context, by discharging the
- difference of the assumptions as specified by the associated export
- rules. Note that the discharged portion is determined by the
- difference of the contexts, not by the facts being exported! There is a
- separate flag to indicate a goal context, where the result is meant
- to refine an enclosing sub-goal of a structured proof state (cf.\
- \secref{sec:isar-proof-state}).
-
- \medskip The most basic export rule discharges assumptions directly
- by means of the \isa{{\isasymLongrightarrow}} introduction rule:
- \[
- \infer[(\isa{{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
- \]
-
- The variant for goal refinements marks the newly introduced
- premises, which causes the canonical Isar goal refinement scheme to
- enforce unification with local premises within the goal:
- \[
- \infer[(\isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B}}
- \]
-
- \medskip Alternative versions of assumptions may perform arbitrary
- transformations on export, as long as the corresponding portion of
- hypotheses is removed from the given facts. For example, a local
- definition works by fixing \isa{x} and assuming \isa{x\ {\isasymequiv}\ t},
- with the following export rule to reverse the effect:
- \[
- \infer[(\isa{{\isasymequiv}{\isacharminus}expand})]{\isa{{\isasymGamma}\ {\isacharbackslash}\ x\ {\isasymequiv}\ t\ {\isasymturnstile}\ B\ t}}{\isa{{\isasymGamma}\ {\isasymturnstile}\ B\ x}}
- \]
- This works because the assumption \isa{x\ {\isasymequiv}\ t} was introduced in
- a context with \isa{x} being fresh, so \isa{x} does not
- occur in \isa{{\isasymGamma}} here.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{Assumption.export}\verb|type Assumption.export| \\
- \indexml{Assumption.assume}\verb|Assumption.assume: cterm -> thm| \\
- \indexml{Assumption.add\_assms}\verb|Assumption.add_assms: Assumption.export ->|\isasep\isanewline%
-\verb| cterm list -> Proof.context -> thm list * Proof.context| \\
- \indexml{Assumption.add\_assumes}\verb|Assumption.add_assumes: |\isasep\isanewline%
-\verb| cterm list -> Proof.context -> thm list * Proof.context| \\
- \indexml{Assumption.export}\verb|Assumption.export: bool -> Proof.context -> Proof.context -> thm -> thm| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Assumption.export| represents arbitrary export
- rules, i.e.\ any function of type \verb|bool -> cterm list -> thm -> thm|,
- where the \verb|bool| indicates goal mode, and the \verb|cterm list| the collection of assumptions to be discharged
- simultaneously.
-
- \item \verb|Assumption.assume|~\isa{A} turns proposition \isa{A} into a raw assumption \isa{A\ {\isasymturnstile}\ A{\isacharprime}}, where the conclusion
- \isa{A{\isacharprime}} is in HHF normal form.
-
- \item \verb|Assumption.add_assms|~\isa{r\ As} augments the context
- by assumptions \isa{As} with export rule \isa{r}. The
- resulting facts are hypothetical theorems as produced by the raw
- \verb|Assumption.assume|.
-
- \item \verb|Assumption.add_assumes|~\isa{As} is a special case of
- \verb|Assumption.add_assms| where the export rule performs \isa{{\isasymLongrightarrow}{\isacharunderscore}intro} or \isa{{\isacharhash}{\isasymLongrightarrow}{\isacharunderscore}intro}, depending on goal mode.
-
- \item \verb|Assumption.export|~\isa{is{\isacharunderscore}goal\ inner\ outer\ thm}
- exports result \isa{thm} from the \isa{inner} context
- back into the \isa{outer} one; \isa{is{\isacharunderscore}goal\ {\isacharequal}\ true} means
- this is a goal context. The result is in HHF normal form. Note
- that \verb|ProofContext.export| combines \verb|Variable.export|
- and \verb|Assumption.export| in the canonical way.
-
- \end{description}%
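-
- \medskip For illustration, with a hypothetical proposition \verb|A|
- given as \verb|cterm| and a context \verb|ctxt|:
-
-\begin{verbatim}
-  val ([th], ctxt') = Assumption.add_assumes [A] ctxt;
-    (*th: "A |- A'" with A' in HHF normal form*)
-  val th' = Assumption.export false ctxt' ctxt th;
-    (*discharged: "|- A ==> A'"*)
-\end{verbatim}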
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Results \label{sec:results}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Local results are established by monotonic reasoning from facts
- within a context. This allows common combinations of theorems,
- e.g.\ via \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} elimination, resolution rules, or equational
- reasoning, see \secref{sec:thms}. Unaccounted context manipulations
- should be avoided, notably raw \isa{{\isasymAnd}{\isacharslash}{\isasymLongrightarrow}} introduction or ad-hoc
- references to free variables or assumptions not present in the proof
- context.
-
- \medskip The \isa{SUBPROOF} combinator makes it possible to structure a
- tactical proof recursively by decomposing a selected sub-goal:
- \isa{{\isacharparenleft}{\isasymAnd}x{\isachardot}\ A{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ B{\isacharparenleft}x{\isacharparenright}{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}} is turned into \isa{B{\isacharparenleft}x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymdots}}
- after fixing \isa{x} and assuming \isa{A{\isacharparenleft}x{\isacharparenright}}. This means
- the tactic needs to solve the conclusion, but may use the premise as
- a local fact, for locally fixed variables.
-
- The \isa{prove} operation provides an interface for structured
- backwards reasoning under program control, with some explicit sanity
- checks of the result. The goal context can be augmented by
- additional fixed variables (cf.\ \secref{sec:variables}) and
- assumptions (cf.\ \secref{sec:assumptions}), which will be available
- as local facts during the proof and discharged into implications in
- the result. Type and term variables are generalized as usual,
- according to the context.
-
- The \isa{obtain} operation produces results by eliminating
- existing facts by means of a given tactic. This acts like a dual
- conclusion: the proof demonstrates that the context may be augmented
- by certain fixed variables and assumptions. See also
- \cite{isabelle-isar-ref} for the user-level \isa{{\isasymOBTAIN}} and
- \isa{{\isasymGUESS}} elements. Final results, which may not refer to
- the parameters in the conclusion, need to be exported explicitly into
- the original context.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{SUBPROOF}\verb|SUBPROOF: ({context: Proof.context, schematics: ctyp list * cterm list,|\isasep\isanewline%
-\verb| params: cterm list, asms: cterm list, concl: cterm,|\isasep\isanewline%
-\verb| prems: thm list} -> tactic) -> Proof.context -> int -> tactic| \\
- \end{mldecls}
- \begin{mldecls}
- \indexml{Goal.prove}\verb|Goal.prove: Proof.context -> string list -> term list -> term ->|\isasep\isanewline%
-\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm| \\
- \indexml{Goal.prove\_multi}\verb|Goal.prove_multi: Proof.context -> string list -> term list -> term list ->|\isasep\isanewline%
-\verb| ({prems: thm list, context: Proof.context} -> tactic) -> thm list| \\
- \end{mldecls}
- \begin{mldecls}
- \indexml{Obtain.result}\verb|Obtain.result: (Proof.context -> tactic) ->|\isasep\isanewline%
-\verb| thm list -> Proof.context -> (cterm list * thm list) * Proof.context| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|SUBPROOF|~\isa{tac} decomposes the structure of a
- particular sub-goal, producing an extended context and a reduced
- goal, which needs to be solved by the given tactic. All schematic
- parameters of the goal are imported into the context as fixed ones,
- which may not be instantiated in the sub-proof.
-
- \item \verb|Goal.prove|~\isa{ctxt\ xs\ As\ C\ tac} states goal \isa{C} in the context augmented by fixed variables \isa{xs} and
- assumptions \isa{As}, and applies tactic \isa{tac} to solve
- it. The latter may depend on the local assumptions being presented
- as facts. The result is in HHF normal form.
-
- \item \verb|Goal.prove_multi| is similar to \verb|Goal.prove|, but
- states several conclusions simultaneously. The goal is encoded by
- means of Pure conjunction; \verb|Goal.conjunction_tac| will turn this
- into a collection of individual subgoals.
-
- \item \verb|Obtain.result|~\isa{tac\ thms\ ctxt} eliminates the
- given facts using a tactic, which results in additional fixed
- variables and assumptions in the context. Final results need to be
- exported explicitly.
-
- \end{description}%
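-
- \medskip A sketch of \verb|Goal.prove|, with hypothetical assumption
- \verb|A|, conclusion \verb|C|, and a suitable tactic \verb|tac|:
-
-\begin{verbatim}
-  val thm =
-    Goal.prove ctxt ["x"] [A] C
-      (fn {prems, context} => tac prems);
-  (*result: x generalized, A discharged into an implication*)
-\end{verbatim}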
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/document/session.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/Thy/document/session.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1,21 +1,23 @@
-\input{base.tex}
-
-\input{prelim.tex}
+\input{Base.tex}
-\input{logic.tex}
-
-\input{tactic.tex}
+\input{Integration.tex}
-\input{proof.tex}
-
-\input{isar.tex}
+\input{Isar.tex}
-\input{locale.tex}
+\input{Local_Theory.tex}
-\input{integration.tex}
+\input{Logic.tex}
\input{ML.tex}
+\input{Prelim.tex}
+
+\input{Proof.tex}
+
+\input{Syntax.tex}
+
+\input{Tactic.tex}
+
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "root"
--- a/doc-src/IsarImplementation/Thy/document/tactic.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,512 +0,0 @@
-%
-\begin{isabellebody}%
-\def\isabellecontext{tactic}%
-%
-\isadelimtheory
-\isanewline
-\isanewline
-\isanewline
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{theory}\isamarkupfalse%
-\ tactic\ \isakeyword{imports}\ base\ \isakeyword{begin}%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isamarkupchapter{Tactical reasoning%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Tactical reasoning works by refining the initial claim in a
- backwards fashion, until a solved form is reached. A \isa{goal}
- consists of several subgoals that need to be solved in order to
- achieve the main statement; zero subgoals means that the proof may
- be finished. A \isa{tactic} is a refinement operation that maps
- a goal to a lazy sequence of potential successors. A \isa{tactical} is a combinator for composing tactics.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Goals \label{sec:tactical-goals}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Isabelle/Pure represents a goal\glossary{Tactical goal}{A theorem of
- \seeglossary{Horn Clause} form stating that a number of subgoals
- imply the main conclusion, which is marked as a protected
- proposition.} as a theorem stating that the subgoals imply the main
- goal: \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}. The outermost goal
- structure is that of a Horn Clause\glossary{Horn Clause}{An iterated
- implication \isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}, without any
- outermost quantifiers. Strictly speaking, propositions \isa{A\isactrlsub i} need to be atomic in Horn Clauses, but Isabelle admits
- arbitrary substructure here (nested \isa{{\isasymLongrightarrow}} and \isa{{\isasymAnd}}
- connectives).}: i.e.\ an iterated implication without any
- quantifiers\footnote{Recall that outermost \isa{{\isasymAnd}x{\isachardot}\ {\isasymphi}{\isacharbrackleft}x{\isacharbrackright}} is
- always represented via schematic variables in the body: \isa{{\isasymphi}{\isacharbrackleft}{\isacharquery}x{\isacharbrackright}}. These variables may get instantiated during the course of
- reasoning.}. For \isa{n\ {\isacharequal}\ {\isadigit{0}}} a goal is called ``solved''.
-
- The structure of each subgoal \isa{A\isactrlsub i} is that of a general
- Hereditary Harrop Formula \isa{{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ {\isasymAnd}x\isactrlsub k{\isachardot}\ H\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ H\isactrlsub m\ {\isasymLongrightarrow}\ B} in
- normal form. Here \isa{x\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlsub k} are goal parameters, i.e.\
- arbitrary-but-fixed entities of certain types, and \isa{H\isactrlsub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ H\isactrlsub m} are goal hypotheses, i.e.\ facts that may be assumed locally.
- Together, this forms the goal context of the conclusion \isa{B} to
- be established. The goal hypotheses may be again arbitrary
- Hereditary Harrop Formulas, although the level of nesting rarely
- exceeds 1--2 in practice.
-
- The main conclusion \isa{C} is internally marked as a protected
- proposition\glossary{Protected proposition}{An arbitrarily
- structured proposition \isa{C} which is forced to appear as
- atomic by wrapping it into a propositional identity operator;
- notation \isa{{\isacharhash}C}. Protecting a proposition prevents basic
- inferences from entering into that structure for the time being.},
- which is represented explicitly by the notation \isa{{\isacharhash}C}. This
- ensures that the decomposition into subgoals and main conclusion is
- well-defined for arbitrarily structured claims.
-
- \medskip Basic goal management is performed via the following
- Isabelle/Pure rules:
-
- \[
- \infer[\isa{{\isacharparenleft}init{\isacharparenright}}]{\isa{C\ {\isasymLongrightarrow}\ {\isacharhash}C}}{} \qquad
- \infer[\isa{{\isacharparenleft}finish{\isacharparenright}}]{\isa{C}}{\isa{{\isacharhash}C}}
- \]
-
- \medskip The following low-level variants admit general reasoning
- with protected propositions:
-
- \[
- \infer[\isa{{\isacharparenleft}protect{\isacharparenright}}]{\isa{{\isacharhash}C}}{\isa{C}} \qquad
- \infer[\isa{{\isacharparenleft}conclude{\isacharparenright}}]{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ C}}{\isa{A\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymLongrightarrow}\ A\isactrlsub n\ {\isasymLongrightarrow}\ {\isacharhash}C}}
- \]%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{Goal.init}\verb|Goal.init: cterm -> thm| \\
- \indexml{Goal.finish}\verb|Goal.finish: thm -> thm| \\
- \indexml{Goal.protect}\verb|Goal.protect: thm -> thm| \\
- \indexml{Goal.conclude}\verb|Goal.conclude: thm -> thm| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|Goal.init|~\isa{C} initializes a tactical goal from
- the well-formed proposition \isa{C}.
-
- \item \verb|Goal.finish|~\isa{thm} checks whether theorem
- \isa{thm} is a solved goal (no subgoals), and concludes the
- result by removing the goal protection.
-
- \item \verb|Goal.protect|~\isa{thm} protects the full statement
- of theorem \isa{thm}.
-
- \item \verb|Goal.conclude|~\isa{thm} removes the goal
- protection, even if there are pending subgoals.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
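-%
-\begin{isamarkuptext}%
-For example, these operations might be combined as follows (a
- hypothetical {\ML} sketch, assuming a certified proposition
- \verb|ct| and some tactic \verb|tac| in scope):
-
-\begin{verbatim}
-val st = Goal.init ct;      (* goal state C ==> #C; ct is assumed *)
-val st' =
-  (case Seq.pull (tac st) of
-    SOME (st', _) => st'
-  | NONE => error "tactic failed");
-val th = Goal.finish st';   (* C, provided no subgoals remain *)
-\end{verbatim}
-\end{isamarkuptext}%
-\isamarkuptrue%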
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Tactics%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-A \isa{tactic} is a function \isa{goal\ {\isasymrightarrow}\ goal\isactrlsup {\isacharasterisk}\isactrlsup {\isacharasterisk}} that
- maps a given goal state (represented as a theorem, cf.\
- \secref{sec:tactical-goals}) to a lazy sequence of potential
- successor states. The underlying sequence implementation is lazy
- both in head and tail, and is purely functional in \emph{not}
- supporting memoing.\footnote{The lack of memoing and the strict
- nature of SML requires some care when working with low-level
- sequence operations, to avoid duplicate or premature evaluation of
- results.}
-
- An \emph{empty result sequence} means that the tactic has failed: in
- a compound tactic expression, other tactics might be tried instead,
- or the whole refinement step might fail outright, producing a
- toplevel error message. When implementing tactics from scratch, one
- should take care to observe the basic protocol of mapping regular
- error conditions to an empty result; only serious faults should
- emerge as exceptions.
-
- By enumerating \emph{multiple results}, a tactic can easily express
- the potential outcome of an internal search process. There are also
- combinators for building proof tools that involve search
- systematically; see also \secref{sec:tacticals}.
-
- \medskip As explained in \secref{sec:tactical-goals}, a goal state
- essentially consists of a list of subgoals that imply the main goal
- (conclusion). Tactics may operate on all subgoals or on an
- explicitly specified subgoal, but must not change the main
- conclusion (apart from instantiating schematic goal variables).
-
- Tactics with explicit \emph{subgoal addressing} are of the form
- \isa{int\ {\isasymrightarrow}\ tactic} and may be applied to a particular subgoal
- (counting from 1). If the subgoal number is out of range, the
- tactic should fail with an empty result sequence, but must not raise
- an exception!
-
- Operating on a particular subgoal means to replace it by an interval
- of zero or more subgoals in the same place; other subgoals must not
- be affected, apart from instantiating schematic variables ranging
- over the whole goal state.
-
- A common pattern of composing tactics with subgoal addressing is to
- try the first one, and then the second one only if the subgoal has
- not been solved yet. Special care is required here to avoid bumping
- into unrelated subgoals that happen to come after the original
- subgoal. Assuming that there is only a single initial subgoal is a
- very common error when implementing tactics!
-
- Tactics with internal subgoal addressing should expose the subgoal
- index as \isa{int} argument in full generality; a hardwired
- subgoal 1 inappropriate.
-
- \medskip The main well-formedness conditions for proper tactics are
- summarized as follows.
-
- \begin{itemize}
-
- \item General tactic failure is indicated by an empty result; only
- serious faults may produce an exception.
-
- \item The main conclusion must not be changed, apart from
- instantiating schematic variables.
-
- \item A tactic operates either uniformly on all subgoals, or
- specifically on a selected subgoal (without bumping into unrelated
- subgoals).
-
- \item Range errors in subgoal addressing produce an empty result.
-
- \end{itemize}
-
- Some of these conditions are checked by higher-level goal
- infrastructure (\secref{sec:results}); others are not checked
- explicitly, and violating them merely results in ill-behaved tactics
- experienced by the user (e.g.\ tactics that insist on being
- applicable only to singleton goals, or disallow composition with
- basic tacticals).%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexmltype{tactic}\verb|type tactic = thm -> thm Seq.seq| \\
- \indexml{no\_tac}\verb|no_tac: tactic| \\
- \indexml{all\_tac}\verb|all_tac: tactic| \\
- \indexml{print\_tac}\verb|print_tac: string -> tactic| \\[1ex]
- \indexml{PRIMITIVE}\verb|PRIMITIVE: (thm -> thm) -> tactic| \\[1ex]
- \indexml{SUBGOAL}\verb|SUBGOAL: (term * int -> tactic) -> int -> tactic| \\
- \indexml{CSUBGOAL}\verb|CSUBGOAL: (cterm * int -> tactic) -> int -> tactic| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|tactic| represents tactics. The well-formedness
- conditions described above need to be observed. See also \hyperlink{file.~~/src/Pure/General/seq.ML}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}Pure{\isacharslash}General{\isacharslash}seq{\isachardot}ML}}}} for the underlying implementation of
- lazy sequences.
-
- \item \verb|int -> tactic| represents tactics with explicit
- subgoal addressing, with well-formedness conditions as described
- above.
-
- \item \verb|no_tac| is a tactic that always fails, returning the
- empty sequence.
-
- \item \verb|all_tac| is a tactic that always succeeds, returning a
- singleton sequence with unchanged goal state.
-
- \item \verb|print_tac|~\isa{message} is like \verb|all_tac|, but
- prints a message together with the goal state on the tracing
- channel.
-
- \item \verb|PRIMITIVE|~\isa{rule} turns a primitive inference rule
- into a tactic with unique result. Exception \verb|THM| is considered
- a regular tactic failure and produces an empty result; other
- exceptions are passed through.
-
- \item \verb|SUBGOAL|~\isa{{\isacharparenleft}fn\ {\isacharparenleft}subgoal{\isacharcomma}\ i{\isacharparenright}\ {\isacharequal}{\isachargreater}\ tactic{\isacharparenright}} is the
- most basic form to produce a tactic with subgoal addressing. The
- given abstraction over the subgoal term and subgoal number allows
- one to peek at the relevant information of the full goal state. The
- subgoal range is checked as required above; see also the sketch
- below.
-
- \item \verb|CSUBGOAL| is similar to \verb|SUBGOAL|, but passes the
- subgoal as \verb|cterm| instead of raw \verb|term|. This
- avoids expensive re-certification in situations where the subgoal is
- used directly for primitive inferences.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
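-%
-\begin{isamarkuptext}%
-For example, a tactic with subgoal addressing might be written via
- \verb|SUBGOAL| as follows (a hypothetical sketch; the particular
- test on the subgoal conclusion is arbitrary):
-
-\begin{verbatim}
-(* fail (empty result) on compound conclusions, succeed otherwise *)
-val atomic_concl_tac = SUBGOAL (fn (goal, _) =>
-  (case Logic.strip_assums_concl goal of
-    _ $ _ => no_tac
-  | _ => all_tac));
-\end{verbatim}
-\end{isamarkuptext}%
-\isamarkuptrue%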
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Resolution and assumption tactics \label{sec:resolve-assume-tac}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-\emph{Resolution} is the most basic mechanism for refining a
- subgoal using a theorem as object-level rule.
- \emph{Elim-resolution} is particularly suited for elimination rules:
- it resolves with a rule, proves its first premise by assumption, and
- finally deletes that assumption from any new subgoals.
- \emph{Destruct-resolution} is like elim-resolution, but the given
- destruction rules are first turned into canonical elimination
- format. \emph{Forward-resolution} is like destruct-resolution, but
- without deleting the selected assumption. The \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f}
- naming convention is maintained for several different kinds of
- resolution rules and tactics.
-
- Assumption tactics close a subgoal by unifying some of its premises
- against its conclusion.
-
- \medskip All the tactics in this section operate on a subgoal
- designated by a positive integer. Other subgoals might be affected
- indirectly, due to instantiation of schematic variables.
-
- There are various sources of non-determinism; the tactic result
- sequence enumerates all possibilities of the following choices (if
- applicable):
-
- \begin{enumerate}
-
- \item selecting one of the rules given as argument to the tactic;
-
- \item selecting a subgoal premise to eliminate, unifying it against
- the first premise of the rule;
-
- \item unifying the conclusion of the subgoal to the conclusion of
- the rule.
-
- \end{enumerate}
-
- Recall that higher-order unification may produce multiple results
- that are enumerated here.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{resolve\_tac}\verb|resolve_tac: thm list -> int -> tactic| \\
- \indexml{eresolve\_tac}\verb|eresolve_tac: thm list -> int -> tactic| \\
- \indexml{dresolve\_tac}\verb|dresolve_tac: thm list -> int -> tactic| \\
- \indexml{forward\_tac}\verb|forward_tac: thm list -> int -> tactic| \\[1ex]
- \indexml{assume\_tac}\verb|assume_tac: int -> tactic| \\
- \indexml{eq\_assume\_tac}\verb|eq_assume_tac: int -> tactic| \\[1ex]
- \indexml{match\_tac}\verb|match_tac: thm list -> int -> tactic| \\
- \indexml{ematch\_tac}\verb|ematch_tac: thm list -> int -> tactic| \\
- \indexml{dmatch\_tac}\verb|dmatch_tac: thm list -> int -> tactic| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|resolve_tac|~\isa{thms\ i} refines the goal state
- using the given theorems, which should normally be introduction
- rules. The tactic resolves a rule's conclusion with subgoal \isa{i}, replacing it by the corresponding versions of the rule's
- premises.
-
- \item \verb|eresolve_tac|~\isa{thms\ i} performs elim-resolution
- with the given theorems, which should normally be elimination rules.
-
- \item \verb|dresolve_tac|~\isa{thms\ i} performs
- destruct-resolution with the given theorems, which should normally
- be destruction rules. This replaces an assumption by the result of
- applying one of the rules.
-
- \item \verb|forward_tac| is like \verb|dresolve_tac| except that the
- selected assumption is not deleted. It applies a rule to an
- assumption, adding the result as a new assumption.
-
- \item \verb|assume_tac|~\isa{i} attempts to solve subgoal \isa{i}
- by assumption (modulo higher-order unification).
-
- \item \verb|eq_assume_tac| is similar to \verb|assume_tac|, but checks
- only for immediate \isa{{\isasymalpha}}-convertibility instead of using
- unification. It succeeds (with a unique next state) if one of the
- assumptions is equal to the subgoal's conclusion. Since it does not
- instantiate variables, it cannot make other subgoals unprovable.
-
- \item \verb|match_tac|, \verb|ematch_tac|, and \verb|dmatch_tac| are
- similar to \verb|resolve_tac|, \verb|eresolve_tac|, and \verb|dresolve_tac|, respectively, but do not instantiate schematic
- variables in the goal state.
-
- Flexible subgoals are not updated at will, but are left alone.
- Strictly speaking, matching means to treat the unknowns in the goal
- state as constants; these tactics merely discard unifiers that would
- update the goal state.
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
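-%
-\begin{isamarkuptext}%
-For example, a typical combination (a hypothetical sketch, assuming a
- list of introduction rules \verb|intros| in scope) refines subgoal 1
- and then attempts to close the resulting subgoal by assumption:
-
-\begin{verbatim}
-val tac = resolve_tac intros 1 THEN TRY (assume_tac 1);
-\end{verbatim}
-\end{isamarkuptext}%
-\isamarkuptrue%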
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsubsection{Explicit instantiation within a subgoal context%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The main resolution tactics (\secref{sec:resolve-assume-tac})
- use higher-order unification, which works well in many practical
- situations despite its daunting theoretical properties.
- Nonetheless, there are important problem classes where unguided
- higher-order unification is not so useful. This typically involves
- rules like universal elimination, existential introduction, or
- equational substitution. Here the unification problem involves
- fully flexible \isa{{\isacharquery}P\ {\isacharquery}x} schemes, which are hard to manage
- without further hints.
-
- By providing a (small) rigid term for \isa{{\isacharquery}x} explicitly, the
- remaining unification problem is to assign a (large) term to \isa{{\isacharquery}P}, according to the shape of the given subgoal. This is
- sufficiently well-behaved in most practical situations.
-
- \medskip Isabelle provides separate versions of the standard \isa{r{\isacharslash}e{\isacharslash}d{\isacharslash}f} resolution tactics that allow one to provide explicit
- instantiations of unknowns of the given rule, wrt.\ terms that refer
- to the implicit context of the selected subgoal.
-
- An instantiation consists of a list of pairs of the form \isa{{\isacharparenleft}{\isacharquery}x{\isacharcomma}\ t{\isacharparenright}}, where \isa{{\isacharquery}x} is a schematic variable occurring in
- the given rule, and \isa{t} is a term from the current proof
- context, augmented by the local goal parameters of the selected
- subgoal; cf.\ the \isa{focus} operation described in
- \secref{sec:variables}.
-
- Entering the syntactic context of a subgoal is a brittle operation,
- because its exact form is somewhat accidental, and the choice of
- bound variable names depends on the presence of other local and
- global names. Explicit renaming of subgoal parameters prior to
- explicit instantiation might help to achieve a bit more robustness.
-
- Type instantiations may be given as well, via pairs like \isa{{\isacharparenleft}{\isacharquery}{\isacharprime}a{\isacharcomma}\ {\isasymtau}{\isacharparenright}}. Type instantiations are distinguished from term
- instantiations by the syntactic form of the schematic variable.
- Types are instantiated before terms are. Since term instantiation
- already performs type-inference as expected, explicit type
- instantiations are seldom necessary.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isatagmlref
-%
-\begin{isamarkuptext}%
-\begin{mldecls}
- \indexml{res\_inst\_tac}\verb|res_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
- \indexml{eres\_inst\_tac}\verb|eres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
- \indexml{dres\_inst\_tac}\verb|dres_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\
- \indexml{forw\_inst\_tac}\verb|forw_inst_tac: Proof.context -> (indexname * string) list -> thm -> int -> tactic| \\[1ex]
- \indexml{rename\_tac}\verb|rename_tac: string list -> int -> tactic| \\
- \end{mldecls}
-
- \begin{description}
-
- \item \verb|res_inst_tac|~\isa{ctxt\ insts\ thm\ i} instantiates the
- rule \isa{thm} with the instantiations \isa{insts}, as described
- above, and then performs resolution on subgoal \isa{i}.
-
- \item \verb|eres_inst_tac| is like \verb|res_inst_tac|, but performs
- elim-resolution.
-
- \item \verb|dres_inst_tac| is like \verb|res_inst_tac|, but performs
- destruct-resolution.
-
- \item \verb|forw_inst_tac| is like \verb|dres_inst_tac| except that
- the selected assumption is not deleted.
-
- \item \verb|rename_tac|~\isa{names\ i} renames the innermost
- parameters of subgoal \isa{i} according to the provided \isa{names} (which need to be distinct identifiers).
-
- \end{description}%
-\end{isamarkuptext}%
-\isamarkuptrue%
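-%
-\begin{isamarkuptext}%
-For example (a hypothetical sketch, assuming a context \verb|ctxt|
- and a rule \verb|rule| with a schematic variable \verb|?x| in
- scope), one may first rename the innermost parameter of subgoal 1
- and then refer to it in the instantiation:
-
-\begin{verbatim}
-val tac = rename_tac ["a"] 1
-  THEN res_inst_tac ctxt [(("x", 0), "a")] rule 1;
-\end{verbatim}
-\end{isamarkuptext}%
-\isamarkuptrue%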
-%
-\endisatagmlref
-{\isafoldmlref}%
-%
-\isadelimmlref
-%
-\endisadelimmlref
-%
-\isamarkupsection{Tacticals \label{sec:tacticals}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-FIXME
-
-\glossary{Tactical}{A functional combinator for building up complex
-tactics from simpler ones. Typical tacticals perform sequential
-composition, disjunction (choice), iteration, or goal addressing.
-Various search strategies may be expressed via tacticals.}%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-%
-\isatagtheory
-\isacommand{end}\isamarkupfalse%
-%
-\endisatagtheory
-{\isafoldtheory}%
-%
-\isadelimtheory
-%
-\endisadelimtheory
-\isanewline
-\isanewline
-\end{isabellebody}%
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "root"
-%%% End:
--- a/doc-src/IsarImplementation/Thy/integration.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,426 +0,0 @@
-
-(* $Id$ *)
-
-theory integration imports base begin
-
-chapter {* System integration *}
-
-section {* Isar toplevel \label{sec:isar-toplevel} *}
-
-text {* The Isar toplevel may be considered the central hub of the
- Isabelle/Isar system, where all key components and sub-systems are
- integrated into a single read-eval-print loop of Isar commands. We
- shall even incorporate the existing {\ML} toplevel of the compiler
- and run-time system (cf.\ \secref{sec:ML-toplevel}).
-
- Isabelle/Isar departs from the original ``LCF system architecture''
- where {\ML} was really The Meta Language for defining theories and
- conducting proofs. Instead, {\ML} now only serves as the
- implementation language for the system (and user extensions), while
- the specific Isar toplevel supports the concepts of theory and proof
- development natively. This includes the graph structure of theories
- and the block structure of proofs, support for unlimited undo,
- facilities for tracing, debugging, timing, profiling etc.
-
- \medskip The toplevel maintains an implicit state, which is
- transformed by a sequence of transitions -- either interactively or
- in batch-mode. In interactive mode, Isar state transitions are
- encapsulated as safe transactions, such that both failure and undo
- are handled conveniently without destroying the underlying draft
- theory (cf.~\secref{sec:context-theory}). In batch mode,
- transitions operate in a linear (destructive) fashion, such that
- error conditions abort the present attempt to construct a theory or
- proof altogether.
-
- The toplevel state is a disjoint sum of empty @{text toplevel},
- @{text theory}, or @{text proof}. On entering the main Isar loop we
- start with an empty toplevel. A theory is commenced by giving a
- @{text \<THEORY>} header; within a theory we may issue theory
- commands such as @{text \<DEFINITION>}, or state a @{text
- \<THEOREM>} to be proven. Now we are within a proof state, with a
- rich collection of Isar proof commands for structured proof
- composition, or unstructured proof scripts. When the proof is
- concluded we get back to the theory, which is then updated by
- storing the resulting fact. Further theory declarations or theorem
- statements with proofs may follow, until we eventually conclude the
- theory development by issuing @{text \<END>}. The resulting theory
- is then stored within the theory database and we are back to the
- empty toplevel.
-
- In addition to these proper state transformations, there are also
- some diagnostic commands for peeking at the toplevel state without
- modifying it (e.g.\ \isakeyword{thm}, \isakeyword{term},
- \isakeyword{print-cases}).
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type Toplevel.state} \\
- @{index_ML Toplevel.UNDEF: "exn"} \\
- @{index_ML Toplevel.is_toplevel: "Toplevel.state -> bool"} \\
- @{index_ML Toplevel.theory_of: "Toplevel.state -> theory"} \\
- @{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
- @{index_ML Toplevel.debug: "bool ref"} \\
- @{index_ML Toplevel.timing: "bool ref"} \\
- @{index_ML Toplevel.profiling: "int ref"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type Toplevel.state} represents Isar toplevel states,
- which are normally manipulated through the concept of toplevel
- transitions only (\secref{sec:toplevel-transition}). Also note that
- a raw toplevel state is subject to the same linearity restrictions
- as a theory context (cf.~\secref{sec:context-theory}).
-
- \item @{ML Toplevel.UNDEF} is raised for undefined toplevel
- operations. Many operations work only partially for certain cases,
- since @{ML_type Toplevel.state} is a sum type.
-
- \item @{ML Toplevel.is_toplevel}~@{text "state"} checks for an empty
- toplevel state.
-
- \item @{ML Toplevel.theory_of}~@{text "state"} selects the theory of
- a theory or proof (!), otherwise raises @{ML Toplevel.UNDEF}.
-
- \item @{ML Toplevel.proof_of}~@{text "state"} selects the Isar proof
- state if available, otherwise raises @{ML Toplevel.UNDEF}.
-
- \item @{ML "set Toplevel.debug"} makes the toplevel print further
- details about internal error conditions, exceptions being raised
- etc.
-
- \item @{ML "set Toplevel.timing"} makes the toplevel print timing
- information for each Isar command being executed.
-
- \item @{ML Toplevel.profiling}~@{verbatim ":="}~@{text "n"} controls
- low-level profiling of the underlying {\ML} runtime system. For
- Poly/ML, @{text "n = 1"} means time and @{text "n = 2"} space
- profiling.
-
- \end{description}
-*}
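-
-text {*
-  For example, a diagnostic function over the toplevel state might be
-  written as follows (a hypothetical sketch):
-
-  \begin{verbatim}
-  fun print_theory_name state =
-    if Toplevel.is_toplevel state then writeln "empty toplevel"
-    else writeln (Context.theory_name (Toplevel.theory_of state));
-  \end{verbatim}
-*}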
-
-
-subsection {* Toplevel transitions \label{sec:toplevel-transition} *}
-
-text {*
- An Isar toplevel transition consists of a partial function on the
- toplevel state, with additional information for diagnostics and
- error reporting: there are fields for command name, source position,
- optional source text, as well as flags for interactive-only commands
- (which issue a warning in batch-mode), printing of result state,
- etc.
-
- The operational part is represented as the sequential union of a
- list of partial functions, which are tried in turn until the first
- one succeeds. This acts like an outer case-expression for various
- alternative state transitions. For example, \isakeyword{qed} acts
- differently for a local proofs vs.\ the global ending of the main
- proof.
-
- Toplevel transitions are composed via transition transformers.
- Internally, Isar commands are put together from an empty transition
- extended by name and source position (and optional source text). It
- is then left to the individual command parser to turn the given
- concrete syntax into a suitable transition transformer that adjoins
- actual operations on a theory or proof state etc.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML Toplevel.print: "Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.no_timing: "Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.keep: "(Toplevel.state -> unit) ->
- Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.theory: "(theory -> theory) ->
- Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.theory_to_proof: "(theory -> Proof.state) ->
- Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.proof: "(Proof.state -> Proof.state) ->
- Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.proofs: "(Proof.state -> Proof.state Seq.seq) ->
- Toplevel.transition -> Toplevel.transition"} \\
- @{index_ML Toplevel.end_proof: "(bool -> Proof.state -> Proof.context) ->
- Toplevel.transition -> Toplevel.transition"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML Toplevel.print}~@{text "tr"} sets the print flag, which
- causes the toplevel loop to echo the result state (in interactive
- mode).
-
- \item @{ML Toplevel.no_timing}~@{text "tr"} indicates that the
- transition should never show timing information, e.g.\ because it is
- a diagnostic command.
-
- \item @{ML Toplevel.keep}~@{text "tr"} adjoins a diagnostic
- function.
-
- \item @{ML Toplevel.theory}~@{text "tr"} adjoins a theory
- transformer.
-
- \item @{ML Toplevel.theory_to_proof}~@{text "tr"} adjoins a global
- goal function, which turns a theory into a proof state. The theory
- may be changed before entering the proof; the generic Isar goal
- setup includes an argument that specifies how to apply the proven
- result to the theory, when the proof is finished.
-
- \item @{ML Toplevel.proof}~@{text "tr"} adjoins a deterministic
- proof command, with a singleton result.
-
- \item @{ML Toplevel.proofs}~@{text "tr"} adjoins a general proof
- command, with zero or more result states (represented as a lazy
- list).
-
- \item @{ML Toplevel.end_proof}~@{text "tr"} adjoins a concluding
- proof command that returns the resulting theory, after storing the
- resulting facts in the context etc.
-
- \end{description}
-*}
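-
-text {*
-  For example, a trivial diagnostic command could be assembled from
-  these combinators roughly as follows (a hypothetical sketch;
-  \verb|OuterSyntax.command| and \verb|OuterKeyword.diag| are assumed
-  to be available as in the present system):
-
-  \begin{verbatim}
-  val _ =
-    OuterSyntax.command "say_hello" "print a greeting" OuterKeyword.diag
-      (Scan.succeed (Toplevel.no_timing o Toplevel.keep
-        (fn _ => writeln "hello")));
-  \end{verbatim}
-*}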
-
-
-subsection {* Toplevel control *}
-
-text {*
- There are a few special control commands that modify the behavior of
- the toplevel itself, and only make sense in interactive mode. Under
- normal circumstances, the user encounters these only implicitly as
- part of the protocol between the Isabelle/Isar system and a
- user-interface such as ProofGeneral.
-
- \begin{description}
-
- \item \isacommand{undo} follows the three-level hierarchy of empty
- toplevel vs.\ theory vs.\ proof: undo within a proof reverts to the
- previous proof context, undo after a proof reverts to the theory
- before the initial goal statement, undo of a theory command reverts
- to the previous theory value, undo of a theory header discontinues
- the current theory development and removes it from the theory
- database (\secref{sec:theory-database}).
-
- \item \isacommand{kill} aborts the current level of development:
- kill in a proof context reverts to the theory before the initial
- goal statement, kill in a theory context aborts the current theory
- development, removing it from the database.
-
- \item \isacommand{exit} drops out of the Isar toplevel into the
- underlying {\ML} toplevel (\secref{sec:ML-toplevel}). The Isar
- toplevel state is preserved and may be continued later.
-
- \item \isacommand{quit} terminates the Isabelle/Isar process without
- saving.
-
- \end{description}
-*}
-
-
-section {* ML toplevel \label{sec:ML-toplevel} *}
-
-text {*
- The {\ML} toplevel provides a read-compile-eval-print loop for {\ML}
- values, types, structures, and functors. {\ML} declarations operate
- on the global system state, which consists of the compiler
- environment plus the values of {\ML} reference variables. There is
- no clean way to undo {\ML} declarations, except for reverting to a
- previously saved state of the whole Isabelle process. {\ML} input
- is either read interactively from a TTY, or from a string (usually
- within a theory text), or from a source file (usually loaded from a
- theory).
-
- Whenever the {\ML} toplevel is active, the current Isabelle theory
- context is passed as an internal reference variable. Thus {\ML}
- code may access the theory context during compilation; it may even
- change the value of a theory under construction --- while
- observing the usual linearity restrictions
- (cf.~\secref{sec:context-theory}).
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML the_context: "unit -> theory"} \\
- @{index_ML "Context.>> ": "(Context.generic -> Context.generic) -> unit"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML "the_context ()"} refers to the theory context of the
- {\ML} toplevel --- at compile time! {\ML} code needs to take care
- to refer to @{ML "the_context ()"} correctly. Recall that
- evaluation of a function body is delayed until actual runtime.
- Moreover, persistent {\ML} toplevel bindings to an unfinished theory
- should be avoided: code should either project out the desired
- information immediately, or produce an explicit @{ML_type
- theory_ref} (cf.\ \secref{sec:context-theory}).
-
- \item @{ML "Context.>>"}~@{text f} applies context transformation
- @{text f} to the implicit context of the {\ML} toplevel.
-
- \end{description}
-
- It is very important to note that the above functions are really
- restricted to the compile time, even though the {\ML} compiler is
- invoked at runtime! The majority of {\ML} code uses explicit
- functional arguments of a theory or proof context instead. Thus it
- may be invoked for an arbitrary context later on, without having to
- worry about any operational details.
-
- \bigskip
-
- \begin{mldecls}
- @{index_ML Isar.main: "unit -> unit"} \\
- @{index_ML Isar.loop: "unit -> unit"} \\
- @{index_ML Isar.state: "unit -> Toplevel.state"} \\
- @{index_ML Isar.exn: "unit -> (exn * string) option"} \\
- @{index_ML Isar.context: "unit -> Proof.context"} \\
- @{index_ML Isar.goal: "unit -> thm"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML "Isar.main ()"} invokes the Isar toplevel from {\ML},
- initializing an empty toplevel state.
-
- \item @{ML "Isar.loop ()"} continues the Isar toplevel with the
- current state, after having dropped out of the Isar toplevel loop.
-
- \item @{ML "Isar.state ()"} and @{ML "Isar.exn ()"} get current
- toplevel state and error condition, respectively. This only works
- after having dropped out of the Isar toplevel loop.
-
- \item @{ML "Isar.context ()"} produces the proof context from @{ML
- "Isar.state ()"}, analogous to @{ML Context.proof_of}
- (\secref{sec:generic-context}).
-
- \item @{ML "Isar.goal ()"} picks the tactical goal from @{ML
- "Isar.state ()"}, represented as a theorem according to
- \secref{sec:tactical-goals}.
-
- \end{description}
-*}
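-
-text {*
-  For example, within a theory text the implicit context may be
-  transformed at compile time as follows (a hypothetical sketch):
-
-  \begin{verbatim}
-  Context.>> (Context.map_theory (Sign.add_path "demo"));
-  \end{verbatim}
-*}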
-
-
-section {* Theory database \label{sec:theory-database} *}
-
-text {*
- The theory database maintains a collection of theories, together
- with some administrative information about their original sources,
- which are held in an external store (i.e.\ some directory within the
- regular file system).
-
- The theory database is organized as a directed acyclic graph;
- entries are referenced by theory name. Although some additional
- interfaces allow one to include a directory specification as well, this
- is only a hint to the underlying theory loader. The internal theory
- name space is flat!
-
- Theory @{text A} is associated with the main theory file @{text
- A}\verb,.thy,, which needs to be accessible through the theory
- loader path. Any number of additional {\ML} source files may be
- associated with each theory, by declaring these dependencies in the
- theory header as @{text \<USES>}, and loading them consecutively
- within the theory context. The system keeps track of incoming {\ML}
- sources and associates them with the current theory. The file
- @{text A}\verb,.ML, is loaded after a theory has been concluded, in
- order to support legacy {\ML} proof scripts.
-
- The basic internal actions of the theory database are @{text
- "update"}, @{text "outdate"}, and @{text "remove"}:
-
- \begin{itemize}
-
- \item @{text "update A"} introduces a link of @{text "A"} with a
- @{text "theory"} value of the same name; it asserts that the theory
- sources are now consistent with that value;
-
- \item @{text "outdate A"} invalidates the link of a theory database
- entry to its sources, but retains the present theory value;
-
- \item @{text "remove A"} deletes entry @{text "A"} from the theory
- database.
-
- \end{itemize}
-
- These actions are propagated to sub- or super-graphs of a theory
- entry as expected, in order to preserve global consistency of the
- state of all loaded theories with the sources of the external store.
- This implies certain causalities between actions: @{text "update"}
- or @{text "outdate"} of an entry will @{text "outdate"} all
- descendants; @{text "remove"} will @{text "remove"} all descendants.
-
- \medskip There are separate user-level interfaces to operate on the
- theory database directly or indirectly. The primitive actions then
- just happen automatically while working with the system. In
- particular, processing a theory header @{text "\<THEORY> A
- \<IMPORTS> B\<^sub>1 \<dots> B\<^sub>n \<BEGIN>"} ensures that the
- sub-graph of the collective imports @{text "B\<^sub>1 \<dots> B\<^sub>n"}
- is up-to-date, too. Earlier theories are reloaded as required, with
- @{text update} actions proceeding in topological order according to
- theory dependencies. There may also be a wave of implied @{text
- outdate} actions for derived theory nodes until a stable situation
- is achieved eventually.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML theory: "string -> theory"} \\
- @{index_ML use_thy: "string -> unit"} \\
- @{index_ML use_thys: "string list -> unit"} \\
- @{index_ML ThyInfo.touch_thy: "string -> unit"} \\
- @{index_ML ThyInfo.remove_thy: "string -> unit"} \\[1ex]
- @{index_ML ThyInfo.begin_theory}@{verbatim ": ... -> bool -> theory"} \\
- @{index_ML ThyInfo.end_theory: "theory -> unit"} \\
- @{index_ML ThyInfo.register_theory: "theory -> unit"} \\[1ex]
- @{verbatim "datatype action = Update | Outdate | Remove"} \\
- @{index_ML ThyInfo.add_hook: "(ThyInfo.action -> string -> unit) -> unit"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML theory}~@{text A} retrieves the theory value presently
- associated with name @{text A}. Note that the result might be
- outdated.
-
- \item @{ML use_thy}~@{text A} ensures that theory @{text A} is fully
- up-to-date wrt.\ the external file store, reloading outdated
- ancestors as required.
-
- \item @{ML use_thys} is similar to @{ML use_thy}, but handles
- several theories simultaneously. Thus it acts like processing the
- import header of a theory, though without performing the merge of
- the result.
-
- \item @{ML ThyInfo.touch_thy}~@{text A} performs an @{text outdate} action
- on theory @{text A} and all descendants.
-
- \item @{ML ThyInfo.remove_thy}~@{text A} deletes theory @{text A} and all
- descendants from the theory database.
-
- \item @{ML ThyInfo.begin_theory} is the basic operation behind a
- @{text \<THEORY>} header declaration. This {\ML} function is
- normally not invoked directly.
-
- \item @{ML ThyInfo.end_theory} concludes the loading of a theory
- proper and stores the result in the theory database.
-
- \item @{ML ThyInfo.register_theory}~@{text "thy"} registers an
- existing theory value with the theory loader database. There is no
- management of associated sources.
-
- \item @{ML "ThyInfo.add_hook"}~@{text f} registers function @{text
- f} as a hook for theory database actions. The function will be
- invoked with the action and theory name being involved; thus derived
- actions may be performed in associated system components, e.g.\
- maintaining the state of an editor for the theory sources.
-
- The kind and order of actions occurring in practice depend both on
- user interactions and the internal process of resolving theory
- imports. Hooks should not rely on a particular policy here! Any
- exceptions raised by the hook are ignored.
-
- \end{description}
-*}
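-
-text {*
-  For example, a hook that merely traces database actions might look
-  like this (a hypothetical sketch):
-
-  \begin{verbatim}
-  val _ = ThyInfo.add_hook (fn action => fn name =>
-    writeln (name ^ ": " ^
-      (case action of
-        ThyInfo.Update => "update"
-      | ThyInfo.Outdate => "outdate"
-      | ThyInfo.Remove => "remove")));
-  \end{verbatim}
-*}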
-
-end
--- a/doc-src/IsarImplementation/Thy/isar.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-
-(* $Id$ *)
-
-theory isar imports base begin
-
-chapter {* Isar proof texts *}
-
-section {* Proof context *}
-
-text FIXME
-
-
-section {* Proof state \label{sec:isar-proof-state} *}
-
-text {*
- FIXME
-
-\glossary{Proof state}{The whole configuration of a structured proof,
-consisting of a \seeglossary{proof context} and an optional
-\seeglossary{structured goal}. Internally, an Isar proof state is
-organized as a stack to accommodate the block structure of proof texts.
-For historical reasons, a low-level \seeglossary{tactical goal} is
-occasionally called ``proof state'' as well.}
-
-\glossary{Structured goal}{FIXME}
-
-\glossary{Goal}{See \seeglossary{tactical goal} or \seeglossary{structured goal}. \norefpage}
-
-
-*}
-
-section {* Proof methods *}
-
-text FIXME
-
-section {* Attributes *}
-
-text "FIXME ?!"
-
-
-end
--- a/doc-src/IsarImplementation/Thy/locale.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-
-(* $Id$ *)
-
-theory "locale" imports base begin
-
-chapter {* Structured specifications *}
-
-section {* Specification elements *}
-
-text FIXME
-
-
-section {* Type-inference *}
-
-text FIXME
-
-
-section {* Local theories *}
-
-text {*
- FIXME
-
- \glossary{Local theory}{FIXME}
-*}
-
-end
--- a/doc-src/IsarImplementation/Thy/logic.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,851 +0,0 @@
-theory logic imports base begin
-
-chapter {* Primitive logic \label{ch:logic} *}
-
-text {*
- The logical foundations of Isabelle/Isar are that of the Pure logic,
- which has been introduced as a natural-deduction framework in
- \cite{paulson700}. This is essentially the same logic as ``@{text
- "\<lambda>HOL"}'' in the more abstract setting of Pure Type Systems (PTS)
- \cite{Barendregt-Geuvers:2001}, although there are some key
- differences in the specific treatment of simple types in
- Isabelle/Pure.
-
- Following type-theoretic parlance, the Pure logic consists of three
- levels of @{text "\<lambda>"}-calculus with corresponding arrows, @{text
- "\<Rightarrow>"} for syntactic function space (terms depending on terms), @{text
- "\<And>"} for universal quantification (proofs depending on terms), and
- @{text "\<Longrightarrow>"} for implication (proofs depending on proofs).
-
- Derivations are relative to a logical theory, which declares type
- constructors, constants, and axioms. Theory declarations support
- schematic polymorphism, which is strictly speaking outside the
- logic.\footnote{This is the deeper logical reason why the theory
- context @{text "\<Theta>"} is separate from the proof context @{text "\<Gamma>"}
- of the core calculus.}
-*}
-
-
-section {* Types \label{sec:types} *}
-
-text {*
- The language of types is an uninterpreted order-sorted first-order
- algebra; types are qualified by ordered type classes.
-
- \medskip A \emph{type class} is an abstract syntactic entity
- declared in the theory context. The \emph{subclass relation} @{text
- "c\<^isub>1 \<subseteq> c\<^isub>2"} is specified by stating an acyclic
- generating relation; the transitive closure is maintained
- internally. The resulting relation is an ordering: reflexive,
- transitive, and antisymmetric.
-
- A \emph{sort} is a list of type classes written as @{text "s =
- {c\<^isub>1, \<dots>, c\<^isub>m}"}, which represents symbolic
- intersection. Notationally, the curly braces are omitted for
- singleton intersections, i.e.\ any class @{text "c"} may be read as
- a sort @{text "{c}"}. The ordering on type classes is extended to
- sorts according to the meaning of intersections: @{text
- "{c\<^isub>1, \<dots> c\<^isub>m} \<subseteq> {d\<^isub>1, \<dots>, d\<^isub>n}"} iff
- @{text "\<forall>j. \<exists>i. c\<^isub>i \<subseteq> d\<^isub>j"}. The empty intersection
- @{text "{}"} refers to the universal sort, which is the largest
- element wrt.\ the sort order. The intersections of all (finitely
- many) classes declared in the current theory are the minimal
- elements wrt.\ the sort order.
-
- \medskip A \emph{fixed type variable} is a pair of a basic name
- (starting with a @{text "'"} character) and a sort constraint, e.g.\
- @{text "('a, s)"} which is usually printed as @{text "\<alpha>\<^isub>s"}.
- A \emph{schematic type variable} is a pair of an indexname and a
- sort constraint, e.g.\ @{text "(('a, 0), s)"} which is usually
- printed as @{text "?\<alpha>\<^isub>s"}.
-
- Note that \emph{all} syntactic components contribute to the identity
- of type variables, including the sort constraint. The core logic
- handles type variables with the same name but different sorts as
- different, although some outer layers of the system make it hard to
- produce anything like this.
-
- A \emph{type constructor} @{text "\<kappa>"} is a @{text "k"}-ary operator
- on types declared in the theory. Type constructor application is
- written postfix as @{text "(\<alpha>\<^isub>1, \<dots>, \<alpha>\<^isub>k)\<kappa>"}. For
- @{text "k = 0"} the argument tuple is omitted, e.g.\ @{text "prop"}
- instead of @{text "()prop"}. For @{text "k = 1"} the parentheses
- are omitted, e.g.\ @{text "\<alpha> list"} instead of @{text "(\<alpha>)list"}.
- Further notation is provided for specific constructors, notably the
- right-associative infix @{text "\<alpha> \<Rightarrow> \<beta>"} instead of @{text "(\<alpha>,
- \<beta>)fun"}.
-
- A \emph{type} is defined inductively over type variables and type
- constructors as follows: @{text "\<tau> = \<alpha>\<^isub>s | ?\<alpha>\<^isub>s |
- (\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>k)\<kappa>"}.
-
- A \emph{type abbreviation} is a syntactic definition @{text
- "(\<^vec>\<alpha>)\<kappa> = \<tau>"} of an arbitrary type expression @{text "\<tau>"} over
- variables @{text "\<^vec>\<alpha>"}. Type abbreviations appear as type
- constructors in the syntax, but are expanded before entering the
- logical core.
-
- A \emph{type arity} declares the image behavior of a type
- constructor wrt.\ the algebra of sorts: @{text "\<kappa> :: (s\<^isub>1, \<dots>,
- s\<^isub>k)s"} means that @{text "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>k)\<kappa>"} is
- of sort @{text "s"} if every argument type @{text "\<tau>\<^isub>i"} is
- of sort @{text "s\<^isub>i"}. Arity declarations are implicitly
- completed, i.e.\ @{text "\<kappa> :: (\<^vec>s)c"} entails @{text "\<kappa> ::
- (\<^vec>s)c'"} for any @{text "c' \<supseteq> c"}.
-
- \medskip The sort algebra is always maintained as \emph{coregular},
- which means that type arities are consistent with the subclass
- relation: for any type constructor @{text "\<kappa>"}, and classes @{text
- "c\<^isub>1 \<subseteq> c\<^isub>2"}, and arities @{text "\<kappa> ::
- (\<^vec>s\<^isub>1)c\<^isub>1"} and @{text "\<kappa> ::
- (\<^vec>s\<^isub>2)c\<^isub>2"}, @{text "\<^vec>s\<^isub>1 \<subseteq>
- \<^vec>s\<^isub>2"} holds component-wise.
-
- The key property of a coregular order-sorted algebra is that sort
- constraints can be solved in a most general fashion: for each type
- constructor @{text "\<kappa>"} and sort @{text "s"} there is a most general
- vector of argument sorts @{text "(s\<^isub>1, \<dots>, s\<^isub>k)"} such
- that a type scheme @{text "(\<alpha>\<^bsub>s\<^isub>1\<^esub>, \<dots>,
- \<alpha>\<^bsub>s\<^isub>k\<^esub>)\<kappa>"} is of sort @{text "s"}.
- Consequently, type unification has most general solutions (modulo
- equivalence of sorts), so type-inference produces primary types as
- expected \cite{nipkow-prehofer}.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type class} \\
- @{index_ML_type sort} \\
- @{index_ML_type arity} \\
- @{index_ML_type typ} \\
- @{index_ML map_atyps: "(typ -> typ) -> typ -> typ"} \\
- @{index_ML fold_atyps: "(typ -> 'a -> 'a) -> typ -> 'a -> 'a"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML Sign.subsort: "theory -> sort * sort -> bool"} \\
- @{index_ML Sign.of_sort: "theory -> typ * sort -> bool"} \\
- @{index_ML Sign.add_types: "(string * int * mixfix) list -> theory -> theory"} \\
- @{index_ML Sign.add_tyabbrs_i: "
- (string * string list * typ * mixfix) list -> theory -> theory"} \\
- @{index_ML Sign.primitive_class: "string * class list -> theory -> theory"} \\
- @{index_ML Sign.primitive_classrel: "class * class -> theory -> theory"} \\
- @{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type class} represents type classes; this is an alias for
- @{ML_type string}.
-
- \item @{ML_type sort} represents sorts; this is an alias for
- @{ML_type "class list"}.
-
- \item @{ML_type arity} represents type arities; this is an alias for
- triples of the form @{text "(\<kappa>, \<^vec>s, s)"} for @{text "\<kappa> ::
- (\<^vec>s)s"} described above.
-
- \item @{ML_type typ} represents types; this is a datatype with
- constructors @{ML TFree}, @{ML TVar}, @{ML Type}.
-
- \item @{ML map_atyps}~@{text "f \<tau>"} applies the mapping @{text "f"}
- to all atomic types (@{ML TFree}, @{ML TVar}) occurring in @{text
- "\<tau>"}.
-
- \item @{ML fold_atyps}~@{text "f \<tau>"} iterates the operation @{text
- "f"} over all occurrences of atomic types (@{ML TFree}, @{ML TVar})
- in @{text "\<tau>"}; the type structure is traversed from left to right.
-
- \item @{ML Sign.subsort}~@{text "thy (s\<^isub>1, s\<^isub>2)"}
- tests the subsort relation @{text "s\<^isub>1 \<subseteq> s\<^isub>2"}.
-
- \item @{ML Sign.of_sort}~@{text "thy (\<tau>, s)"} tests whether type
- @{text "\<tau>"} is of sort @{text "s"}.
-
- \item @{ML Sign.add_types}~@{text "[(\<kappa>, k, mx), \<dots>]"} declares a new
- type constructor @{text "\<kappa>"} with @{text "k"} arguments and
- optional mixfix syntax.
-
- \item @{ML Sign.add_tyabbrs_i}~@{text "[(\<kappa>, \<^vec>\<alpha>, \<tau>, mx), \<dots>]"}
- defines a new type abbreviation @{text "(\<^vec>\<alpha>)\<kappa> = \<tau>"} with
- optional mixfix syntax.
-
- \item @{ML Sign.primitive_class}~@{text "(c, [c\<^isub>1, \<dots>,
- c\<^isub>n])"} declares a new class @{text "c"}, together with class
- relations @{text "c \<subseteq> c\<^isub>i"}, for @{text "i = 1, \<dots>, n"}.
-
- \item @{ML Sign.primitive_classrel}~@{text "(c\<^isub>1,
- c\<^isub>2)"} declares the class relation @{text "c\<^isub>1 \<subseteq>
- c\<^isub>2"}.
-
- \item @{ML Sign.primitive_arity}~@{text "(\<kappa>, \<^vec>s, s)"} declares
- the arity @{text "\<kappa> :: (\<^vec>s)s"}.
-
- \end{description}
-*}
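-
-text {*
-  For example, the names of all fixed type variables occurring in a
-  type may be collected as follows (a hypothetical sketch):
-
-  \begin{verbatim}
-  fun add_tfree_names T =
-    fold_atyps (fn TFree (a, _) => insert (op =) a | _ => I) T;
-  \end{verbatim}
-*}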
-
-
-
-section {* Terms \label{sec:terms} *}
-
-text {*
- \glossary{Term}{FIXME}
-
- The language of terms is that of simply-typed @{text "\<lambda>"}-calculus
- with de-Bruijn indices for bound variables (cf.\ \cite{debruijn72}
- or \cite{paulson-ml2}), with the types being determined
- by the corresponding binders. In contrast, free variables and
- constants have an explicit name and type in each occurrence.
-
- \medskip A \emph{bound variable} is a natural number @{text "b"},
- which accounts for the number of intermediate binders between the
- variable occurrence in the body and its binding position. For
- example, the de-Bruijn term @{text
- "\<lambda>\<^bsub>nat\<^esub>. \<lambda>\<^bsub>nat\<^esub>. 1 + 0"} would
- correspond to @{text
- "\<lambda>x\<^bsub>nat\<^esub>. \<lambda>y\<^bsub>nat\<^esub>. x + y"} in a named
- representation. Note that a bound variable may be represented by
- different de-Bruijn indices at different occurrences, depending on
- the nesting of abstractions.
-
- A \emph{loose variable} is a bound variable that is outside the
- scope of local binders. The types (and names) for loose variables
- can be managed as a separate context that is maintained as a stack
- of hypothetical binders. The core logic operates on closed terms,
- without any loose variables.
-
- A \emph{fixed variable} is a pair of a basic name and a type, e.g.\
- @{text "(x, \<tau>)"} which is usually printed @{text "x\<^isub>\<tau>"}. A
- \emph{schematic variable} is a pair of an indexname and a type,
- e.g.\ @{text "((x, 0), \<tau>)"} which is usually printed as @{text
- "?x\<^isub>\<tau>"}.
-
- \medskip A \emph{constant} is a pair of a basic name and a type,
- e.g.\ @{text "(c, \<tau>)"} which is usually printed as @{text
- "c\<^isub>\<tau>"}. Constants are declared in the context as polymorphic
- families @{text "c :: \<sigma>"}, meaning that all substitution instances
- @{text "c\<^isub>\<tau>"} for @{text "\<tau> = \<sigma>\<vartheta>"} are valid.
-
- The vector of \emph{type arguments} of constant @{text "c\<^isub>\<tau>"}
- wrt.\ the declaration @{text "c :: \<sigma>"} is defined as the codomain of
- the matcher @{text "\<vartheta> = {?\<alpha>\<^isub>1 \<mapsto> \<tau>\<^isub>1, \<dots>,
- ?\<alpha>\<^isub>n \<mapsto> \<tau>\<^isub>n}"} presented in canonical order @{text
- "(\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n)"}. Within a given theory context,
- there is a one-to-one correspondence between any constant @{text
- "c\<^isub>\<tau>"} and the application @{text "c(\<tau>\<^isub>1, \<dots>,
- \<tau>\<^isub>n)"} of its type arguments. For example, with @{text "plus
- :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"}, the instance @{text "plus\<^bsub>nat \<Rightarrow> nat \<Rightarrow>
- nat\<^esub>"} corresponds to @{text "plus(nat)"}.
-
- Constant declarations @{text "c :: \<sigma>"} may contain sort constraints
- for type variables in @{text "\<sigma>"}. These are observed by
- type-inference as expected, but \emph{ignored} by the core logic.
- This means the primitive logic is able to reason with instances of
- polymorphic constants that the user-level type-checker would reject
- due to violation of type class restrictions.
-
- \medskip An \emph{atomic} term is either a variable or constant. A
- \emph{term} is defined inductively over atomic terms, with
- abstraction and application as follows: @{text "t = b | x\<^isub>\<tau> |
- ?x\<^isub>\<tau> | c\<^isub>\<tau> | \<lambda>\<^isub>\<tau>. t | t\<^isub>1 t\<^isub>2"}.
- Parsing and printing take care of converting between the external
- representation with named bound variables and the internal de-Bruijn
- representation. Subsequently, we shall use the named notation
- instead of the internal one.
-
- The inductive relation @{text "t :: \<tau>"} assigns a (unique) type to a
- term according to the structure of atomic terms, abstractions, and
- applications:
- \[
- \infer{@{text "a\<^isub>\<tau> :: \<tau>"}}{}
- \qquad
- \infer{@{text "(\<lambda>x\<^sub>\<tau>. t) :: \<tau> \<Rightarrow> \<sigma>"}}{@{text "t :: \<sigma>"}}
- \qquad
- \infer{@{text "t u :: \<sigma>"}}{@{text "t :: \<tau> \<Rightarrow> \<sigma>"} & @{text "u :: \<tau>"}}
- \]
- A \emph{well-typed term} is a term that can be typed according to these rules.
-
- Typing information can be omitted: type-inference is able to
- reconstruct the most general type of a raw term, while assigning
- most general types to all of its variables and constants.
- Type-inference depends on a context of type constraints for fixed
- variables, and declarations for polymorphic constants.
-
- The identity of atomic terms consists both of the name and the type
- component. This means that different variables @{text
- "x\<^bsub>\<tau>\<^isub>1\<^esub>"} and @{text
- "x\<^bsub>\<tau>\<^isub>2\<^esub>"} may become the same after type
- instantiation. Some outer layers of the system make it hard to
- produce variables of the same name, but different types. In
- contrast, mixed instances of polymorphic constants occur frequently.
-
- \medskip The \emph{hidden polymorphism} of a term @{text "t :: \<sigma>"}
- is the set of type variables occurring in @{text "t"}, but not in
- @{text "\<sigma>"}. This means that the term implicitly depends on type
- arguments that are not accounted for in the result type, i.e.\ there are
- different type instances @{text "t\<vartheta> :: \<sigma>"} and @{text
- "t\<vartheta>' :: \<sigma>"} with the same type. This slightly
- pathological situation notoriously demands additional care.
-
- \medskip A \emph{term abbreviation} is a syntactic definition @{text
- "c\<^isub>\<sigma> \<equiv> t"} of a closed term @{text "t"} of type @{text "\<sigma>"},
- without any hidden polymorphism. A term abbreviation looks like a
- constant in the syntax, but is expanded before entering the logical
- core. Abbreviations are usually reverted when printing terms, using
- @{text "t \<rightarrow> c\<^isub>\<sigma>"} as rules for higher-order rewriting.
-
- \medskip Canonical operations on @{text "\<lambda>"}-terms include @{text
- "\<alpha>\<beta>\<eta>"}-conversion: @{text "\<alpha>"}-conversion refers to capture-free
- renaming of bound variables; @{text "\<beta>"}-conversion contracts an
- abstraction applied to an argument term, substituting the argument
- in the body: @{text "(\<lambda>x. b)a"} becomes @{text "b[a/x]"}; @{text
- "\<eta>"}-conversion contracts vacuous application-abstraction: @{text
- "\<lambda>x. f x"} becomes @{text "f"}, provided that the bound variable
- does not occur in @{text "f"}.
-
- Terms are normally treated modulo @{text "\<alpha>"}-conversion, which is
- implicit in the de-Bruijn representation. Names for bound variables
- in abstractions are maintained separately as (meaningless) comments,
- mostly for parsing and printing. Full @{text "\<alpha>\<beta>\<eta>"}-conversion is
- commonplace in various standard operations (\secref{sec:obj-rules})
- that are based on higher-order unification and matching.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type term} \\
- @{index_ML "op aconv": "term * term -> bool"} \\
- @{index_ML map_types: "(typ -> typ) -> term -> term"} \\
- @{index_ML fold_types: "(typ -> 'a -> 'a) -> term -> 'a -> 'a"} \\
- @{index_ML map_aterms: "(term -> term) -> term -> term"} \\
- @{index_ML fold_aterms: "(term -> 'a -> 'a) -> term -> 'a -> 'a"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML fastype_of: "term -> typ"} \\
- @{index_ML lambda: "term -> term -> term"} \\
- @{index_ML betapply: "term * term -> term"} \\
- @{index_ML Sign.declare_const: "Properties.T -> (binding * typ) * mixfix ->
- theory -> term * theory"} \\
- @{index_ML Sign.add_abbrev: "string -> Properties.T -> binding * term ->
- theory -> (term * term) * theory"} \\
- @{index_ML Sign.const_typargs: "theory -> string * typ -> typ list"} \\
- @{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type term} represents de-Bruijn terms, with comments in
- abstractions, and explicitly named free variables and constants;
- this is a datatype with constructors @{ML Bound}, @{ML Free}, @{ML
- Var}, @{ML Const}, @{ML Abs}, @{ML "op $"}.
-
- \item @{text "t"}~@{ML aconv}~@{text "u"} checks @{text
- "\<alpha>"}-equivalence of two terms. This is the basic equality relation
- on type @{ML_type term}; raw datatype equality should only be used
- for operations related to parsing or printing!
-
- \item @{ML map_types}~@{text "f t"} applies the mapping @{text
- "f"} to all types occurring in @{text "t"}.
-
- \item @{ML fold_types}~@{text "f t"} iterates the operation @{text
- "f"} over all occurrences of types in @{text "t"}; the term
- structure is traversed from left to right.
-
- \item @{ML map_aterms}~@{text "f t"} applies the mapping @{text "f"}
- to all atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
- Const}) occurring in @{text "t"}.
-
- \item @{ML fold_aterms}~@{text "f t"} iterates the operation @{text
- "f"} over all occurrences of atomic terms (@{ML Bound}, @{ML Free},
- @{ML Var}, @{ML Const}) in @{text "t"}; the term structure is
- traversed from left to right.
-
- \item @{ML fastype_of}~@{text "t"} determines the type of a
- well-typed term. This operation is relatively slow, despite the
- omission of any sanity checks.
-
- \item @{ML lambda}~@{text "a b"} produces an abstraction @{text
- "\<lambda>a. b"}, where occurrences of the atomic term @{text "a"} in the
- body @{text "b"} are replaced by bound variables.
-
- \item @{ML betapply}~@{text "(t, u)"} produces an application @{text
- "t u"}, with topmost @{text "\<beta>"}-conversion if @{text "t"} is an
- abstraction.
-
- \item @{ML Sign.declare_const}~@{text "properties ((c, \<sigma>), mx)"}
- declares a new constant @{text "c :: \<sigma>"} with optional mixfix
- syntax.
-
- \item @{ML Sign.add_abbrev}~@{text "print_mode properties (c, t)"}
- introduces a new term abbreviation @{text "c \<equiv> t"}.
-
- \item @{ML Sign.const_typargs}~@{text "thy (c, \<tau>)"} and @{ML
- Sign.const_instance}~@{text "thy (c, [\<tau>\<^isub>1, \<dots>, \<tau>\<^isub>n])"}
- convert between two representations of polymorphic constants: full
- type instance vs.\ compact type arguments form.
-
- \end{description}
-*}
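-
-text {*
-  For example, abstraction and application may be combined as follows
-  (a hypothetical sketch, assuming a type @{text "T"} and terms
-  @{text "a"} and @{text "b"} in scope):
-
-  \begin{verbatim}
-  val t = lambda (Free ("x", T)) b;   (* \x. b *)
-  val u = betapply (t, a);            (* beta-contracts, since t is an Abs *)
-  \end{verbatim}
-*}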
-
-
-section {* Theorems \label{sec:thms} *}
-
-text {*
- \glossary{Proposition}{FIXME A \seeglossary{term} of
- \seeglossary{type} @{text "prop"}. Internally, there is nothing
- special about propositions apart from their type, but the concrete
- syntax enforces a clear distinction. Propositions are structured
- via implication @{text "A \<Longrightarrow> B"} or universal quantification @{text
- "\<And>x. B x"} --- anything else is considered atomic. The canonical
- form for propositions is that of a \seeglossary{Hereditary Harrop
- Formula}. FIXME}
-
- \glossary{Theorem}{A proven proposition within a certain theory and
- proof context, formally @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<phi>"}; both contexts are
- rarely spelled out explicitly. Theorems are usually normalized
- according to the \seeglossary{HHF} format. FIXME}
-
- \glossary{Fact}{Sometimes used interchangeably for
- \seeglossary{theorem}. Strictly speaking, a list of theorems,
- essentially an extra-logical conjunction. Facts emerge either as
- local assumptions, or as results of local goal statements --- both
- may be simultaneous, hence the list representation. FIXME}
-
- \glossary{Schematic variable}{FIXME}
-
- \glossary{Fixed variable}{A variable that is bound within a certain
- proof context; an arbitrary-but-fixed entity within a portion of
- proof text. FIXME}
-
- \glossary{Free variable}{Synonymous for \seeglossary{fixed
- variable}. FIXME}
-
- \glossary{Bound variable}{FIXME}
-
- \glossary{Variable}{See \seeglossary{schematic variable},
- \seeglossary{fixed variable}, \seeglossary{bound variable}, or
- \seeglossary{type variable}. The distinguishing feature of
- different variables is their binding scope. FIXME}
-
- A \emph{proposition} is a well-typed term of type @{text "prop"}, a
- \emph{theorem} is a proven proposition (depending on a context of
- hypotheses and the background theory). Primitive inferences include
- plain natural deduction rules for the primary connectives @{text
- "\<And>"} and @{text "\<Longrightarrow>"} of the framework. There is also a builtin
- notion of equality/equivalence @{text "\<equiv>"}.
-*}
-
-subsection {* Primitive connectives and rules \label{sec:prim-rules} *}
-
-text {*
- The theory @{text "Pure"} contains constant declarations for the
- primitive connectives @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"} of
- the logical framework, see \figref{fig:pure-connectives}. The
- derivability judgment @{text "A\<^isub>1, \<dots>, A\<^isub>n \<turnstile> B"} is
- defined inductively by the primitive inferences given in
- \figref{fig:prim-rules}, with the global restriction that the
- hypotheses must \emph{not} contain any schematic variables. The
- builtin equality is conceptually axiomatized as shown in
- \figref{fig:pure-equality}, although the implementation works
- directly with derived inferences.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- @{text "all :: (\<alpha> \<Rightarrow> prop) \<Rightarrow> prop"} & universal quantification (binder @{text "\<And>"}) \\
- @{text "\<Longrightarrow> :: prop \<Rightarrow> prop \<Rightarrow> prop"} & implication (right associative infix) \\
- @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> prop"} & equality relation (infix) \\
- \end{tabular}
- \caption{Primitive connectives of Pure}\label{fig:pure-connectives}
- \end{center}
- \end{figure}
-
- \begin{figure}[htb]
- \begin{center}
- \[
- \infer[@{text "(axiom)"}]{@{text "\<turnstile> A"}}{@{text "A \<in> \<Theta>"}}
- \qquad
- \infer[@{text "(assume)"}]{@{text "A \<turnstile> A"}}{}
- \]
- \[
- \infer[@{text "(\<And>_intro)"}]{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}{@{text "\<Gamma> \<turnstile> b[x]"} & @{text "x \<notin> \<Gamma>"}}
- \qquad
- \infer[@{text "(\<And>_elim)"}]{@{text "\<Gamma> \<turnstile> b[a]"}}{@{text "\<Gamma> \<turnstile> \<And>x. b[x]"}}
- \]
- \[
- \infer[@{text "(\<Longrightarrow>_intro)"}]{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
- \qquad
- \infer[@{text "(\<Longrightarrow>_elim)"}]{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
- \]
- \caption{Primitive inferences of Pure}\label{fig:prim-rules}
- \end{center}
- \end{figure}
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- @{text "\<turnstile> (\<lambda>x. b[x]) a \<equiv> b[a]"} & @{text "\<beta>"}-conversion \\
- @{text "\<turnstile> x \<equiv> x"} & reflexivity \\
- @{text "\<turnstile> x \<equiv> y \<Longrightarrow> P x \<Longrightarrow> P y"} & substitution \\
- @{text "\<turnstile> (\<And>x. f x \<equiv> g x) \<Longrightarrow> f \<equiv> g"} & extensionality \\
- @{text "\<turnstile> (A \<Longrightarrow> B) \<Longrightarrow> (B \<Longrightarrow> A) \<Longrightarrow> A \<equiv> B"} & logical equivalence \\
- \end{tabular}
- \caption{Conceptual axiomatization of Pure equality}\label{fig:pure-equality}
- \end{center}
- \end{figure}
-
- The introduction and elimination rules for @{text "\<And>"} and @{text
- "\<Longrightarrow>"} are analogous to formation of dependently typed @{text
- "\<lambda>"}-terms representing the underlying proof objects. Proof terms
- are irrelevant in the Pure logic, though; they cannot occur within
- propositions. The system provides a runtime option to record
- explicit proof terms for primitive inferences. Thus all three
- levels of @{text "\<lambda>"}-calculus become explicit: @{text "\<Rightarrow>"} for
- terms, and @{text "\<And>/\<Longrightarrow>"} for proofs (cf.\
- \cite{Berghofer-Nipkow:2000:TPHOL}).
-
- Observe that locally fixed parameters (as in @{text "\<And>_intro"}) need
- not be recorded in the hypotheses, because the simple syntactic
- types of Pure are always inhabitable. ``Assumptions'' @{text "x ::
- \<tau>"} for type-membership are only present as long as some @{text
- "x\<^isub>\<tau>"} occurs in the statement body.\footnote{This is the key
- difference to ``@{text "\<lambda>HOL"}'' in the PTS framework
- \cite{Barendregt-Geuvers:2001}, where hypotheses @{text "x : A"} are
- treated uniformly for propositions and types.}
-
- \medskip The axiomatization of a theory is implicitly closed by
- forming all instances of type and term variables: @{text "\<turnstile>
- A\<vartheta>"} holds for any substitution instance of an axiom
- @{text "\<turnstile> A"}. By pushing substitutions through derivations
- inductively, we also get admissible @{text "generalize"} and @{text
- "instance"} rules as shown in \figref{fig:subst-rules}.
-
- \begin{figure}[htb]
- \begin{center}
- \[
- \infer{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}{@{text "\<Gamma> \<turnstile> B[\<alpha>]"} & @{text "\<alpha> \<notin> \<Gamma>"}}
- \quad
- \infer[\quad@{text "(generalize)"}]{@{text "\<Gamma> \<turnstile> B[?x]"}}{@{text "\<Gamma> \<turnstile> B[x]"} & @{text "x \<notin> \<Gamma>"}}
- \]
- \[
- \infer{@{text "\<Gamma> \<turnstile> B[\<tau>]"}}{@{text "\<Gamma> \<turnstile> B[?\<alpha>]"}}
- \quad
- \infer[\quad@{text "(instantiate)"}]{@{text "\<Gamma> \<turnstile> B[t]"}}{@{text "\<Gamma> \<turnstile> B[?x]"}}
- \]
- \caption{Admissible substitution rules}\label{fig:subst-rules}
- \end{center}
- \end{figure}
-
- Note that @{text "instantiate"} does not require an explicit
- side-condition, because @{text "\<Gamma>"} may never contain schematic
- variables.
-
- In principle, variables could be substituted in hypotheses as well,
- but this would disrupt the monotonicity of reasoning: deriving
- @{text "\<Gamma>\<vartheta> \<turnstile> B\<vartheta>"} from @{text "\<Gamma> \<turnstile> B"} is
- correct, but @{text "\<Gamma>\<vartheta> \<supseteq> \<Gamma>"} does not necessarily hold:
- the result belongs to a different proof context.
-
- \medskip An \emph{oracle} is a function that produces axioms on the
- fly. Logically, this is an instance of the @{text "axiom"} rule
- (\figref{fig:prim-rules}), but there is an operational difference.
- The system always records oracle invocations within derivations of
- theorems. Tracing plain axioms (and named theorems) is optional.
-
- Axiomatizations should be limited to the bare minimum, typically as
- part of the initial logical basis of an object-logic formalization.
- Later on, theories are usually developed in a strictly definitional
- fashion, by stating only certain equalities over new constants.
-
- A \emph{simple definition} consists of a constant declaration @{text
- "c :: \<sigma>"} together with an axiom @{text "\<turnstile> c \<equiv> t"}, where @{text "t
- :: \<sigma>"} is a closed term without any hidden polymorphism. The RHS
- may depend on further defined constants, but not @{text "c"} itself.
- Definitions of functions may be presented as @{text "c \<^vec>x \<equiv>
- t"} instead of the puristic @{text "c \<equiv> \<lambda>\<^vec>x. t"}.
-
- An \emph{overloaded definition} consists of a collection of axioms
- for the same constant, with at most one equation @{text
- "c((\<^vec>\<alpha>)\<kappa>) \<equiv> t"} for each type constructor @{text "\<kappa>"} (for
- distinct variables @{text "\<^vec>\<alpha>"}). The RHS may mention
- previously defined constants as above, or arbitrary constants @{text
- "d(\<alpha>\<^isub>i)"} for some @{text "\<alpha>\<^isub>i"} projected from @{text
- "\<^vec>\<alpha>"}. Thus overloaded definitions essentially work by
- primitive recursion over the syntactic structure of a single type
- argument.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type ctyp} \\
- @{index_ML_type cterm} \\
- @{index_ML Thm.ctyp_of: "theory -> typ -> ctyp"} \\
- @{index_ML Thm.cterm_of: "theory -> term -> cterm"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type thm} \\
- @{index_ML proofs: "int ref"} \\
- @{index_ML Thm.assume: "cterm -> thm"} \\
- @{index_ML Thm.forall_intr: "cterm -> thm -> thm"} \\
- @{index_ML Thm.forall_elim: "cterm -> thm -> thm"} \\
- @{index_ML Thm.implies_intr: "cterm -> thm -> thm"} \\
- @{index_ML Thm.implies_elim: "thm -> thm -> thm"} \\
- @{index_ML Thm.generalize: "string list * string list -> int -> thm -> thm"} \\
- @{index_ML Thm.instantiate: "(ctyp * ctyp) list * (cterm * cterm) list -> thm -> thm"} \\
- @{index_ML Thm.axiom: "theory -> string -> thm"} \\
- @{index_ML Thm.add_oracle: "bstring * ('a -> cterm) -> theory
- -> (string * ('a -> thm)) * theory"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML Theory.add_axioms_i: "(binding * term) list -> theory -> theory"} \\
- @{index_ML Theory.add_deps: "string -> string * typ -> (string * typ) list -> theory -> theory"} \\
- @{index_ML Theory.add_defs_i: "bool -> bool -> (binding * term) list -> theory -> theory"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type ctyp} and @{ML_type cterm} represent certified types
- and terms, respectively. These are abstract datatypes that
- guarantee that their values have passed the full well-formedness (and
- well-typedness) checks, relative to the declarations of type
- constructors, constants etc. in the theory.
-
- \item @{ML ctyp_of}~@{text "thy \<tau>"} and @{ML cterm_of}~@{text "thy
- t"} explicitly checks types and terms, respectively. This also
- involves some basic normalizations, such expansion of type and term
- abbreviations from the theory context.
-
- Re-certification is relatively slow and should be avoided in tight
- reasoning loops. There are separate operations to decompose
- certified entities (including actual theorems).
-
- \item @{ML_type thm} represents proven propositions. This is an
- abstract datatype that guarantees that its values have been
- constructed by basic principles of the @{ML_struct Thm} module.
- Every @{ML thm} value contains a sliding back-reference to the
- enclosing theory, cf.\ \secref{sec:context-theory}.
-
- \item @{ML proofs} determines the detail of proof recording within
- @{ML_type thm} values: @{ML 0} records only oracles, @{ML 1} records
- oracles, axioms and named theorems, @{ML 2} records full proof
- terms.
-
- \item @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML
- Thm.forall_elim}, @{ML Thm.implies_intr}, and @{ML Thm.implies_elim}
- correspond to the primitive inferences of \figref{fig:prim-rules}.
-
- \item @{ML Thm.generalize}~@{text "(\<^vec>\<alpha>, \<^vec>x)"}
- corresponds to the @{text "generalize"} rules of
- \figref{fig:subst-rules}. Here collections of type and term
- variables are generalized simultaneously, specified by the given
- basic names.
-
- \item @{ML Thm.instantiate}~@{text "(\<^vec>\<alpha>\<^isub>s,
- \<^vec>x\<^isub>\<tau>)"} corresponds to the @{text "instantiate"} rules
- of \figref{fig:subst-rules}. Type variables are substituted before
- term variables. Note that the types in @{text "\<^vec>x\<^isub>\<tau>"}
- refer to the instantiated versions.
-
- \item @{ML Thm.axiom}~@{text "thy name"} retrieves a named
- axiom, cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
-
- \item @{ML Thm.add_oracle}~@{text "(name, oracle)"} produces a named
- oracle rule, essentially generating arbitrary axioms on the fly,
- cf.\ @{text "axiom"} in \figref{fig:prim-rules}.
-
- \item @{ML Theory.add_axioms_i}~@{text "[(name, A), \<dots>]"} declares
- arbitrary propositions as axioms.
-
- \item @{ML Theory.add_deps}~@{text "name c\<^isub>\<tau>
- \<^vec>d\<^isub>\<sigma>"} declares dependencies of a named specification
- for constant @{text "c\<^isub>\<tau>"}, relative to existing
- specifications for constants @{text "\<^vec>d\<^isub>\<sigma>"}.
-
- \item @{ML Theory.add_defs_i}~@{text "unchecked overloaded [(name, c
- \<^vec>x \<equiv> t), \<dots>]"} states a definitional axiom for an existing
- constant @{text "c"}. Dependencies are recorded (cf.\ @{ML
- Theory.add_deps}), unless the @{text "unchecked"} option is set.
-
- \end{description}
-*}
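-
-text {*
-  The following minimal sketch derives @{text "A \<Longrightarrow> A"} from the
-  primitive inferences above, for an invented propositional variable
-  @{text "A"}:
-*}
-
-ML {*
-  val thy = @{theory};
-  val A = Thm.cterm_of thy (Free ("A", propT));
-  val th = Thm.implies_intr A (Thm.assume A);   (*A \<Longrightarrow> A*)
-*}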
-
-
-subsection {* Auxiliary definitions *}
-
-text {*
- Theory @{text "Pure"} provides a few auxiliary definitions, see
- \figref{fig:pure-aux}. These special constants are normally not
- exposed to the user, but appear in internal encodings.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{ll}
- @{text "conjunction :: prop \<Rightarrow> prop \<Rightarrow> prop"} & (infix @{text "&"}) \\
- @{text "\<turnstile> A & B \<equiv> (\<And>C. (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C)"} \\[1ex]
- @{text "prop :: prop \<Rightarrow> prop"} & (prefix @{text "#"}, suppressed) \\
- @{text "#A \<equiv> A"} \\[1ex]
- @{text "term :: \<alpha> \<Rightarrow> prop"} & (prefix @{text "TERM"}) \\
- @{text "term x \<equiv> (\<And>A. A \<Longrightarrow> A)"} \\[1ex]
- @{text "TYPE :: \<alpha> itself"} & (prefix @{text "TYPE"}) \\
- @{text "(unspecified)"} \\
- \end{tabular}
- \caption{Definitions of auxiliary connectives}\label{fig:pure-aux}
- \end{center}
- \end{figure}
-
- Derived conjunction rules include introduction @{text "A \<Longrightarrow> B \<Longrightarrow> A &
- B"}, and destructions @{text "A & B \<Longrightarrow> A"} and @{text "A & B \<Longrightarrow> B"}.
- Conjunction allows treating simultaneous assumptions and conclusions
- uniformly. For example, multiple claims are intermediately
- represented as explicit conjunction, but this is refined into
- separate sub-goals before the user continues the proof; the final
- result is projected into a list of theorems (cf.\
- \secref{sec:tactical-goals}).
-
- The @{text "prop"} marker (@{text "#"}) makes arbitrarily complex
- propositions appear as atomic, without changing the meaning: @{text
- "\<Gamma> \<turnstile> A"} and @{text "\<Gamma> \<turnstile> #A"} are interchangeable. See
- \secref{sec:tactical-goals} for specific operations.
-
- The @{text "term"} marker turns any well-typed term into a derivable
- proposition: @{text "\<turnstile> TERM t"} holds unconditionally. Although
- this is logically vacuous, it allows treating terms and proofs
- uniformly, similar to a type-theoretic framework.
-
- The @{text "TYPE"} constructor is the canonical representative of
- the unspecified type @{text "\<alpha> itself"}; it essentially injects the
- language of types into that of terms. There is specific notation
- @{text "TYPE(\<tau>)"} for @{text "TYPE\<^bsub>\<tau>
- itself\<^esub>"}.
- Although devoid of any particular meaning, @{text
- "TYPE(\<tau>)"} accounts for the type @{text "\<tau>"} within the term
- language. In particular, @{text "TYPE(\<alpha>)"} may be used as formal
- argument in primitive definitions, in order to circumvent hidden
- polymorphism (cf.\ \secref{sec:terms}). For example, @{text "c
- TYPE(\<alpha>) \<equiv> A[\<alpha>]"} defines @{text "c :: \<alpha> itself \<Rightarrow> prop"} in terms of
- a proposition @{text "A"} that depends on an additional type
- argument, which is essentially a predicate on types.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML Conjunction.intr: "thm -> thm -> thm"} \\
- @{index_ML Conjunction.elim: "thm -> thm * thm"} \\
- @{index_ML Drule.mk_term: "cterm -> thm"} \\
- @{index_ML Drule.dest_term: "thm -> cterm"} \\
- @{index_ML Logic.mk_type: "typ -> term"} \\
- @{index_ML Logic.dest_type: "term -> typ"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML Conjunction.intr} derives @{text "A & B"} from @{text
- "A"} and @{text "B"}.
-
- \item @{ML Conjunction.elim} derives @{text "A"} and @{text "B"}
- from @{text "A & B"}.
-
- \item @{ML Drule.mk_term} derives @{text "TERM t"}.
-
- \item @{ML Drule.dest_term} recovers term @{text "t"} from @{text
- "TERM t"}.
-
- \item @{ML Logic.mk_type}~@{text "\<tau>"} produces the term @{text
- "TYPE(\<tau>)"}.
-
- \item @{ML Logic.dest_type}~@{text "TYPE(\<tau>)"} recovers the type
- @{text "\<tau>"}.
-
- \end{description}
-*}
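-
-text {*
-  For example, the following sketch performs round-trips through the
-  @{text "TERM"} and @{text "TYPE"} encodings (the theory context is
-  only needed for certification):
-*}
-
-ML {*
-  (*inject a type into the term language and back*)
-  val t = Logic.mk_type (TFree ("'a", []));   (*TYPE('a)*)
-  val T = Logic.dest_type t;
-
-  (*turn a certified term into the vacuous theorem TERM t*)
-  val th = Drule.mk_term (Thm.cterm_of @{theory} t);
-  val ct = Drule.dest_term th;
-*}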
-
-
-section {* Object-level rules \label{sec:obj-rules} *}
-
-text %FIXME {*
-
-FIXME
-
- A \emph{rule} is any Pure theorem in HHF normal form; there is a
- separate calculus for rule composition, which is modeled after
- Gentzen's Natural Deduction \cite{Gentzen:1935}, but allows
- rules to be nested arbitrarily, similar to \cite{extensions91}.
-
- Normally, all theorems accessible to the user are proper rules.
- Low-level inferences are occasionally required internally, but the
- result should always be presented in canonical form. The higher
- interfaces of Isabelle/Isar will always produce proper rules. It is
- important to maintain this invariant in add-on applications!
-
- There are two main principles of rule composition: @{text
- "resolution"} (i.e.\ backchaining of rules) and @{text
- "by-assumption"} (i.e.\ closing a branch); both principles are
- combined in the variants of @{text "elim-resolution"} and @{text
- "dest-resolution"}. Raw @{text "composition"} is occasionally
- useful as well, although it is strictly speaking outside of the proper
- rule calculus.
-
- Rules are treated modulo general higher-order unification, which is
- unification modulo the equational theory of @{text "\<alpha>\<beta>\<eta>"}-conversion
- on @{text "\<lambda>"}-terms. Moreover, propositions are understood modulo
- the (derived) equivalence @{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}.
-
- This means that any operations within the rule calculus may be
- subject to spontaneous @{text "\<alpha>\<beta>\<eta>"}-HHF conversions. It is common
- practice not to contract or expand unnecessarily. Some mechanisms
- prefer one form, others the opposite, so there is a potential
- danger of oscillation!
-
- Only a few operations really work \emph{modulo} HHF conversion; most
- expect a normal form: quantifiers @{text "\<And>"} before implications
- @{text "\<Longrightarrow>"} at each level of nesting.
-
-\glossary{Hereditary Harrop Formula}{The set of propositions in HHF
-format is defined inductively as @{text "H = (\<And>x\<^sup>*. H\<^sup>* \<Longrightarrow>
-A)"}, for variables @{text "x"} and atomic propositions @{text "A"}.
-Any proposition may be put into HHF form by normalizing with the rule
-@{text "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}. In Isabelle, the outermost
-quantifier prefix is represented via \seeglossary{schematic
-variables}, such that the top-level structure is merely that of a
-\seeglossary{Horn Clause}}.
-
-\glossary{HHF}{See \seeglossary{Hereditary Harrop Formula}.}
-
-
- \[
- \infer[@{text "(assumption)"}]{@{text "C\<vartheta>"}}
- {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text i})}}
- \]
-
-
- \[
- \infer[@{text "(compose)"}]{@{text "\<^vec>A\<vartheta> \<Longrightarrow> C\<vartheta>"}}
- {@{text "\<^vec>A \<Longrightarrow> B"} & @{text "B' \<Longrightarrow> C"} & @{text "B\<vartheta> = B'\<vartheta>"}}
- \]
-
-
- \[
- \infer[@{text "(\<And>_lift)"}]{@{text "(\<And>\<^vec>x. \<^vec>A (?\<^vec>a \<^vec>x)) \<Longrightarrow> (\<And>\<^vec>x. B (?\<^vec>a \<^vec>x))"}}{@{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"}}
- \]
- \[
- \infer[@{text "(\<Longrightarrow>_lift)"}]{@{text "(\<^vec>H \<Longrightarrow> \<^vec>A) \<Longrightarrow> (\<^vec>H \<Longrightarrow> B)"}}{@{text "\<^vec>A \<Longrightarrow> B"}}
- \]
-
- The @{text resolve} scheme is now acquired from @{text "\<And>_lift"},
- @{text "\<Longrightarrow>_lift"}, and @{text compose}.
-
- \[
- \infer[@{text "(resolution)"}]
- {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (?\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
- {\begin{tabular}{l}
- @{text "\<^vec>A ?\<^vec>a \<Longrightarrow> B ?\<^vec>a"} \\
- @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
- @{text "(\<lambda>\<^vec>x. B (?\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
- \end{tabular}}
- \]
-
-
- FIXME @{text "elim_resolution"}, @{text "dest_resolution"}
-*}
-
-
-end
--- a/doc-src/IsarImplementation/Thy/prelim.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,779 +0,0 @@
-
-(* $Id$ *)
-
-theory prelim imports base begin
-
-chapter {* Preliminaries *}
-
-section {* Contexts \label{sec:context} *}
-
-text {*
- A logical context represents the background that is required for
- formulating statements and composing proofs. It acts as a medium to
- produce formal content, depending on earlier material (declarations,
- results etc.).
-
- For example, derivations within the Isabelle/Pure logic can be
- described as a judgment @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<phi>"}, which means that a
- proposition @{text "\<phi>"} is derivable from hypotheses @{text "\<Gamma>"}
- within the theory @{text "\<Theta>"}. There are logical reasons for
- keeping @{text "\<Theta>"} and @{text "\<Gamma>"} separate: theories can be
- liberal about supporting type constructors and schematic
- polymorphism of constants and axioms, while the inner calculus of
- @{text "\<Gamma> \<turnstile> \<phi>"} is strictly limited to Simple Type Theory (with
- fixed type variables in the assumptions).
-
- \medskip Contexts and derivations are linked by the following key
- principles:
-
- \begin{itemize}
-
- \item Transfer: monotonicity of derivations admits results to be
- transferred into a \emph{larger} context, i.e.\ @{text "\<Gamma> \<turnstile>\<^sub>\<Theta>
- \<phi>"} implies @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta>\<^sub>' \<phi>"} for contexts @{text "\<Theta>'
- \<supseteq> \<Theta>"} and @{text "\<Gamma>' \<supseteq> \<Gamma>"}.
-
- \item Export: discharge of hypotheses admits results to be exported
- into a \emph{smaller} context, i.e.\ @{text "\<Gamma>' \<turnstile>\<^sub>\<Theta> \<phi>"}
- implies @{text "\<Gamma> \<turnstile>\<^sub>\<Theta> \<Delta> \<Longrightarrow> \<phi>"} where @{text "\<Gamma>' \<supseteq> \<Gamma>"} and
- @{text "\<Delta> = \<Gamma>' - \<Gamma>"}. Note that @{text "\<Theta>"} remains unchanged here,
- only the @{text "\<Gamma>"} part is affected.
-
- \end{itemize}
-
- \medskip By modeling the main characteristics of the primitive
- @{text "\<Theta>"} and @{text "\<Gamma>"} above, and abstracting over any
- particular logical content, we arrive at the fundamental notions of
- \emph{theory context} and \emph{proof context} in Isabelle/Isar.
- These implement a certain policy to manage arbitrary \emph{context
- data}. There is a strongly-typed mechanism to declare new kinds of
- data at compile time.
-
- The internal bootstrap process of Isabelle/Pure eventually reaches a
- stage where certain data slots provide the logical content of @{text
- "\<Theta>"} and @{text "\<Gamma>"} sketched above, but this does not stop there!
- Various additional data slots support all kinds of mechanisms that
- are not necessarily part of the core logic.
-
- For example, there would be data for canonical introduction and
- elimination rules for arbitrary operators (depending on the
- object-logic and application), which enables users to perform
- standard proof steps implicitly (cf.\ the @{text "rule"} method
- \cite{isabelle-isar-ref}).
-
- \medskip Thus Isabelle/Isar is able to bring forth more and more
- concepts successively. In particular, an object-logic like
- Isabelle/HOL continues the Isabelle/Pure setup by adding specific
- components for automated reasoning (classical reasoner, tableau
- prover, structured induction etc.) and derived specification
- mechanisms (inductive predicates, recursive functions etc.). All of
- this is ultimately based on the generic data management by theory
- and proof contexts introduced here.
-*}
-
-
-subsection {* Theory context \label{sec:context-theory} *}
-
-text {*
- \glossary{Theory}{FIXME}
-
- A \emph{theory} is a data container with explicit named and unique
- identifier. Theories are related by a (nominal) sub-theory
- relation, which corresponds to the dependency graph of the original
- construction; each theory is derived from a certain sub-graph of
- ancestor theories.
-
- The @{text "merge"} operation produces the least upper bound of two
- theories, which actually degenerates into absorption of one theory
- into the other (due to the nominal sub-theory relation).
-
- The @{text "begin"} operation starts a new theory by importing
- several parent theories and entering a special @{text "draft"} mode,
- which is sustained until the final @{text "end"} operation. A draft
- theory acts like a linear type, where updates invalidate earlier
- versions. An invalidated draft is called ``stale''.
-
- The @{text "checkpoint"} operation produces an intermediate stepping
- stone that will survive the next update: both the original and the
- changed theory remain valid and are related by the sub-theory
- relation. Checkpointing essentially recovers purely functional
- theory values, at the expense of some extra internal bookkeeping.
-
- The @{text "copy"} operation produces an auxiliary version that has
- the same data content, but is unrelated to the original: updates of
- the copy do not affect the original, neither does the sub-theory
- relation hold.
-
- \medskip The example in \figref{fig:ex-theory} below shows a theory
- graph derived from @{text "Pure"}, with theory @{text "Length"}
- importing @{text "Nat"} and @{text "List"}. The body of @{text
- "Length"} consists of a sequence of updates, working mostly on
- drafts. Intermediate checkpoints may occur as well, due to the
- history mechanism provided by the Isar top-level, cf.\
- \secref{sec:isar-toplevel}.
-
- \begin{figure}[htb]
- \begin{center}
- \begin{tabular}{rcccl}
- & & @{text "Pure"} \\
- & & @{text "\<down>"} \\
- & & @{text "FOL"} \\
- & $\swarrow$ & & $\searrow$ & \\
- @{text "Nat"} & & & & @{text "List"} \\
- & $\searrow$ & & $\swarrow$ \\
- & & @{text "Length"} \\
- & & \multicolumn{3}{l}{~~@{keyword "imports"}} \\
- & & \multicolumn{3}{l}{~~@{keyword "begin"}} \\
- & & $\vdots$~~ \\
- & & @{text "\<bullet>"}~~ \\
- & & $\vdots$~~ \\
- & & @{text "\<bullet>"}~~ \\
- & & $\vdots$~~ \\
- & & \multicolumn{3}{l}{~~@{command "end"}} \\
- \end{tabular}
- \caption{A theory definition depending on ancestors}\label{fig:ex-theory}
- \end{center}
- \end{figure}
-
- \medskip There is a separate notion of \emph{theory reference} for
- maintaining a live link to an evolving theory context: updates on
- drafts are propagated automatically. Dynamic updating stops after
- an explicit @{text "end"} only.
-
- Derived entities may store a theory reference in order to indicate
- the context they belong to. This implicitly assumes monotonic
- reasoning, because the referenced context may become larger without
- further notice.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type theory} \\
- @{index_ML Theory.subthy: "theory * theory -> bool"} \\
- @{index_ML Theory.merge: "theory * theory -> theory"} \\
- @{index_ML Theory.checkpoint: "theory -> theory"} \\
- @{index_ML Theory.copy: "theory -> theory"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type theory_ref} \\
- @{index_ML Theory.deref: "theory_ref -> theory"} \\
- @{index_ML Theory.check_thy: "theory -> theory_ref"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type theory} represents theory contexts. This is
- essentially a linear type! Most operations destroy the original
- version, which then becomes ``stale''.
-
- \item @{ML "Theory.subthy"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
- compares theories according to the inherent graph structure of the
- construction. This sub-theory relation is a nominal approximation
- of inclusion (@{text "\<subseteq>"}) of the corresponding content.
-
- \item @{ML "Theory.merge"}~@{text "(thy\<^sub>1, thy\<^sub>2)"}
- absorbs one theory into the other. This fails for unrelated
- theories!
-
- \item @{ML "Theory.checkpoint"}~@{text "thy"} produces a safe
- stepping stone in the linear development of @{text "thy"}. The next
- update will result in two related, valid theories.
-
- \item @{ML "Theory.copy"}~@{text "thy"} produces a variant of @{text
- "thy"} that holds a copy of the same data. The result is not
- related to the original; the original is unchanged.
-
- \item @{ML_type theory_ref} represents a sliding reference to an
- always valid theory; updates on the original are propagated
- automatically.
-
- \item @{ML "Theory.deref"}~@{text "thy_ref"} turns a @{ML_type
- "theory_ref"} into an @{ML_type "theory"} value. As the referenced
- theory evolves monotonically over time, later invocations of @{ML
- "Theory.deref"} may refer to a larger context.
-
- \item @{ML "Theory.check_thy"}~@{text "thy"} produces a @{ML_type
- "theory_ref"} from a valid @{ML_type "theory"} value.
-
- \end{description}
-*}
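-
-text {*
-  A minimal sketch of these operations, using the ML antiquotation
-  for the current theory context:
-*}
-
-ML {*
-  val thy = @{theory};
-  val ok = Theory.subthy (thy, thy);      (*trivially true*)
-  val thy_ref = Theory.check_thy thy;     (*live reference*)
-  val thy' = Theory.deref thy_ref;        (*the same or a later version*)
-*}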
-
-
-subsection {* Proof context \label{sec:context-proof} *}
-
-text {*
- \glossary{Proof context}{The static context of a structured proof,
- acts like a local ``theory'' of the current portion of Isar proof
- text, generalizes the idea of local hypotheses @{text "\<Gamma>"} in
- judgments @{text "\<Gamma> \<turnstile> \<phi>"} of natural deduction calculi. There is a
- generic notion of introducing and discharging hypotheses.
- Arbitrary auxiliary context data may be adjoined.}
-
- A proof context is a container for pure data with a back-reference
- to the theory it belongs to. The @{text "init"} operation creates a
- proof context from a given theory. Modifications to draft theories
- are propagated to the proof context as usual, but there is also an
- explicit @{text "transfer"} operation to force resynchronization
- with more substantial updates to the underlying theory. The actual
- context data does not require any special bookkeeping, thanks to the
- lack of destructive features.
-
- Entities derived in a proof context need to record inherent logical
- requirements explicitly, since there is no separate context
- identification as for theories. For example, hypotheses used in
- primitive derivations (cf.\ \secref{sec:thms}) are recorded
- separately within the sequent @{text "\<Gamma> \<turnstile> \<phi>"}, just to make double
- sure. Results could still leak into an alien proof context due to
- programming errors, but Isabelle/Isar includes some extra validity
- checks in critical positions, notably at the end of a sub-proof.
-
- Proof contexts may be manipulated arbitrarily, although the common
- discipline is to follow block structure as a mental model: a given
- context is extended consecutively, and results are exported back
- into the original context. Note that the Isar proof states model
- block-structured reasoning explicitly, using a stack of proof
- contexts internally, cf.\ \secref{sec:isar-proof-state}.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type Proof.context} \\
- @{index_ML ProofContext.init: "theory -> Proof.context"} \\
- @{index_ML ProofContext.theory_of: "Proof.context -> theory"} \\
- @{index_ML ProofContext.transfer: "theory -> Proof.context -> Proof.context"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type Proof.context} represents proof contexts. Elements
- of this type are essentially pure values, with a sliding reference
- to the background theory.
-
- \item @{ML ProofContext.init}~@{text "thy"} produces a proof context
- derived from @{text "thy"}, initializing all data.
-
- \item @{ML ProofContext.theory_of}~@{text "ctxt"} selects the
- background theory from @{text "ctxt"}, dereferencing its internal
- @{ML_type theory_ref}.
-
- \item @{ML ProofContext.transfer}~@{text "thy ctxt"} promotes the
- background theory of @{text "ctxt"} to the super theory @{text
- "thy"}.
-
- \end{description}
-*}
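-
-text {*
-  For example, a proof context may be created from the current theory
-  and its background theory recovered again:
-*}
-
-ML {*
-  val ctxt = ProofContext.init @{theory};
-  val thy = ProofContext.theory_of ctxt;
-*}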
-
-
-subsection {* Generic contexts \label{sec:generic-context} *}
-
-text {*
- A generic context is the disjoint sum of either a theory or proof
- context. Occasionally, this enables uniform treatment of generic
- context data, typically extra-logical information. Operations on
- generic contexts include the usual injections, partial selections,
- and combinators for lifting operations on either component of the
- disjoint sum.
-
- Moreover, there are total operations @{text "theory_of"} and @{text
- "proof_of"} to convert a generic context into either kind: a theory
- can always be selected from the sum, while a proof context might
- have to be constructed by an ad-hoc @{text "init"} operation.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type Context.generic} \\
- @{index_ML Context.theory_of: "Context.generic -> theory"} \\
- @{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type Context.generic} is the direct sum of @{ML_type
- "theory"} and @{ML_type "Proof.context"}, with the datatype
- constructors @{ML "Context.Theory"} and @{ML "Context.Proof"}.
-
- \item @{ML Context.theory_of}~@{text "context"} always produces a
- theory from the generic @{text "context"}, using @{ML
- "ProofContext.theory_of"} as required.
-
- \item @{ML Context.proof_of}~@{text "context"} always produces a
- proof context from the generic @{text "context"}, using @{ML
- "ProofContext.init"} as required (note that this re-initializes the
- context data with each invocation).
-
- \end{description}
-*}
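-
-text {*
-  The following sketch illustrates uniform access to the background
-  theory via the generic sum:
-*}
-
-ML {*
-  val ctxt = ProofContext.init @{theory};
-  val thy1 = Context.theory_of (Context.Theory @{theory});
-  val thy2 = Context.theory_of (Context.Proof ctxt);
-*}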
-
-
-subsection {* Context data \label{sec:context-data} *}
-
-text {*
- The main purpose of theory and proof contexts is to manage arbitrary
- data. New data types can be declared incrementally at compile time.
- There are separate declaration mechanisms for any of the three kinds
- of contexts: theory, proof, generic.
-
- \paragraph{Theory data} may refer to destructive entities, which are
- maintained in direct correspondence to the linear evolution of
- theory values, including explicit copies.\footnote{Most existing
- instances of destructive theory data are merely historical relics
- (e.g.\ the destructive theorem storage, and destructive hints for
- the Simplifier and Classical rules).} A theory data declaration
- needs to implement the following SML signature:
-
- \medskip
- \begin{tabular}{ll}
- @{text "\<type> T"} & representing type \\
- @{text "\<val> empty: T"} & empty default value \\
- @{text "\<val> copy: T \<rightarrow> T"} & refresh impure data \\
- @{text "\<val> extend: T \<rightarrow> T"} & re-initialize on import \\
- @{text "\<val> merge: T \<times> T \<rightarrow> T"} & join on import \\
- \end{tabular}
- \medskip
-
- \noindent The @{text "empty"} value acts as initial default for
- \emph{any} theory that does not declare actual data content; @{text
- "copy"} maintains persistent integrity for impure data, it is just
- the identity for pure values; @{text "extend"} is acts like a
- unitary version of @{text "merge"}, both operations should also
- include the functionality of @{text "copy"} for impure data.
-
- \paragraph{Proof context data} is purely functional. A declaration
- needs to implement the following SML signature:
-
- \medskip
- \begin{tabular}{ll}
- @{text "\<type> T"} & representing type \\
- @{text "\<val> init: theory \<rightarrow> T"} & produce initial value \\
- \end{tabular}
- \medskip
-
- \noindent The @{text "init"} operation is supposed to produce a pure
- value from the given background theory.
-
- \paragraph{Generic data} provides a hybrid interface for both theory
- and proof data. The declaration is essentially the same as for
- (pure) theory data, without @{text "copy"}. The @{text "init"}
- operation for proof contexts merely selects the current data value
- from the background theory.
-
- \bigskip A data declaration of type @{text "T"} results in the
- following interface:
-
- \medskip
- \begin{tabular}{ll}
- @{text "init: theory \<rightarrow> theory"} \\
- @{text "get: context \<rightarrow> T"} \\
- @{text "put: T \<rightarrow> context \<rightarrow> context"} \\
- @{text "map: (T \<rightarrow> T) \<rightarrow> context \<rightarrow> context"} \\
- \end{tabular}
- \medskip
-
- \noindent Here @{text "init"} is only applicable to impure theory
- data to install a fresh copy persistently (destructive update on
- uninitialized data has no permanent effect). The other operations provide
- access for the particular kind of context (theory, proof, or generic
- context). Note that this is a safe interface: there is no other way
- to access the corresponding data slot of a context. By keeping
- these operations private, a component may maintain abstract values
- authentically, without other components interfering.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_functor TheoryDataFun} \\
- @{index_ML_functor ProofDataFun} \\
- @{index_ML_functor GenericDataFun} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_functor TheoryDataFun}@{text "(spec)"} declares data for
- type @{ML_type theory} according to the specification provided as
- argument structure. The resulting structure provides data init and
- access operations as described above.
-
- \item @{ML_functor ProofDataFun}@{text "(spec)"} is analogous to
- @{ML_functor TheoryDataFun} for type @{ML_type Proof.context}.
-
- \item @{ML_functor GenericDataFun}@{text "(spec)"} is analogous to
- @{ML_functor TheoryDataFun} for type @{ML_type Context.generic}.
-
- \end{description}
-*}
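-
-text {*
-  The following schematic sketch declares a theory data slot holding
-  a list of strings, following the SML signature given above (the
-  structure name is invented, and the actual functor argument may
-  take a slightly different form):
-*}
-
-ML {*
-  structure Data = TheoryDataFun
-  (
-    type T = string list;
-    val empty = [];
-    val copy = I;      (*pure data: identity*)
-    val extend = I;
-    fun merge (xs, ys) = xs @ ys;   (*naive join, for this sketch*)
-  );
-
-  (*access operations generated by the functor*)
-  val thy' = Data.put ["foo"] @{theory};
-  val xs = Data.get thy';
-*}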
-
-
-section {* Names \label{sec:names} *}
-
-text {*
- In principle, a name is just a string, but there are various
- conventions for encoding additional structure. For example, ``@{text
- "Foo.bar.baz"}'' is considered a qualified name consisting of
- three basic name components. The individual constituents of a name
- may have further substructure, e.g.\ the string
- ``\verb,\,\verb,<alpha>,'' encodes as a single symbol.
-*}
-
-
-subsection {* Strings of symbols *}
-
-text {*
- \glossary{Symbol}{The smallest unit of text in Isabelle, subsumes
- plain ASCII characters as well as an infinite collection of named
- symbols (for greek, math etc.).}
-
- A \emph{symbol} constitutes the smallest textual unit in Isabelle
- --- raw characters are normally not encountered at all. Isabelle
- strings consist of a sequence of symbols, represented as a packed
- string or a list of strings. Each symbol is in itself a small
- string, which has one of the following forms:
-
- \begin{enumerate}
-
- \item a single ASCII character ``@{text "c"}'', for example
- ``\verb,a,'',
-
- \item a regular symbol ``\verb,\,\verb,<,@{text "ident"}\verb,>,'',
- for example ``\verb,\,\verb,<alpha>,'',
-
- \item a control symbol ``\verb,\,\verb,<^,@{text "ident"}\verb,>,'',
- for example ``\verb,\,\verb,<^bold>,'',
-
- \item a raw symbol ``\verb,\,\verb,<^raw:,@{text text}\verb,>,''
- where @{text text} consists of printable characters excluding
- ``\verb,.,'' and ``\verb,>,'', for example
- ``\verb,\,\verb,<^raw:$\sum_{i = 1}^n$>,'',
-
- \item a numbered raw control symbol ``\verb,\,\verb,<^raw,@{text
- n}\verb,>,'' where @{text n} consists of digits, for example
- ``\verb,\,\verb,<^raw42>,''.
-
- \end{enumerate}
-
- \noindent The @{text "ident"} syntax for symbol names is @{text
- "letter (letter | digit)\<^sup>*"}, where @{text "letter =
- A..Za..z"} and @{text "digit = 0..9"}. There are infinitely many
- regular symbols and control symbols, but a fixed collection of
- standard symbols is treated specifically. For example,
- ``\verb,\,\verb,<alpha>,'' is classified as a letter, which means it
- may occur within regular Isabelle identifiers.
-
- Since the character set underlying Isabelle symbols is 7-bit ASCII
- and 8-bit characters are passed through transparently, Isabelle may
- also process Unicode/UCS data in UTF-8 encoding. Unicode provides
- its own collection of mathematical symbols, but there is no built-in
- link to the standard collection of Isabelle.
-
- \medskip Output of Isabelle symbols depends on the print mode
- (\secref{FIXME}). For example, the standard {\LaTeX} setup of the
- Isabelle document preparation system would present
- ``\verb,\,\verb,<alpha>,'' as @{text "\<alpha>"}, and
- ``\verb,\,\verb,<^bold>,\verb,\,\verb,<alpha>,'' as @{text
- "\<^bold>\<alpha>"}.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type "Symbol.symbol"} \\
- @{index_ML Symbol.explode: "string -> Symbol.symbol list"} \\
- @{index_ML Symbol.is_letter: "Symbol.symbol -> bool"} \\
- @{index_ML Symbol.is_digit: "Symbol.symbol -> bool"} \\
- @{index_ML Symbol.is_quasi: "Symbol.symbol -> bool"} \\
- @{index_ML Symbol.is_blank: "Symbol.symbol -> bool"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type "Symbol.sym"} \\
- @{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type "Symbol.symbol"} represents individual Isabelle
- symbols; this is an alias for @{ML_type "string"}.
-
- \item @{ML "Symbol.explode"}~@{text "str"} produces a symbol list
- from the packed form. This function supersedes @{ML
- "String.explode"} for virtually all purposes of manipulating text in
- Isabelle!
-
- \item @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
- "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard
- symbols according to fixed syntactic conventions of Isabelle, cf.\
- \cite{isabelle-isar-ref}.
-
- \item @{ML_type "Symbol.sym"} is a concrete datatype that represents
- the different kinds of symbols explicitly, with constructors @{ML
- "Symbol.Char"}, @{ML "Symbol.Sym"}, @{ML "Symbol.Ctrl"}, @{ML
- "Symbol.Raw"}.
-
- \item @{ML "Symbol.decode"} converts the string representation of a
- symbol into the datatype version.
-
- \end{description}
-*}
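-
-text {*
-  For example, exploding a plain ASCII string yields one symbol per
-  character:
-*}
-
-ML {*
-  val syms = Symbol.explode "abc";        (*["a", "b", "c"]*)
-  val ok = Symbol.is_letter (hd syms);    (*true*)
-*}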
-
-
-subsection {* Basic names \label{sec:basic-names} *}
-
-text {*
- A \emph{basic name} essentially consists of a single Isabelle
- identifier. There are conventions to mark separate classes of basic
- names, by attaching a suffix of underscores (@{text "_"}): one
- underscore means \emph{internal name}, two underscores means
- \emph{Skolem name}, three underscores means \emph{internal Skolem
- name}.
-
- For example, the basic name @{text "foo"} has the internal version
- @{text "foo_"}, with Skolem versions @{text "foo__"} and @{text
- "foo___"}, respectively.
-
- These special versions provide copies of the basic name space, apart
- from anything that normally appears in the user text. For example,
- system-generated variables in Isar proof contexts are usually marked
- as internal, which prevents mysterious name references like @{text
- "xaa"} from appearing in the text.
-
- \medskip Manipulating binding scopes often requires on-the-fly
- renamings. A \emph{name context} contains a collection of already
- used names. The @{text "declare"} operation adds names to the
- context.
-
- The @{text "invents"} operation derives a number of fresh names from
- a given starting point. For example, the first three names derived
- from @{text "a"} are @{text "a"}, @{text "b"}, @{text "c"}.
-
- The @{text "variants"} operation produces fresh names by
- incrementing tentative names as base-26 numbers (with digits @{text
- "a..z"}) until all clashes are resolved. For example, name @{text
- "foo"} results in variants @{text "fooa"}, @{text "foob"}, @{text
- "fooc"}, \dots, @{text "fooaa"}, @{text "fooab"} etc.; each renaming
- step picks the next unused variant from this sequence.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML Name.internal: "string -> string"} \\
- @{index_ML Name.skolem: "string -> string"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type Name.context} \\
- @{index_ML Name.context: Name.context} \\
- @{index_ML Name.declare: "string -> Name.context -> Name.context"} \\
- @{index_ML Name.invents: "Name.context -> string -> int -> string list"} \\
- @{index_ML Name.variants: "string list -> Name.context -> string list * Name.context"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML Name.internal}~@{text "name"} produces an internal name
- by adding one underscore.
-
- \item @{ML Name.skolem}~@{text "name"} produces a Skolem name by
- adding two underscores.
-
- \item @{ML_type Name.context} represents the context of already used
- names; the initial value is @{ML "Name.context"}.
-
- \item @{ML Name.declare}~@{text "name"} enters a used name into the
- context.
-
- \item @{ML Name.invents}~@{text "context name n"} produces @{text
- "n"} fresh names derived from @{text "name"}.
-
- \item @{ML Name.variants}~@{text "names context"} produces fresh
- variants of @{text "names"}; the result is entered into the context.
-
- \end{description}
-*}
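-
-text {*
-  A minimal sketch of fresh name generation, starting from the empty
-  name context:
-*}
-
-ML {*
-  val x' = Name.internal "x";   (*"x_"*)
-  val ctxt = Name.context |> Name.declare "a" |> Name.declare "b";
-  val fresh = Name.invents ctxt "a" 3;   (*fresh names, e.g. ["c", "d", "e"]*)
-*}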
-
-
-subsection {* Indexed names *}
-
-text {*
- An \emph{indexed name} (or @{text "indexname"}) is a pair of a basic
- name and a natural number. This representation allows efficient
- renaming by incrementing the second component only. The canonical
- way to rename two collections of indexnames apart from each other is
- this: determine the maximum index @{text "maxidx"} of the first
- collection, then increment all indexes of the second collection by
- @{text "maxidx + 1"}; the maximum index of an empty collection is
- @{text "-1"}.
-
- Occasionally, basic names and indexed names are injected into the
- same pair type: the (improper) indexname @{text "(x, -1)"} is used
- to encode basic names.
-
- \medskip Isabelle syntax observes the following rules for
- representing an indexname @{text "(x, i)"} as a packed string:
-
- \begin{itemize}
-
- \item @{text "?x"} if @{text "x"} does not end with a digit and @{text "i = 0"},
-
- \item @{text "?xi"} if @{text "x"} does not end with a digit,
-
- \item @{text "?x.i"} otherwise.
-
- \end{itemize}
-
- Indexnames may acquire large index numbers over time. Results are
- normalized towards @{text "0"} at certain checkpoints, notably at
- the end of a proof. This works by producing variants of the
- corresponding basic name components. For example, the collection
- @{text "?x1, ?x7, ?x42"} becomes @{text "?x, ?xa, ?xb"}.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type indexname} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type indexname} represents indexed names. This is an
- abbreviation for @{ML_type "string * int"}. The second component is
- usually non-negative, except for situations where @{text "(x, -1)"}
- is used to embed basic names into this type.
-
- \end{description}
-*}
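-
-text {*
-  The renaming scheme described above amounts to simple arithmetic on
-  the index component; a hypothetical helper might look like this:
-*}
-
-ML {*
-  (*rename an indexname apart from a collection with maximum index maxidx*)
-  fun rename_apart maxidx (x, i) = (x, i + maxidx + 1) : indexname;
-  val v = rename_apart 41 ("x", 1);   (*("x", 43)*)
-*}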
-
-
-subsection {* Qualified names and name spaces *}
-
-text {*
- A \emph{qualified name} consists of a non-empty sequence of basic
- name components. The packed representation uses a dot as separator,
- as in ``@{text "A.b.c"}''. The last component is called \emph{base}
- name, the remaining prefix \emph{qualifier} (which may be empty).
- The idea of qualified names is to encode nested structures by
- recording the access paths as qualifiers. For example, an item
- named ``@{text "A.b.c"}'' may be understood as a local entity @{text
- "c"}, within a local structure @{text "b"}, within a global
- structure @{text "A"}. Typically, name space hierarchies consist of
- 1--2 levels of qualification, but this need not always be so.
-
- The empty name is commonly used as an indication of unnamed
- entities, whenever this makes any sense. The basic operations on
- qualified names are smart enough to pass through such improper names
- unchanged.
-
- \medskip A @{text "naming"} policy tells how to turn a name
- specification into a fully qualified internal name (by the @{text
- "full"} operation), and how fully qualified names may be accessed
- externally. For example, the default naming policy is to prefix an
- implicit path: @{text "full x"} produces @{text "path.x"}, and the
- standard accesses for @{text "path.x"} include both @{text "x"} and
- @{text "path.x"}. Normally, the naming is implicit in the theory or
- proof context; there are separate versions of the corresponding
- operations.
-
- \medskip A @{text "name space"} manages a collection of fully
- internalized names, together with a mapping between external names
- and internal names (in both directions). The corresponding @{text
- "intern"} and @{text "extern"} operations are mostly used for
- parsing and printing only! The @{text "declare"} operation augments
- a name space according to the accesses determined by the naming
- policy.
-
- \medskip As a general principle, there is a separate name space for
- each kind of formal entity, e.g.\ logical constant, type
- constructor, type class, theorem. It is usually clear from the
- occurrence in concrete syntax (or from the scope) which kind of
- entity a name refers to. For example, the very same name @{text
- "c"} may be used uniformly for a constant, type constructor, and
- type class.
-
- There are common schemes to name theorems systematically, according
- to the name of the main logical entity involved, e.g.\ @{text
- "c.intro"} for a canonical theorem related to constant @{text "c"}.
- This technique of mapping names from one space into another requires
- some care in order to avoid conflicts. In particular, theorem names
- derived from a type constructor or type class are better suffixed in
- addition to the usual qualification, e.g.\ @{text "c_type.intro"}
- and @{text "c_class.intro"} for theorems related to type @{text "c"}
- and class @{text "c"}, respectively.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML NameSpace.base: "string -> string"} \\
- @{index_ML NameSpace.qualifier: "string -> string"} \\
- @{index_ML NameSpace.append: "string -> string -> string"} \\
- @{index_ML NameSpace.implode: "string list -> string"} \\
- @{index_ML NameSpace.explode: "string -> string list"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type NameSpace.naming} \\
- @{index_ML NameSpace.default_naming: NameSpace.naming} \\
- @{index_ML NameSpace.add_path: "string -> NameSpace.naming -> NameSpace.naming"} \\
- @{index_ML NameSpace.full_name: "NameSpace.naming -> binding -> string"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML_type NameSpace.T} \\
- @{index_ML NameSpace.empty: NameSpace.T} \\
- @{index_ML NameSpace.merge: "NameSpace.T * NameSpace.T -> NameSpace.T"} \\
- @{index_ML NameSpace.declare: "NameSpace.naming -> binding -> NameSpace.T -> string * NameSpace.T"} \\
- @{index_ML NameSpace.intern: "NameSpace.T -> string -> string"} \\
- @{index_ML NameSpace.extern: "NameSpace.T -> string -> string"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML NameSpace.base}~@{text "name"} returns the base name of a
- qualified name.
-
- \item @{ML NameSpace.qualifier}~@{text "name"} returns the qualifier
- of a qualified name.
-
- \item @{ML NameSpace.append}~@{text "name\<^isub>1 name\<^isub>2"}
- appends two qualified names.
-
- \item @{ML NameSpace.implode}~@{text "names"} and @{ML
- NameSpace.explode}~@{text "name"} convert between the packed string
- representation and the explicit list form of qualified names.
-
- \item @{ML_type NameSpace.naming} represents the abstract concept of
- a naming policy.
-
- \item @{ML NameSpace.default_naming} is the default naming policy.
- In a theory context, this is usually augmented by a path prefix
- consisting of the theory name.
-
- \item @{ML NameSpace.add_path}~@{text "path naming"} augments the
- naming policy by extending its path component.
-
- \item @{ML NameSpace.full_name}~@{text "naming binding"} turns a name
- binding (usually a basic name) into the fully qualified
- internal name, according to the given naming policy.
-
- \item @{ML_type NameSpace.T} represents name spaces.
-
- \item @{ML NameSpace.empty} and @{ML NameSpace.merge}~@{text
- "(space\<^isub>1, space\<^isub>2)"} are the canonical operations for
- maintaining name spaces according to theory data management
- (\secref{sec:context-data}).
-
- \item @{ML NameSpace.declare}~@{text "naming binding space"} enters a
- name binding as fully qualified internal name into the name space,
- with external accesses determined by the naming policy.
-
- \item @{ML NameSpace.intern}~@{text "space name"} internalizes a
- (partially qualified) external name.
-
- This operation is mostly for parsing! Note that fully qualified
- names stemming from declarations are produced via @{ML
- "NameSpace.full_name"} and @{ML "NameSpace.declare"}
- (or their derivatives for @{ML_type theory} and
- @{ML_type Proof.context}).
-
- \item @{ML NameSpace.extern}~@{text "space name"} externalizes a
- (fully qualified) internal name.
-
- This operation is mostly for printing! Note that unqualified names are
- produced via @{ML NameSpace.base}.
-
- \end{description}
-*}
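-
-text {*
-  The following sketch declares a binding under a path prefix and
-  converts between internal and external names (assuming the usual
-  @{text "Binding.name"} constructor for bindings):
-*}
-
-ML {*
-  val naming = NameSpace.add_path "path" NameSpace.default_naming;
-  val (full, space) =
-    NameSpace.declare naming (Binding.name "x") NameSpace.empty;
-  val internal = NameSpace.intern space "x";    (*"path.x"*)
-  val external = NameSpace.extern space full;   (*"x"*)
-*}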
-
-end
--- a/doc-src/IsarImplementation/Thy/proof.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,332 +0,0 @@
-
-(* $Id$ *)
-
-theory "proof" imports base begin
-
-chapter {* Structured proofs *}
-
-section {* Variables \label{sec:variables} *}
-
-text {*
- Any variable that is not explicitly bound by @{text "\<lambda>"}-abstraction
- is considered as ``free''. Logically, free variables act like
- outermost universal quantification at the sequent level: @{text
- "A\<^isub>1(x), \<dots>, A\<^isub>n(x) \<turnstile> B(x)"} means that the result
- holds \emph{for all} values of @{text "x"}. Free variables for
- terms (not types) can be fully internalized into the logic: @{text
- "\<turnstile> B(x)"} and @{text "\<turnstile> \<And>x. B(x)"} are interchangeable, provided
- that @{text "x"} does not occur elsewhere in the context.
- Inspecting @{text "\<turnstile> \<And>x. B(x)"} more closely, we see that inside the
- quantifier, @{text "x"} is essentially ``arbitrary, but fixed'',
- while from outside it appears as a place-holder for instantiation
- (thanks to @{text "\<And>"} elimination).
-
- The Pure logic represents the idea of variables being either inside
- or outside the current scope by providing separate syntactic
- categories for \emph{fixed variables} (e.g.\ @{text "x"}) vs.\
- \emph{schematic variables} (e.g.\ @{text "?x"}). Incidentally, a
- universal result @{text "\<turnstile> \<And>x. B(x)"} has the HHF normal form @{text
- "\<turnstile> B(?x)"}, which represents its generality nicely without requiring
- an explicit quantifier. The same principle works for type
- variables: @{text "\<turnstile> B(?\<alpha>)"} represents the idea of ``@{text "\<turnstile>
- \<forall>\<alpha>. B(\<alpha>)"}'' without demanding a truly polymorphic framework.
-
- \medskip Additional care is required to treat type variables in a
- way that facilitates type-inference. In principle, term variables
- depend on type variables, which means that type variables would have
- to be declared first. For example, a raw type-theoretic framework
- would demand the context to be constructed in stages as follows:
- @{text "\<Gamma> = \<alpha>: type, x: \<alpha>, a: A(x\<^isub>\<alpha>)"}.
-
- We allow a slightly less formalistic mode of operation: term
- variables @{text "x"} are fixed without specifying a type yet
- (essentially \emph{all} potential occurrences of some instance
- @{text "x\<^isub>\<tau>"} are fixed); the first occurrence of @{text "x"}
- within a specific term assigns its most general type, which is then
- maintained consistently in the context. The above example becomes
- @{text "\<Gamma> = x: term, \<alpha>: type, A(x\<^isub>\<alpha>)"}, where type @{text
- "\<alpha>"} is fixed \emph{after} term @{text "x"}, and the constraint
- @{text "x :: \<alpha>"} is an implicit consequence of the occurrence of
- @{text "x\<^isub>\<alpha>"} in the subsequent proposition.
-
- This twist of dependencies is also accommodated by the reverse
- operation of exporting results from a context: a type variable
- @{text "\<alpha>"} is considered fixed as long as it occurs in some fixed
- term variable of the context. For example, exporting @{text "x:
- term, \<alpha>: type \<turnstile> x\<^isub>\<alpha> = x\<^isub>\<alpha>"} produces in the first step
- @{text "x: term \<turnstile> x\<^isub>\<alpha> = x\<^isub>\<alpha>"} for fixed @{text "\<alpha>"},
- and only in the second step @{text "\<turnstile> ?x\<^isub>?\<^isub>\<alpha> =
- ?x\<^isub>?\<^isub>\<alpha>"} for schematic @{text "?x"} and @{text "?\<alpha>"}.
-
- \medskip The Isabelle/Isar proof context manages the gory details of
- term vs.\ type variables, with high-level principles for moving the
- frontier between fixed and schematic variables.
-
- The @{text "add_fixes"} operation explictly declares fixed
- variables; the @{text "declare_term"} operation absorbs a term into
- a context by fixing new type variables and adding syntactic
- constraints.
-
- The @{text "export"} operation is able to perform the main work of
- generalizing term and type variables as sketched above, assuming
- that fixed variables and terms have been declared properly.
-
- The @{text "import"} operation makes a generalized fact a genuine
- part of the context, by inventing fixed variables for the schematic
- ones. The effect can be reversed by using @{text "export"} later,
- potentially with an extended context; the result is equivalent to
- the original modulo renaming of schematic variables.
-
- The @{text "focus"} operation provides a variant of @{text "import"}
- for nested propositions (with explicit quantification): @{text
- "\<And>x\<^isub>1 \<dots> x\<^isub>n. B(x\<^isub>1, \<dots>, x\<^isub>n)"} is
- decomposed by inventing fixed variables @{text "x\<^isub>1, \<dots>,
- x\<^isub>n"} for the body.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML Variable.add_fixes: "
- string list -> Proof.context -> string list * Proof.context"} \\
- @{index_ML Variable.variant_fixes: "
- string list -> Proof.context -> string list * Proof.context"} \\
- @{index_ML Variable.declare_term: "term -> Proof.context -> Proof.context"} \\
- @{index_ML Variable.declare_constraints: "term -> Proof.context -> Proof.context"} \\
- @{index_ML Variable.export: "Proof.context -> Proof.context -> thm list -> thm list"} \\
- @{index_ML Variable.polymorphic: "Proof.context -> term list -> term list"} \\
- @{index_ML Variable.import_thms: "bool -> thm list -> Proof.context ->
- ((ctyp list * cterm list) * thm list) * Proof.context"} \\
- @{index_ML Variable.focus: "cterm -> Proof.context -> (cterm list * cterm) * Proof.context"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML Variable.add_fixes}~@{text "xs ctxt"} fixes term
- variables @{text "xs"}, returning the resulting internal names. By
- default, the internal representation coincides with the external
- one, which also means that the given variables must not be fixed
- already. There is a different policy within a local proof body: the
- given names are just hints for newly invented Skolem variables.
-
- \item @{ML Variable.variant_fixes} is similar to @{ML
- Variable.add_fixes}, but always produces fresh variants of the given
- names.
-
- \item @{ML Variable.declare_term}~@{text "t ctxt"} declares term
- @{text "t"} to belong to the context. This automatically fixes new
- type variables, but not term variables. Syntactic constraints for
- type and term variables are declared uniformly, though.
-
- \item @{ML Variable.declare_constraints}~@{text "t ctxt"} declares
- syntactic constraints from term @{text "t"}, without making it part
- of the context yet.
-
- \item @{ML Variable.export}~@{text "inner outer thms"} generalizes
- fixed type and term variables in @{text "thms"} according to the
- difference of the @{text "inner"} and @{text "outer"} context,
- following the principles sketched above.
-
- \item @{ML Variable.polymorphic}~@{text "ctxt ts"} generalizes type
- variables in @{text "ts"} as far as possible, even those occurring
- in fixed term variables. The default policy of type-inference is to
- fix newly introduced type variables, which is essentially reversed
- with @{ML Variable.polymorphic}: here the given terms are detached
- from the context as far as possible.
-
- \item @{ML Variable.import_thms}~@{text "open thms ctxt"} invents fixed
- type and term variables for the schematic ones occurring in @{text
- "thms"}. The @{text "open"} flag indicates whether the fixed names
- should be accessible to the user; otherwise newly introduced names
- are marked as ``internal'' (\secref{sec:names}).
-
- \item @{ML Variable.focus}~@{text B} decomposes the outermost @{text
- "\<And>"} prefix of proposition @{text "B"}.
-
- \end{description}
-*}
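-
-text {*
-  The following minimal sketch (hypothetical, using Pure means only)
-  illustrates the fix/export round trip, with @{ML Goal.prove} of
-  \secref{sec:results} providing a trivial local result: after
-  export, the fixed @{text "x"} has become schematic again.
-*}
-
-ML {*
-  (*fix x, prove x == x in the extended context, then export;
-    the exported theorem reads ?x == ?x*)
-  val ctxt0 = @{context};
-  val ([x], ctxt1) = Variable.add_fixes ["x"] ctxt0;
-  val goal = Syntax.read_prop ctxt1 "x == x";
-  val th = Goal.prove ctxt1 [] [] goal (fn _ => rtac @{thm reflexive} 1);
-  val [th'] = Variable.export ctxt1 ctxt0 [th];
-*}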
-
-
-section {* Assumptions \label{sec:assumptions} *}
-
-text {*
- An \emph{assumption} is a proposition that is postulated in the
- current context. Local conclusions may use assumptions as
- additional facts, but this imposes implicit hypotheses that weaken
- the overall statement.
-
- Assumptions are restricted to fixed non-schematic statements, i.e.\
- all generality needs to be expressed by explicit quantifiers.
- Nevertheless, the result will be in HHF normal form with outermost
- quantifiers stripped. For example, by assuming @{text "\<And>x :: \<alpha>. P
- x"} we get @{text "\<And>x :: \<alpha>. P x \<turnstile> P ?x"} for schematic @{text "?x"}
- of fixed type @{text "\<alpha>"}. Local derivations accumulate more and
- more explicit references to hypotheses: @{text "A\<^isub>1, \<dots>,
- A\<^isub>n \<turnstile> B"} where @{text "A\<^isub>1, \<dots>, A\<^isub>n"} need to
- be covered by the assumptions of the current context.
-
- \medskip The @{text "add_assms"} operation augments the context by
- local assumptions, which are parameterized by an arbitrary @{text
- "export"} rule (see below).
-
- The @{text "export"} operation moves facts from a (larger) inner
- context into a (smaller) outer context, by discharging the
- difference of the assumptions as specified by the associated export
- rules. Note that the discharged portion is determined by the
- difference of the contexts, not by the facts being exported! There is a
- separate flag to indicate a goal context, where the result is meant
- to refine an enclosing sub-goal of a structured proof state (cf.\
- \secref{sec:isar-proof-state}).
-
- \medskip The most basic export rule discharges assumptions directly
- by means of the @{text "\<Longrightarrow>"} introduction rule:
- \[
- \infer[(@{text "\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
- \]
-
- The variant for goal refinements marks the newly introduced
- premises, which causes the canonical Isar goal refinement scheme to
- enforce unification with local premises within the goal:
- \[
- \infer[(@{text "#\<Longrightarrow>_intro"})]{@{text "\<Gamma> \\ A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
- \]
-
- \medskip Alternative versions of assumptions may perform arbitrary
- transformations on export, as long as the corresponding portion of
- hypotheses is removed from the given facts. For example, a local
- definition works by fixing @{text "x"} and assuming @{text "x \<equiv> t"},
- with the following export rule to reverse the effect:
- \[
- \infer[(@{text "\<equiv>-expand"})]{@{text "\<Gamma> \\ x \<equiv> t \<turnstile> B t"}}{@{text "\<Gamma> \<turnstile> B x"}}
- \]
- This works, because the assumption @{text "x \<equiv> t"} was introduced in
- a context with @{text "x"} being fresh, so @{text "x"} does not
- occur in @{text "\<Gamma>"} here.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type Assumption.export} \\
- @{index_ML Assumption.assume: "cterm -> thm"} \\
- @{index_ML Assumption.add_assms:
- "Assumption.export ->
- cterm list -> Proof.context -> thm list * Proof.context"} \\
- @{index_ML Assumption.add_assumes: "
- cterm list -> Proof.context -> thm list * Proof.context"} \\
- @{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type Assumption.export} represents arbitrary export
- rules, i.e.\ any function of type @{ML_type "bool -> cterm list -> thm -> thm"},
- where the @{ML_type "bool"} indicates goal mode, and the @{ML_type
- "cterm list"} the collection of assumptions to be discharged
- simultaneously.
-
- \item @{ML Assumption.assume}~@{text "A"} turns proposition @{text
- "A"} into a raw assumption @{text "A \<turnstile> A'"}, where the conclusion
- @{text "A'"} is in HHF normal form.
-
- \item @{ML Assumption.add_assms}~@{text "r As"} augments the context
- by assumptions @{text "As"} with export rule @{text "r"}. The
- resulting facts are hypothetical theorems as produced by the raw
- @{ML Assumption.assume}.
-
- \item @{ML Assumption.add_assumes}~@{text "As"} is a special case of
- @{ML Assumption.add_assms} where the export rule performs @{text
- "\<Longrightarrow>_intro"} or @{text "#\<Longrightarrow>_intro"}, depending on goal mode.
-
- \item @{ML Assumption.export}~@{text "is_goal inner outer thm"}
- exports result @{text "thm"} from the @{text "inner"} context
- back into the @{text "outer"} one; @{text "is_goal = true"} means
- this is a goal context. The result is in HHF normal form. Note
- that @{ML "ProofContext.export"} combines @{ML "Variable.export"}
- and @{ML "Assumption.export"} in the canonical way.
-
- \end{description}
-*}
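-
-text {*
-  A minimal, hypothetical sketch of the assumption round trip in
-  Pure: assuming @{text "A"} yields the hypothetical fact @{text "A \<turnstile>
-  A"}; exporting back to the original context discharges the
-  hypothesis by @{text "\<Longrightarrow>"} introduction.
-*}
-
-ML {*
-  (*assume A, then export in non-goal mode: result is A ==> A*)
-  val ctxt0 = @{context};
-  val thy = ProofContext.theory_of ctxt0;
-  val A = Thm.cterm_of thy (Syntax.read_prop ctxt0 "PROP A");
-  val ([th], ctxt1) = Assumption.add_assumes [A] ctxt0;
-  val th' = Assumption.export false ctxt1 ctxt0 th;
-*}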
-
-
-section {* Results \label{sec:results} *}
-
-text {*
- Local results are established by monotonic reasoning from facts
- within a context. This allows common combinations of theorems,
- e.g.\ via @{text "\<And>/\<Longrightarrow>"} elimination, resolution rules, or equational
- reasoning, see \secref{sec:thms}. Unaccounted context manipulations
- should be avoided, notably raw @{text "\<And>/\<Longrightarrow>"} introduction or ad-hoc
- references to free variables or assumptions not present in the proof
- context.
-
- \medskip The @{text "SUBPROOF"} combinator allows to structure a
- tactical proof recursively by decomposing a selected sub-goal:
- @{text "(\<And>x. A(x) \<Longrightarrow> B(x)) \<Longrightarrow> \<dots>"} is turned into @{text "B(x) \<Longrightarrow> \<dots>"}
- after fixing @{text "x"} and assuming @{text "A(x)"}. This means
- the tactic needs to solve the conclusion, but may use the premise as
- a local fact, for locally fixed variables.
-
- The @{text "prove"} operation provides an interface for structured
- backwards reasoning under program control, with some explicit sanity
- checks of the result. The goal context can be augmented by
- additional fixed variables (cf.\ \secref{sec:variables}) and
- assumptions (cf.\ \secref{sec:assumptions}), which will be available
- as local facts during the proof and discharged into implications in
- the result. Type and term variables are generalized as usual,
- according to the context.
-
- The @{text "obtain"} operation produces results by eliminating
- existing facts by means of a given tactic. This acts like a dual
- conclusion: the proof demonstrates that the context may be augmented
- by certain fixed variables and assumptions. See also
- \cite{isabelle-isar-ref} for the user-level @{text "\<OBTAIN>"} and
- @{text "\<GUESS>"} elements. Final results, which may not refer to
- the parameters in the conclusion, need to be exported explicitly into
- the original context.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML SUBPROOF:
- "({context: Proof.context, schematics: ctyp list * cterm list,
- params: cterm list, asms: cterm list, concl: cterm,
- prems: thm list} -> tactic) -> Proof.context -> int -> tactic"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML Goal.prove: "Proof.context -> string list -> term list -> term ->
- ({prems: thm list, context: Proof.context} -> tactic) -> thm"} \\
- @{index_ML Goal.prove_multi: "Proof.context -> string list -> term list -> term list ->
- ({prems: thm list, context: Proof.context} -> tactic) -> thm list"} \\
- \end{mldecls}
- \begin{mldecls}
- @{index_ML Obtain.result: "(Proof.context -> tactic) ->
- thm list -> Proof.context -> (cterm list * thm list) * Proof.context"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML SUBPROOF}~@{text "tac"} decomposes the structure of a
- particular sub-goal, producing an extended context and a reduced
- goal, which needs to be solved by the given tactic. All schematic
- parameters of the goal are imported into the context as fixed ones,
- which may not be instantiated in the sub-proof.
-
- \item @{ML Goal.prove}~@{text "ctxt xs As C tac"} states goal @{text
- "C"} in the context augmented by fixed variables @{text "xs"} and
- assumptions @{text "As"}, and applies tactic @{text "tac"} to solve
- it. The latter may depend on the local assumptions being presented
- as facts. The result is in HHF normal form.
-
- \item @{ML Goal.prove_multi} is similar to @{ML Goal.prove}, but
- states several conclusions simultaneously. The goal is encoded by
- means of Pure conjunction; @{ML Goal.conjunction_tac} will turn this
- into a collection of individual subgoals.
-
- \item @{ML Obtain.result}~@{text "tac thms ctxt"} eliminates the
- given facts using a tactic, which results in additional fixed
- variables and assumptions in the context. Final results need to be
- exported explicitly.
-
- \end{description}
-*}
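-
-text {*
-  A minimal, hypothetical sketch of @{ML Goal.prove}: the assumption
-  is presented as a local fact to the tactic and discharged into an
-  implication in the result.
-*}
-
-ML {*
-  (*prove A from the local assumption A; the result has the form
-    A ==> A, generalized according to the context*)
-  val ctxt = @{context};
-  val A = Syntax.read_prop ctxt "PROP A";
-  val th = Goal.prove ctxt [] [A] A
-    (fn {prems = [prem], ...} => rtac prem 1);
-*}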
-
-end
--- a/doc-src/IsarImplementation/Thy/tactic.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,420 +0,0 @@
-
-(* $Id$ *)
-
-theory tactic imports base begin
-
-chapter {* Tactical reasoning *}
-
-text {*
- Tactical reasoning works by refining the initial claim in a
- backwards fashion, until a solved form is reached. A @{text "goal"}
- consists of several subgoals that need to be solved in order to
- achieve the main statement; zero subgoals means that the proof may
- be finished. A @{text "tactic"} is a refinement operation that maps
- a goal to a lazy sequence of potential successors. A @{text
- "tactical"} is a combinator for composing tactics.
-*}
-
-
-section {* Goals \label{sec:tactical-goals} *}
-
-text {*
- Isabelle/Pure represents a goal\glossary{Tactical goal}{A theorem of
- \seeglossary{Horn Clause} form stating that a number of subgoals
- imply the main conclusion, which is marked as a protected
- proposition.} as a theorem stating that the subgoals imply the main
- goal: @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}. The outermost goal
- structure is that of a Horn Clause\glossary{Horn Clause}{An iterated
- implication @{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}, without any
- outermost quantifiers. Strictly speaking, propositions @{text
- "A\<^sub>i"} need to be atomic in Horn Clauses, but Isabelle admits
- arbitrary substructure here (nested @{text "\<Longrightarrow>"} and @{text "\<And>"}
- connectives).}: i.e.\ an iterated implication without any
- quantifiers\footnote{Recall that outermost @{text "\<And>x. \<phi>[x]"} is
- always represented via schematic variables in the body: @{text
- "\<phi>[?x]"}. These variables may get instantiated during the course of
- reasoning.}. For @{text "n = 0"} a goal is called ``solved''.
-
- The structure of each subgoal @{text "A\<^sub>i"} is that of a general
- Hereditary Harrop Formula @{text "\<And>x\<^sub>1 \<dots> \<And>x\<^sub>k. H\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> H\<^sub>m \<Longrightarrow> B"} in
- normal form. Here @{text "x\<^sub>1, \<dots>, x\<^sub>k"} are goal parameters, i.e.\
- arbitrary-but-fixed entities of certain types, and @{text "H\<^sub>1, \<dots>,
- H\<^sub>m"} are goal hypotheses, i.e.\ facts that may be assumed locally.
- Together, this forms the goal context of the conclusion @{text B} to
- be established. The goal hypotheses may be again arbitrary
- Hereditary Harrop Formulas, although the level of nesting rarely
- exceeds 1--2 in practice.
-
- The main conclusion @{text C} is internally marked as a protected
- proposition\glossary{Protected proposition}{An arbitrarily
- structured proposition @{text "C"} which is forced to appear as
- atomic by wrapping it into a propositional identity operator;
- notation @{text "#C"}. Protecting a proposition prevents basic
- inferences from entering into that structure for the time being.},
- which is represented explicitly by the notation @{text "#C"}. This
- ensures that the decomposition into subgoals and main conclusion is
- well-defined for arbitrarily structured claims.
-
- \medskip Basic goal management is performed via the following
- Isabelle/Pure rules:
-
- \[
- \infer[@{text "(init)"}]{@{text "C \<Longrightarrow> #C"}}{} \qquad
- \infer[@{text "(finish)"}]{@{text "C"}}{@{text "#C"}}
- \]
-
- \medskip The following low-level variants admit general reasoning
- with protected propositions:
-
- \[
- \infer[@{text "(protect)"}]{@{text "#C"}}{@{text "C"}} \qquad
- \infer[@{text "(conclude)"}]{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> C"}}{@{text "A\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^sub>n \<Longrightarrow> #C"}}
- \]
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML Goal.init: "cterm -> thm"} \\
- @{index_ML Goal.finish: "thm -> thm"} \\
- @{index_ML Goal.protect: "thm -> thm"} \\
- @{index_ML Goal.conclude: "thm -> thm"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML "Goal.init"}~@{text C} initializes a tactical goal from
- the well-formed proposition @{text C}.
-
- \item @{ML "Goal.finish"}~@{text "thm"} checks whether theorem
- @{text "thm"} is a solved goal (no subgoals), and concludes the
- result by removing the goal protection.
-
- \item @{ML "Goal.protect"}~@{text "thm"} protects the full statement
- of theorem @{text "thm"}.
-
- \item @{ML "Goal.conclude"}~@{text "thm"} removes the goal
- protection, even if there are pending subgoals.
-
- \end{description}
-*}
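-
-text {*
-  A minimal, hypothetical sketch of this protocol: initialize a
-  trivial claim, solve the sole subgoal by assumption (cf.\
-  \secref{sec:resolve-assume-tac}), and finish.
-*}
-
-ML {*
-  (*init yields (PROP A ==> PROP A) ==> #(PROP A ==> PROP A);
-    assume_tac solves the subgoal, finish removes the protection*)
-  val st0 = Goal.init @{cprop "PROP A ==> PROP A"};
-  val st1 = Seq.hd (assume_tac 1 st0);
-  val th = Goal.finish st1;
-*}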
-
-
-section {* Tactics *}
-
-text {* A @{text "tactic"} is a function @{text "goal \<rightarrow> goal\<^sup>*\<^sup>*"} that
- maps a given goal state (represented as a theorem, cf.\
- \secref{sec:tactical-goals}) to a lazy sequence of potential
- successor states. The underlying sequence implementation is lazy
- both in head and tail, and is purely functional in \emph{not}
- supporting memoing.\footnote{The lack of memoing and the strict
- nature of SML requires some care when working with low-level
- sequence operations, to avoid duplicate or premature evaluation of
- results.}
-
- An \emph{empty result sequence} means that the tactic has failed: in
- a compound tactic expression, other tactics might be tried instead,
- or the whole refinement step might fail outright, producing a
- toplevel error message. When implementing tactics from scratch, one
- should take care to observe the basic protocol of mapping regular
- error conditions to an empty result; only serious faults should
- emerge as exceptions.
-
- By enumerating \emph{multiple results}, a tactic can easily express
- the potential outcome of an internal search process. There are also
- combinators for building proof tools that involve search
- systematically, see also \secref{sec:tacticals}.
-
- \medskip As explained in \secref{sec:tactical-goals}, a goal state
- essentially consists of a list of subgoals that imply the main goal
- (conclusion). Tactics may operate on all subgoals or on a
- specific, explicitly addressed subgoal, but must not change the main
- conclusion (apart from instantiating schematic goal variables).
-
- Tactics with explicit \emph{subgoal addressing} are of the form
- @{text "int \<rightarrow> tactic"} and may be applied to a particular subgoal
- (counting from 1). If the subgoal number is out of range, the
- tactic should fail with an empty result sequence, but must not raise
- an exception!
-
- Operating on a particular subgoal means replacing it by an interval
- of zero or more subgoals in the same place; other subgoals must not
- be affected, apart from instantiating schematic variables ranging
- over the whole goal state.
-
- A common pattern of composing tactics with subgoal addressing is to
- try the first one, and then the second one only if the subgoal has
- not been solved yet. Special care is required here to avoid bumping
- into unrelated subgoals that happen to come after the original
- subgoal. Assuming that there is only a single initial subgoal is a
- very common error when implementing tactics!
-
- Tactics with internal subgoal addressing should expose the subgoal
- index as @{text "int"} argument in full generality; a hardwired
- subgoal 1 is inappropriate.
-
- \medskip The main well-formedness conditions for proper tactics are
- summarized as follows.
-
- \begin{itemize}
-
- \item General tactic failure is indicated by an empty result; only
- serious faults may produce an exception.
-
- \item The main conclusion must not be changed, apart from
- instantiating schematic variables.
-
- \item A tactic operates either uniformly on all subgoals, or
- specifically on a selected subgoal (without bumping into unrelated
- subgoals).
-
- \item Range errors in subgoal addressing produce an empty result.
-
- \end{itemize}
-
- Some of these conditions are checked by higher-level goal
- infrastructure (\secref{sec:results}); others are not checked
- explicitly, and violating them merely results in ill-behaved tactics
- experienced by the user (e.g.\ tactics that insist on being
- applicable only to singleton goals, or disallow composition with
- basic tacticals).
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML_type tactic: "thm -> thm Seq.seq"} \\
- @{index_ML no_tac: tactic} \\
- @{index_ML all_tac: tactic} \\
- @{index_ML print_tac: "string -> tactic"} \\[1ex]
- @{index_ML PRIMITIVE: "(thm -> thm) -> tactic"} \\[1ex]
- @{index_ML SUBGOAL: "(term * int -> tactic) -> int -> tactic"} \\
- @{index_ML CSUBGOAL: "(cterm * int -> tactic) -> int -> tactic"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML_type tactic} represents tactics. The well-formedness
- conditions described above need to be observed. See also @{"file"
- "~~/src/Pure/General/seq.ML"} for the underlying implementation of
- lazy sequences.
-
- \item @{ML_type "int -> tactic"} represents tactics with explicit
- subgoal addressing, with well-formedness conditions as described
- above.
-
- \item @{ML no_tac} is a tactic that always fails, returning the
- empty sequence.
-
- \item @{ML all_tac} is a tactic that always succeeds, returning a
- singleton sequence with unchanged goal state.
-
- \item @{ML print_tac}~@{text "message"} is like @{ML all_tac}, but
- prints a message together with the goal state on the tracing
- channel.
-
- \item @{ML PRIMITIVE}~@{text rule} turns a primitive inference rule
- into a tactic with unique result. Exception @{ML THM} is considered
- a regular tactic failure and produces an empty result; other
- exceptions are passed through.
-
- \item @{ML SUBGOAL}~@{text "(fn (subgoal, i) => tactic)"} is the
- most basic form to produce a tactic with subgoal addressing. The
- given abstraction over the subgoal term and subgoal number allows one to
- peek at the relevant information of the full goal state. The
- subgoal range is checked as required above.
-
- \item @{ML CSUBGOAL} is similar to @{ML SUBGOAL}, but passes the
- subgoal as @{ML_type cterm} instead of raw @{ML_type term}. This
- avoids expensive re-certification in situations where the subgoal is
- used directly for primitive inferences.
-
- \end{description}
-*}
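-
-text {*
-  For illustration, a hypothetical sketch of a well-behaved tactic
-  built with @{ML SUBGOAL}: it prints the addressed subgoal on the
-  tracing channel and succeeds with an unchanged goal state.
-*}
-
-ML {*
-  (*SUBGOAL performs the subgoal range check: an out-of-range index
-    produces an empty result instead of an exception*)
-  fun trace_subgoal_tac ctxt = SUBGOAL (fn (goal, i) =>
-    (tracing ("Subgoal " ^ string_of_int i ^ ": " ^
-      Syntax.string_of_term ctxt goal); all_tac));
-*}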
-
-
-subsection {* Resolution and assumption tactics \label{sec:resolve-assume-tac} *}
-
-text {* \emph{Resolution} is the most basic mechanism for refining a
- subgoal using a theorem as object-level rule.
- \emph{Elim-resolution} is particularly suited for elimination rules:
- it resolves with a rule, proves its first premise by assumption, and
- finally deletes that assumption from any new subgoals.
- \emph{Destruct-resolution} is like elim-resolution, but the given
- destruction rules are first turned into canonical elimination
- format. \emph{Forward-resolution} is like destruct-resolution, but
- without deleting the selected assumption. The @{text "r/e/d/f"}
- naming convention is maintained for several different kinds of
- resolution rules and tactics.
-
- Assumption tactics close a subgoal by unifying some of its premises
- against its conclusion.
-
- \medskip All the tactics in this section operate on a subgoal
- designated by a positive integer. Other subgoals might be affected
- indirectly, due to instantiation of schematic variables.
-
- There are various sources of non-determinism; the tactic result
- sequence enumerates all possibilities of the following choices (if
- applicable):
-
- \begin{enumerate}
-
- \item selecting one of the rules given as argument to the tactic;
-
- \item selecting a subgoal premise to eliminate, unifying it against
- the first premise of the rule;
-
- \item unifying the conclusion of the subgoal to the conclusion of
- the rule.
-
- \end{enumerate}
-
- Recall that higher-order unification may produce multiple results
- that are enumerated here.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML resolve_tac: "thm list -> int -> tactic"} \\
- @{index_ML eresolve_tac: "thm list -> int -> tactic"} \\
- @{index_ML dresolve_tac: "thm list -> int -> tactic"} \\
- @{index_ML forward_tac: "thm list -> int -> tactic"} \\[1ex]
- @{index_ML assume_tac: "int -> tactic"} \\
- @{index_ML eq_assume_tac: "int -> tactic"} \\[1ex]
- @{index_ML match_tac: "thm list -> int -> tactic"} \\
- @{index_ML ematch_tac: "thm list -> int -> tactic"} \\
- @{index_ML dmatch_tac: "thm list -> int -> tactic"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML resolve_tac}~@{text "thms i"} refines the goal state
- using the given theorems, which should normally be introduction
- rules. The tactic resolves a rule's conclusion with subgoal @{text
- i}, replacing it by the corresponding versions of the rule's
- premises.
-
- \item @{ML eresolve_tac}~@{text "thms i"} performs elim-resolution
- with the given theorems, which should normally be elimination rules.
-
- \item @{ML dresolve_tac}~@{text "thms i"} performs
- destruct-resolution with the given theorems, which should normally
- be destruction rules. This replaces an assumption by the result of
- applying one of the rules.
-
- \item @{ML forward_tac} is like @{ML dresolve_tac} except that the
- selected assumption is not deleted. It applies a rule to an
- assumption, adding the result as a new assumption.
-
- \item @{ML assume_tac}~@{text i} attempts to solve subgoal @{text i}
- by assumption (modulo higher-order unification).
-
- \item @{ML eq_assume_tac} is similar to @{ML assume_tac}, but checks
- only for immediate @{text "\<alpha>"}-convertibility instead of using
- unification. It succeeds (with a unique next state) if one of the
- assumptions is equal to the subgoal's conclusion. Since it does not
- instantiate variables, it cannot make other subgoals unprovable.
-
- \item @{ML match_tac}, @{ML ematch_tac}, and @{ML dmatch_tac} are
- similar to @{ML resolve_tac}, @{ML eresolve_tac}, and @{ML
- dresolve_tac}, respectively, but do not instantiate schematic
- variables in the goal state.
-
- Flexible subgoals are not updated at will, but are left alone.
- Strictly speaking, matching means treating the unknowns in the goal
- state as constants; these tactics merely discard unifiers that would
- update the goal state.
-
- \end{description}
-*}
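-
-text {*
-  A hypothetical sketch of composing these operations: resolve with
-  one of the given introduction rules, then close every newly
-  emerging subgoal by assumption.
-*}
-
-ML {*
-  (*proper subgoal addressing is retained throughout; the tactic
-    fails (empty result) if some new subgoal resists assume_tac*)
-  fun intro_by_assumption_tac rules =
-    resolve_tac rules THEN_ALL_NEW assume_tac;
-*}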
-
-
-subsection {* Explicit instantiation within a subgoal context *}
-
-text {* The main resolution tactics (\secref{sec:resolve-assume-tac})
- use higher-order unification, which works well in many practical
- situations despite its daunting theoretical properties.
- Nonetheless, there are important problem classes where unguided
- higher-order unification is not so useful. This typically involves
- rules like universal elimination, existential introduction, or
- equational substitution. Here the unification problem involves
- fully flexible @{text "?P ?x"} schemes, which are hard to manage
- without further hints.
-
- By providing a (small) rigid term for @{text "?x"} explicitly, the
- remaining unification problem is to assign a (large) term to @{text
- "?P"}, according to the shape of the given subgoal. This is
- sufficiently well-behaved in most practical situations.
-
- \medskip Isabelle provides separate versions of the standard @{text
- "r/e/d/f"} resolution tactics that allow to provide explicit
- instantiations of unknowns of the given rule, wrt.\ terms that refer
- to the implicit context of the selected subgoal.
-
- An instantiation consists of a list of pairs of the form @{text
- "(?x, t)"}, where @{text ?x} is a schematic variable occurring in
- the given rule, and @{text t} is a term from the current proof
- context, augmented by the local goal parameters of the selected
- subgoal; cf.\ the @{text "focus"} operation described in
- \secref{sec:variables}.
-
- Entering the syntactic context of a subgoal is a brittle operation,
- because its exact form is somewhat accidental, and the choice of
- bound variable names depends on the presence of other local and
- global names. Explicit renaming of subgoal parameters prior to
- explicit instantiation might help to achieve a bit more robustness.
-
- Type instantiations may be given as well, via pairs like @{text
- "(?'a, \<tau>)"}. Type instantiations are distinguished from term
- instantiations by the syntactic form of the schematic variable.
- Types are instantiated before terms are. Since term instantiation
- already performs type-inference as expected, explicit type
- instantiations are seldom necessary.
-*}
-
-text %mlref {*
- \begin{mldecls}
- @{index_ML res_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
- @{index_ML eres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
- @{index_ML dres_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\
- @{index_ML forw_inst_tac: "Proof.context -> (indexname * string) list -> thm -> int -> tactic"} \\[1ex]
- @{index_ML rename_tac: "string list -> int -> tactic"} \\
- \end{mldecls}
-
- \begin{description}
-
- \item @{ML res_inst_tac}~@{text "ctxt insts thm i"} instantiates the
- rule @{text thm} with the instantiations @{text insts}, as described
- above, and then performs resolution on subgoal @{text i}.
-
- \item @{ML eres_inst_tac} is like @{ML res_inst_tac}, but performs
- elim-resolution.
-
- \item @{ML dres_inst_tac} is like @{ML res_inst_tac}, but performs
- destruct-resolution.
-
- \item @{ML forw_inst_tac} is like @{ML dres_inst_tac} except that
- the selected assumption is not deleted.
-
- \item @{ML rename_tac}~@{text "names i"} renames the innermost
- parameters of subgoal @{text i} according to the provided @{text
- names} (which need to be distinct identifiers).
-
- \end{description}
-*}
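-
-text {*
-  A hypothetical sketch, assuming an Isabelle/HOL context where the
-  usual rule @{text "allE"} (with schematic @{text "?x"}) is
-  available: the innermost subgoal parameter is renamed first, so
-  that the subsequent instantiation can refer to it robustly.
-*}
-
-ML {*
-  (*rename the innermost parameter of subgoal i to "u", then
-    eliminate a universal assumption with ?x instantiated to u*)
-  fun inst_allE_tac ctxt i =
-    rename_tac ["u"] i
-    THEN eres_inst_tac ctxt [(("x", 0), "u")] @{thm allE} i;
-*}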
-
-
-section {* Tacticals \label{sec:tacticals} *}
-
-text {*
-
-FIXME
-
-\glossary{Tactical}{A functional combinator for building up complex
-tactics from simpler ones. Typical tacticals perform sequential
-composition, disjunction (choice), iteration, or goal addressing.
-Various search strategies may be expressed via tacticals.}
-
-*}
-
-end
-
--- a/doc-src/IsarImplementation/Thy/unused.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-
-section {* Sessions and document preparation *}
-
-section {* Structured output *}
-
-subsection {* Pretty printing *}
-
-text FIXME
-
-subsection {* Output channels *}
-
-text FIXME
-
-subsection {* Print modes \label{sec:print-mode} *}
-
-text FIXME
-
-text {*
-
-
- \medskip The general concept supports block-structured reasoning
- nicely, with arbitrary mechanisms for introducing local assumptions.
- The common reasoning pattern is as follows:
-
- \medskip
- \begin{tabular}{l}
- @{text "add_assms e\<^isub>1 A\<^isub>1"} \\
- @{text "\<dots>"} \\
- @{text "add_assms e\<^isub>n A\<^isub>n"} \\
- @{text "export"} \\
- \end{tabular}
- \medskip
-
- \noindent The final @{text "export"} will turn any fact @{text
- "A\<^isub>1, \<dots>, A\<^isub>n \<turnstile> B"} into some @{text "\<turnstile> B'"}, by
- applying the export rules @{text "e\<^isub>1, \<dots>, e\<^isub>n"}
- inside-out.
-
-
- A \emph{fixed variable} acts like a local constant in the current
- context, representing some simple type @{text "\<alpha>"}, or some value
- @{text "x: \<tau>"} (for a fixed type expression @{text "\<tau>"}). A
- \emph{schematic variable} acts like a placeholder for arbitrary
- elements, similar to outermost quantification. The division between
- fixed and schematic variables tells which abstract entities are
- inside and outside the current context.
-
-
- @{index_ML Variable.trade: "Proof.context -> (thm list -> thm list) -> thm list -> thm list"} \\
-
-
-
- \item @{ML Variable.trade} composes @{ML Variable.import} and @{ML
- Variable.export}, i.e.\ it provides a view on facts with all
- variables being fixed in the current context.
-
-
- In practice, super-contexts emerge either by merging existing ones,
- or by adding explicit declarations. For example, new theories are
- usually derived by importing existing theories from the library
- @{text "\<Theta> = \<Theta>\<^sub>1 + \<dots> + \<Theta>\<^isub>n"}, or
-
-
-
- The Isar toplevel works differently for interactive developments
- vs.\ batch processing of theory sources. For example, diagnostic
- commands produce a warning in batch mode, because they are considered
- alien to the final theory document being produced eventually.
- Moreover, full @{text undo} with intermediate checkpoints to protect
- against destroying theories accidentally is limited to interactive
- mode. In batch mode there is only a single strictly linear stream
- of potentially destructive theory transformations.
-
- \item @{ML Toplevel.empty} is an empty transition; the Isar command
- dispatcher internally applies @{ML Toplevel.name} (for the command
- name) and @{ML Toplevel.position} (for the source position).
-
-*}
-
--- a/doc-src/IsarImplementation/checkglossary Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,28 +0,0 @@
-#!/usr/bin/env perl
-# $Id$
-
-use strict;
-
-my %defs = ();
-my %refs = ();
-
-while (<ARGV>) {
- if (m,\\glossaryentry\{\w*\\bf *((\w|\s)+)@,) {
- $defs{lc $1} = 1;
- }
- while (m,\\seeglossary *\{((\w|\s)+)\},g) {
- $refs{lc $1} = 1;
- }
-}
-
-print "Glossary definitions:\n";
-foreach (sort(keys(%defs))) {
- print " \"$_\"\n";
-}
-
-foreach (keys(%refs)) {
- s,s$,,;
- if (!defined($defs{$_})) {
- print "### Undefined glossary reference: \"$_\"\n";
- }
-}
--- a/doc-src/IsarImplementation/implementation.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/implementation.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\documentclass[12pt,a4paper,fleqn]{report}
\usepackage{latexsym,graphicx}
\usepackage[refpage]{nomencl}
@@ -23,9 +20,6 @@
and Larry Paulson
}
-%FIXME
-%\makeglossary
-
\makeindex
@@ -71,28 +65,24 @@
\listoffigures
\clearfirst
-%\input{intro.tex}
-\input{Thy/document/prelim.tex}
-\input{Thy/document/logic.tex}
-\input{Thy/document/tactic.tex}
-\input{Thy/document/proof.tex}
-\input{Thy/document/isar.tex}
-\input{Thy/document/locale.tex}
-\input{Thy/document/integration.tex}
+\input{Thy/document/Prelim.tex}
+\input{Thy/document/Logic.tex}
+\input{Thy/document/Tactic.tex}
+\input{Thy/document/Proof.tex}
+\input{Thy/document/Syntax.tex}
+\input{Thy/document/Isar.tex}
+\input{Thy/document/Local_Theory.tex}
+\input{Thy/document/Integration.tex}
\appendix
\input{Thy/document/ML.tex}
\begingroup
\tocentry{\bibname}
-\bibliographystyle{plain} \small\raggedright\frenchspacing
+\bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
-%FIXME
-%\tocentry{\glossaryname}
-%\printglossary
-
\tocentry{\indexname}
\printindex
--- a/doc-src/IsarImplementation/intro.tex Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-
-%% $Id$
-
-\chapter{Introduction}
-
-FIXME
-
-\nocite{Wenzel-PhD}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "implementation"
-%%% End:
--- a/doc-src/IsarImplementation/makeglossary Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-#!/bin/sh
-# $Id$
-
-NAME="$1"
-makeindex -s nomencl -o "${NAME}.gls" "${NAME}.glo"
-./checkglossary "${NAME}.glo"
--- a/doc-src/IsarImplementation/style.sty Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarImplementation/style.sty Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
%% toc
\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
@@ -10,24 +7,12 @@
\newcommand{\chref}[1]{chapter~\ref{#1}}
\newcommand{\figref}[1]{figure~\ref{#1}}
-%% glossary
-\renewcommand{\glossary}[2]{\nomenclature{\bf #1}{#2}}
-\newcommand{\seeglossary}[1]{\emph{#1}}
-\newcommand{\glossaryname}{Glossary}
-\renewcommand{\nomname}{\glossaryname}
-\renewcommand{\pagedeclaration}[1]{\nobreak\quad\dotfill~page~\bold{#1}}
-
-%% index
-\newcommand{\indexml}[1]{\index{\emph{#1}|bold}}
-\newcommand{\indexmlexception}[1]{\index{\emph{#1} (exception)|bold}}
-\newcommand{\indexmltype}[1]{\index{\emph{#1} (type)|bold}}
-\newcommand{\indexmlstructure}[1]{\index{\emph{#1} (structure)|bold}}
-\newcommand{\indexmlfunctor}[1]{\index{\emph{#1} (functor)|bold}}
-
%% math
\newcommand{\text}[1]{\mbox{#1}}
\newcommand{\isasymvartheta}{\isamath{\theta}}
-\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
+\newcommand{\isactrlvec}[1]{\emph{$\vec{#1}$}}
+\newcommand{\isactrlBG}{\isacharbackquoteopen}
+\newcommand{\isactrlEN}{\isacharbackquoteclose}
\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
@@ -49,6 +34,10 @@
\newcommand{\isasymtype}{\minorcmd{type}}
\newcommand{\isasymval}{\minorcmd{val}}
+\newcommand{\isasymFIX}{\isakeyword{fix}}
+\newcommand{\isasymASSUME}{\isakeyword{assume}}
+\newcommand{\isasymDEFINE}{\isakeyword{define}}
+\newcommand{\isasymNOTE}{\isakeyword{note}}
\newcommand{\isasymGUESS}{\isakeyword{guess}}
\newcommand{\isasymOBTAIN}{\isakeyword{obtain}}
\newcommand{\isasymTHEORY}{\isakeyword{theory}}
@@ -61,6 +50,7 @@
\isabellestyle{it}
+
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "implementation"
--- a/doc-src/IsarOverview/Isar/document/.cvsignore Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-*.sty
-session.tex
\ No newline at end of file
--- a/doc-src/IsarRef/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/IsaMakefile Fri Feb 27 18:50:35 2009 +0100
@@ -22,10 +22,11 @@
HOL-IsarRef: $(LOG)/HOL-IsarRef.gz
$(LOG)/HOL-IsarRef.gz: Thy/ROOT.ML ../antiquote_setup.ML \
- Thy/Inner_Syntax.thy Thy/Introduction.thy Thy/Outer_Syntax.thy \
- Thy/Spec.thy Thy/Proof.thy Thy/Misc.thy Thy/Document_Preparation.thy \
- Thy/Generic.thy Thy/HOL_Specific.thy Thy/Quick_Reference.thy \
- Thy/Symbols.thy Thy/ML_Tactic.thy
+ Thy/First_Order_Logic.thy Thy/Framework.thy Thy/Inner_Syntax.thy \
+ Thy/Introduction.thy Thy/Outer_Syntax.thy Thy/Spec.thy Thy/Proof.thy \
+ Thy/Misc.thy Thy/Document_Preparation.thy Thy/Generic.thy \
+ Thy/HOL_Specific.thy Thy/Quick_Reference.thy Thy/Symbols.thy \
+ Thy/ML_Tactic.thy
@$(USEDIR) -s IsarRef HOL Thy
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/First_Order_Logic.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,520 @@
+
+header {* Example: First-Order Logic *}
+
+theory %visible First_Order_Logic
+imports Pure
+begin
+
+text {*
+ \noindent In order to commence a new object-logic within
+ Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
+ for individuals and @{text "o"} for object-propositions. The latter
+ is embedded into the language of Pure propositions by means of a
+ separate judgment.
+*}
+
+typedecl i
+typedecl o
+
+judgment
+ Trueprop :: "o \<Rightarrow> prop" ("_" 5)
+
+text {*
+ \noindent Note that the object-logic judgment is implicit in the
+ syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
+ From the Pure perspective this means ``@{prop A} is derivable in the
+ object-logic''.
+*}
+
+
+subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
+
+text {*
+ Equality is axiomatized as a binary predicate on individuals, with
+ reflexivity as introduction, and substitution as elimination
+ principle. Note that the latter is particularly convenient in a
+ framework like Isabelle, because syntactic congruences are
+ implicitly produced by unification of @{term "B x"} against
+ expressions containing occurrences of @{term x}.
+*}
+
+axiomatization
+ equal :: "i \<Rightarrow> i \<Rightarrow> o" (infix "=" 50)
+where
+ refl [intro]: "x = x" and
+ subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
+
+text {*
+ \noindent Substitution is very powerful, but also hard to control in
+ full generality. We derive some common symmetry~/ transitivity
+ schemes as particular consequences.
+*}
+
+theorem sym [sym]:
+ assumes "x = y"
+ shows "y = x"
+proof -
+ have "x = x" ..
+ with `x = y` show "y = x" ..
+qed
+
+theorem forw_subst [trans]:
+ assumes "y = x" and "B x"
+ shows "B y"
+proof -
+ from `y = x` have "x = y" ..
+ from this and `B x` show "B y" ..
+qed
+
+theorem back_subst [trans]:
+ assumes "B x" and "x = y"
+ shows "B y"
+proof -
+ from `x = y` and `B x`
+ show "B y" ..
+qed
+
+theorem trans [trans]:
+ assumes "x = y" and "y = z"
+ shows "x = z"
+proof -
+ from `y = z` and `x = y`
+ show "x = z" ..
+qed
+
+
+subsection {* Basic group theory *}
+
+text {*
+ As an example of equational reasoning we consider some bits of
+ group theory. The subsequent locale definition postulates group
+ operations and axioms; we also derive some consequences of this
+ specification.
+*}
+
+locale group =
+ fixes prod :: "i \<Rightarrow> i \<Rightarrow> i" (infix "\<circ>" 70)
+ and inv :: "i \<Rightarrow> i" ("(_\<inverse>)" [1000] 999)
+ and unit :: i ("1")
+ assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
+ and left_unit: "1 \<circ> x = x"
+ and left_inv: "x\<inverse> \<circ> x = 1"
+begin
+
+theorem right_inv: "x \<circ> x\<inverse> = 1"
+proof -
+ have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
+ also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
+ also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
+ also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
+ also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
+ also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
+ also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
+ also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
+ finally show "x \<circ> x\<inverse> = 1" .
+qed
+
+theorem right_unit: "x \<circ> 1 = x"
+proof -
+ have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
+ also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
+ also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
+ also have "\<dots> \<circ> x = x" by (rule left_unit)
+ finally show "x \<circ> 1 = x" .
+qed
+
+text {*
+ \noindent Reasoning from basic axioms is often tedious. Our proofs
+ work by producing various instances of the given rules (potentially
+ the symmetric form) using the pattern ``@{command have}~@{text
+ eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
+ results via @{command also}/@{command finally}. These steps may
+ involve any of the transitivity rules declared in
+ \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
+ the first two results in @{thm right_inv} and in the final steps of
+ both proofs, @{thm forw_subst} in the first combination of @{thm
+ right_unit}, and @{thm back_subst} in all other calculational steps.
+
+ Occasional substitutions in calculations are adequate, but should
+ not be over-emphasized. The other extreme is to compose a chain by
+ plain transitivity only, with replacements occurring always in
+ topmost position. For example:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+ assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
+(*>*)
+ have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
+ also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
+ also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
+ also have "\<dots> = x" unfolding left_unit ..
+ finally have "x \<circ> 1 = x" .
+(*<*)
+qed
+(*>*)
+
+text {*
+ \noindent Here we have re-used the built-in mechanism for unfolding
+ definitions in order to normalize each equational problem. A more
+ realistic object-logic would include proper setup for the Simplifier
+ (\secref{sec:simplifier}), the main automated tool for equational
+ reasoning in Isabelle. Then ``@{command unfolding}~@{thm
+ left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
+ "(simp only: left_inv)"}'' etc.
+*}
+
+end
+
+
+subsection {* Propositional logic \label{sec:framework-ex-prop} *}
+
+text {*
+ We axiomatize basic connectives of propositional logic: implication,
+ disjunction, and conjunction. The associated rules are modeled
+ after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
+*}
+
+axiomatization
+ imp :: "o \<Rightarrow> o \<Rightarrow> o" (infixr "\<longrightarrow>" 25) where
+ impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
+ impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
+
+axiomatization
+ disj :: "o \<Rightarrow> o \<Rightarrow> o" (infixr "\<or>" 30) where
+ disjI\<^isub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
+ disjI\<^isub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
+ disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
+
+axiomatization
+ conj :: "o \<Rightarrow> o \<Rightarrow> o" (infixr "\<and>" 35) where
+ conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
+ conjD\<^isub>1: "A \<and> B \<Longrightarrow> A" and
+ conjD\<^isub>2: "A \<and> B \<Longrightarrow> B"
+
+text {*
+ \noindent The conjunctive destructions have the disadvantage that
+ decomposing @{prop "A \<and> B"} involves an immediate decision about which
+ component should be projected. The more convenient simultaneous
+ elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
+ follows:
+*}
+
+theorem conjE [elim]:
+ assumes "A \<and> B"
+ obtains A and B
+proof
+ from `A \<and> B` show A by (rule conjD\<^isub>1)
+ from `A \<and> B` show B by (rule conjD\<^isub>2)
+qed
+
+text {*
+ \noindent Here is an example of swapping conjuncts with a single
+ intermediate elimination step:
+*}
+
+(*<*)
+lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+ assume "A \<and> B"
+ then obtain B and A ..
+ then have "B \<and> A" ..
+(*<*)
+qed
+(*>*)
+
+text {*
+ \noindent Note that the analogous elimination rule for disjunction
+ ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
+ the original axiomatization of @{thm disjE}.
+
+ \medskip We continue propositional logic by introducing absurdity
+ with its characteristic elimination. Plain truth may then be
+ defined as a proposition that is trivially true.
+*}
+
+axiomatization
+ false :: o ("\<bottom>") where
+ falseE [elim]: "\<bottom> \<Longrightarrow> A"
+
+definition
+ true :: o ("\<top>") where
+ "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
+
+theorem trueI [intro]: \<top>
+ unfolding true_def ..
+
+text {*
+ \medskip\noindent Now negation represents an implication towards
+ absurdity:
+*}
+
+definition
+ not :: "o \<Rightarrow> o" ("\<not> _" [40] 40) where
+ "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
+
+theorem notI [intro]:
+ assumes "A \<Longrightarrow> \<bottom>"
+ shows "\<not> A"
+unfolding not_def
+proof
+ assume A
+ then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
+qed
+
+theorem notE [elim]:
+ assumes "\<not> A" and A
+ shows B
+proof -
+ from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
+ from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
+ then show B ..
+qed
+
+
+subsection {* Classical logic *}
+
+text {*
+ Subsequently we state the principle of classical contradiction as a
+ local assumption. Thus we refrain from forcing the object-logic
+ into the classical perspective. Within that context, we may derive
+ well-known consequences of the classical principle.
+*}
+
+locale classical =
+ assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
+begin
+
+theorem double_negation:
+ assumes "\<not> \<not> C"
+ shows C
+proof (rule classical)
+ assume "\<not> C"
+ with `\<not> \<not> C` show C ..
+qed
+
+theorem tertium_non_datur: "C \<or> \<not> C"
+proof (rule double_negation)
+ show "\<not> \<not> (C \<or> \<not> C)"
+ proof
+ assume "\<not> (C \<or> \<not> C)"
+ have "\<not> C"
+ proof
+ assume C then have "C \<or> \<not> C" ..
+ with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+ qed
+ then have "C \<or> \<not> C" ..
+ with `\<not> (C \<or> \<not> C)` show \<bottom> ..
+ qed
+qed
+
+text {*
+ \noindent These examples illustrate both classical reasoning and
+ non-trivial propositional proofs in general. All three rules
+ characterize classical logic independently, but the original rule is
+ already the most convenient to use, because it leaves the conclusion
+ unchanged. Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
+ format for eliminations, despite the additional twist that the
+ context refers to the main conclusion. So we may write @{thm
+ classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
+ This also explains nicely how classical reasoning really works:
+ whatever the main @{text thesis} might be, we may always assume its
+ negation!
+*}
+
+end
+
+
+subsection {* Quantifiers \label{sec:framework-ex-quant} *}
+
+text {*
+ Representing quantifiers is easy, thanks to the higher-order nature
+ of the underlying framework. According to the well-known technique
+ introduced by Church \cite{church40}, quantifiers are operators on
+ predicates, which are syntactically represented as @{text "\<lambda>"}-terms
+ of type @{typ "i \<Rightarrow> o"}. Binder notation turns @{text "All (\<lambda>x. B
+ x)"} into @{text "\<forall>x. B x"} etc.
+*}
+
+axiomatization
+ All :: "(i \<Rightarrow> o) \<Rightarrow> o" (binder "\<forall>" 10) where
+ allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
+ allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
+
+axiomatization
+ Ex :: "(i \<Rightarrow> o) \<Rightarrow> o" (binder "\<exists>" 10) where
+ exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
+ exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
+
+text {*
+ \noindent The statement of @{thm exE} corresponds to ``@{text
+ "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar. In the
+ subsequent example we illustrate quantifier reasoning involving all
+ four rules:
+*}
+
+theorem
+ assumes "\<exists>x. \<forall>y. R x y"
+ shows "\<forall>y. \<exists>x. R x y"
+proof -- {* @{text "\<forall>"} introduction *}
+ obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` .. -- {* @{text "\<exists>"} elimination *}
+ fix y have "R x y" using `\<forall>y. R x y` .. -- {* @{text "\<forall>"} destruction *}
+ then show "\<exists>x. R x y" .. -- {* @{text "\<exists>"} introduction *}
+qed
+
+
+subsection {* Canonical reasoning patterns *}
+
+text {*
+ The main rules of first-order predicate logic from
+ \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
+ can now be summarized as follows, using the native Isar statement
+ format of \secref{sec:framework-stmt}.
+
+ \medskip
+ \begin{tabular}{l}
+ @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
+ @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
+
+ @{text "disjI\<^isub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
+ @{text "disjI\<^isub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
+ @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
+
+ @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
+ @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
+
+ @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
+ @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
+
+ @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
+ @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
+
+ @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
+ @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
+
+ @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
+ @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
+ \end{tabular}
+ \medskip
+
+ \noindent This essentially provides a declarative reading of Pure
+ rules as Isar reasoning patterns: the rule statements tell what a
+ canonical proof outline should look like. Since the above rules have
+ already been declared as @{attribute (Pure) intro}, @{attribute
+ (Pure) elim}, @{attribute (Pure) dest} --- each according to its
+ particular shape --- we can immediately write Isar proof texts as
+ follows:
+*}
+
+(*<*)
+theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
+proof -
+(*>*)
+
+ txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "A \<longrightarrow> B"
+ proof
+ assume A
+ show B sorry %noproof
+ qed
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "A \<longrightarrow> B" and A sorry %noproof
+ then have B ..
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have A sorry %noproof
+ then have "A \<or> B" ..
+
+ have B sorry %noproof
+ then have "A \<or> B" ..
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "A \<or> B" sorry %noproof
+ then have C
+ proof
+ assume A
+ then show C sorry %noproof
+ next
+ assume B
+ then show C sorry %noproof
+ qed
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have A and B sorry %noproof
+ then have "A \<and> B" ..
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "A \<and> B" sorry %noproof
+ then obtain A and B ..
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<bottom>" sorry %noproof
+ then have A ..
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<top>" ..
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<not> A"
+ proof
+ assume A
+ then show "\<bottom>" sorry %noproof
+ qed
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<not> A" and A sorry %noproof
+ then have B ..
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<forall>x. B x"
+ proof
+ fix x
+ show "B x" sorry %noproof
+ qed
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<forall>x. B x" sorry %noproof
+ then have "B a" ..
+
+ txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<exists>x. B x"
+ proof
+ show "B a" sorry %noproof
+ qed
+
+ txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
+
+ have "\<exists>x. B x" sorry %noproof
+ then obtain a where "B a" ..
+
+ txt_raw {*\end{minipage}*}
+
+(*<*)
+qed
+(*>*)
+
+text {*
+ \bigskip\noindent Of course, these proofs are merely examples. As
+ sketched in \secref{sec:framework-subproof}, there is a fair amount
+ of flexibility in expressing Pure deductions in Isar. Here the user
+ is asked to express himself adequately, aiming at proof texts of
+ literary quality.
+*}
+
+end %visible
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/Framework.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,1017 @@
+theory Framework
+imports Main
+begin
+
+chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
+
+text {*
+ Isabelle/Isar
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
+ is intended as a generic framework for developing formal
+ mathematical documents with full proof checking. Definitions and
+ proofs are organized as theories. An assembly of theory sources may
+ be presented as a printed document; see also
+ \chref{ch:document-prep}.
+
+ The main objective of Isar is the design of a human-readable
+ structured proof language, which is called the ``primary proof
+ format'' in Isar terminology. Such a primary proof language is
+ somewhere in the middle between the extremes of primitive proof
+ objects and actual natural language. In this respect, Isar is a bit
+ more formalistic than Mizar
+ \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
+ using logical symbols for certain reasoning schemes where Mizar
+ would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
+ further comparisons of these systems.
+
+ So Isar challenges the traditional way of recording informal proofs
+ in mathematical prose, as well as the common tendency to see fully
+ formal proofs directly as objects of some logical calculus (e.g.\
+ @{text "\<lambda>"}-terms in a version of type theory). In fact, Isar is
+ better understood as an interpreter of a simple block-structured
+ language for describing the data flow of local facts and goals,
+ interspersed with occasional invocations of proof methods.
+ Everything is reduced to logical inferences internally, but these
+ steps are somewhat marginal compared to the overall bookkeeping of
+ the interpretation process. Thanks to careful design of the syntax
+ and semantics of Isar language elements, a formal record of Isar
+ instructions may later appear as an intelligible text to the
+ attentive reader.
+
+ The Isar proof language has emerged from careful analysis of some
+ inherent virtues of the existing logical framework of Isabelle/Pure
+ \cite{paulson-found,paulson700}, notably composition of higher-order
+ natural deduction rules, which is a generalization of Gentzen's
+ original calculus \cite{Gentzen:1935}. The approach of generic
+ inference systems in Pure is continued by Isar towards actual proof
+ texts.
+
+ Concrete applications require another intermediate layer: an
+ object-logic. Isabelle/HOL \cite{isa-tutorial} (simply-typed
+ set-theory) is being used most of the time; Isabelle/ZF
+ \cite{isabelle-ZF} is less extensively developed, although it would
+ probably fit better for classical mathematics.
+
+ \medskip In order to illustrate natural deduction in Isar, we shall
+ refer to the background theory and library of Isabelle/HOL. This
+ includes common notions of predicate logic, naive set-theory etc.\
+ using fairly standard mathematical notation. From the perspective
+ of generic natural deduction there is nothing special about the
+ logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
+ @{text "\<exists>"}, etc.), only the resulting reasoning principles are
+ relevant to the user. There are similar rules available for
+ set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
+ "\<Union>"}, etc.), or any other theory developed in the library (lattice
+ theory, topology etc.).
+
+ Subsequently we briefly review fragments of Isar proof texts
+ corresponding directly to such general deduction schemes. The
+ examples shall refer to set-theory, to minimize the danger of
+ understanding connectives of predicate logic as something special.
+
+ \medskip The following deduction performs @{text "\<inter>"}-introduction,
+ working forwards from assumptions towards the conclusion. We give
+ both the Isar text, and depict the primitive rule involved, as
+ determined by unification of the problem against rules that are
+ declared in the library context.
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ assume "x \<in> A" and "x \<in> B"
+ then have "x \<in> A \<inter> B" ..
+(*<*)
+qed
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+ \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+ \medskip\noindent Note that @{command assume} augments the proof
+ context, @{command then} indicates that the current fact shall be
+ used in the next step, and @{command have} states an intermediate
+ goal. The two dots ``@{command ".."}'' refer to a complete proof of
+ this claim, using the indicated facts and a canonical rule from the
+ context. We could have been more explicit here by spelling out the
+ final proof step via the @{command "by"} command:
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ assume "x \<in> A" and "x \<in> B"
+ then have "x \<in> A \<inter> B" by (rule IntI)
+(*<*)
+qed
+(*>*)
+
+text {*
+ \noindent The format of the @{text "\<inter>"}-introduction rule represents
+ the most basic inference, which proceeds from given premises to a
+ conclusion, without any nested proof context involved.
+
+ The next example performs backwards introduction on @{term "\<Inter>\<A>"},
+ the intersection of all sets within a given set. This requires a
+ nested proof of set membership within a local context, where @{term
+ A} is an arbitrary-but-fixed member of the collection:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ have "x \<in> \<Inter>\<A>"
+ proof
+ fix A
+ assume "A \<in> \<A>"
+ show "x \<in> A" sorry %noproof
+ qed
+(*<*)
+qed
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+ \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+ \medskip\noindent This Isar reasoning pattern again refers to the
+ primitive rule depicted above. The system determines it in the
+ ``@{command proof}'' step, which could have been spelt out more
+ explicitly as ``@{command proof}~@{text "(rule InterI)"}''. Note
+ that the rule involves both a local parameter @{term "A"} and an
+ assumption @{prop "A \<in> \<A>"} in the nested reasoning. This kind of
+ compound rule typically demands a genuine sub-proof in Isar, working
+ backwards rather than forwards as seen before. In the proof body we
+ encounter the @{command fix}-@{command assume}-@{command show}
+ outline of nested sub-proofs that is typical for Isar. The final
+ @{command show} is like @{command have} followed by an additional
+ refinement of the enclosing claim, using the rule derived from the
+ proof body.
+
+ \medskip The next example involves @{term "\<Union>\<A>"}, which can be
+ characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
+ \<in> A \<and> A \<in> \<A>"}. The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
+  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but allows a local
+  @{term "A"} to be obtained directly such that @{prop "x \<in> A"} and
+  @{prop "A \<in> \<A>"} hold. This corresponds to the following Isar proof and
+ inference rule, respectively:
+*}
+
+text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ assume "x \<in> \<Union>\<A>"
+ then have C
+ proof
+ fix A
+ assume "x \<in> A" and "A \<in> \<A>"
+ show C sorry %noproof
+ qed
+(*<*)
+qed
+(*>*)
+
+text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
+
+text {*
+ \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
+*}
+
+text_raw {*\end{minipage}*}
+
+text {*
+ \medskip\noindent Although the Isar proof follows the natural
+  deduction rule closely, the text does not read as naturally as
+  anticipated. There is a double occurrence of an arbitrary
+ conclusion @{prop "C"}, which represents the final result, but is
+ irrelevant for now. This issue arises for any elimination rule
+ involving local parameters. Isar provides the derived language
+ element @{command obtain}, which is able to perform the same
+ elimination proof more conveniently:
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ assume "x \<in> \<Union>\<A>"
+ then obtain A where "x \<in> A" and "A \<in> \<A>" ..
+(*<*)
+qed
+(*>*)
+
+text {*
+  \noindent Here we avoid mentioning the final conclusion @{prop "C"}
+ and return to plain forward reasoning. The rule involved in the
+ ``@{command ".."}'' proof is the same as before.
+*}
+
+
+section {* The Pure framework \label{sec:framework-pure} *}
+
+text {*
+ The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
+ fragment of higher-order logic \cite{church40}. In type-theoretic
+ parlance, there are three levels of @{text "\<lambda>"}-calculus with
+ corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
+
+ \medskip
+ \begin{tabular}{ll}
+ @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
+ @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
+ @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
+ \end{tabular}
+ \medskip
+
+  \noindent Here only the types of syntactic terms and the
+ propositions of proof terms have been shown. The @{text
+ "\<lambda>"}-structure of proofs can be recorded as an optional feature of
+ the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
+ the formal system can never depend on them due to \emph{proof
+ irrelevance}.
+
+ On top of this most primitive layer of proofs, Pure implements a
+ generic calculus for nested natural deduction rules, similar to
+ \cite{Schroeder-Heister:1984}. Here object-logic inferences are
+ internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
+ Combining such rule statements may involve higher-order unification
+ \cite{paulson-natural}.
+*}
+
+
+subsection {* Primitive inferences *}
+
+text {*
+ Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
+ \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
+ implicit thanks to type-inference; terms of type @{text "prop"} are
+ called propositions. Logical statements are composed via @{text "\<And>x
+ :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}. Primitive reasoning operates on
+ judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
+ and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
+ fixed parameters @{text "x\<^isub>1, \<dots>, x\<^isub>m"} and hypotheses
+ @{text "A\<^isub>1, \<dots>, A\<^isub>n"} from the context @{text "\<Gamma>"};
+ the corresponding proof terms are left implicit. The subsequent
+ inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
+ collection of axioms:
+
+ \[
+ \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
+ \qquad
+ \infer{@{text "A \<turnstile> A"}}{}
+ \]
+
+ \[
+ \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
+ \qquad
+ \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
+ \]
+
+ \[
+ \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
+ \qquad
+ \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
+ \]
+
+ Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
+ prop"} with axioms for reflexivity, substitution, extensionality,
+ and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
+
+ \medskip An object-logic introduces another layer on top of Pure,
+ e.g.\ with types @{text "i"} for individuals and @{text "o"} for
+ propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
+ (implicit) derivability judgment and connectives like @{text "\<and> :: o
+ \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
+ rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
+ x) \<Longrightarrow> \<forall>x. B x"}. Derived object rules are represented as theorems of
+ Pure. After the initial object-logic setup, further axiomatizations
+ are usually avoided; plain definitions and derived principles are
+ used exclusively.
+*}
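+
+text {*
+  \noindent For illustration, the statement of such an object rule
+  may be recovered as a theorem by a small canonical proof.  Note
+  that the ``@{command ".."}'' step below implicitly uses the library
+  rule @{text conjI} itself, so this is merely an illustration of the
+  format, not a genuine derivation:
+*}
+
+lemma "A \<Longrightarrow> B \<Longrightarrow> A \<and> B"
+proof -
+  assume A and B
+  then show "A \<and> B" ..
+qed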
+
+
+subsection {* Reasoning with rules \label{sec:framework-resolution} *}
+
+text {*
+ Primitive inferences mostly serve foundational purposes. The main
+ reasoning mechanisms of Pure operate on nested natural deduction
+ rules expressed as formulae, using @{text "\<And>"} to bind local
+ parameters and @{text "\<Longrightarrow>"} to express entailment. Multiple
+ parameters and premises are represented by repeating these
+ connectives in a right-associative manner.
+
+ Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
+ @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
+ that rule statements always observe the normal form where
+ quantifiers are pulled in front of implications at each level of
+ nesting. This means that any Pure proposition may be presented as a
+ \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
+ form @{text "\<And>x\<^isub>1 \<dots> x\<^isub>m. H\<^isub>1 \<Longrightarrow> \<dots> H\<^isub>n \<Longrightarrow>
+ A"} for @{text "m, n \<ge> 0"}, and @{text "A"} atomic, and @{text
+ "H\<^isub>1, \<dots>, H\<^isub>n"} being recursively of the same format.
+ Following the convention that outermost quantifiers are implicit,
+ Horn clauses @{text "A\<^isub>1 \<Longrightarrow> \<dots> A\<^isub>n \<Longrightarrow> A"} are a special
+ case of this.
+
+  For example, the @{text "\<inter>"}-introduction rule encountered before is
+ represented as a Pure theorem as follows:
+ \[
+ @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
+ \]
+
+ \noindent This is a plain Horn clause, since no further nesting on
+ the left is involved. The general @{text "\<Inter>"}-introduction
+ corresponds to a Hereditary Harrop Formula with one additional level
+ of nesting:
+ \[
+ @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
+ \]
+
+ \medskip Goals are also represented as rules: @{text "A\<^isub>1 \<Longrightarrow>
+ \<dots> A\<^isub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^isub>1, \<dots>,
+ A\<^isub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
+  goal is finished. To allow @{text "C"} to be a rule statement
+ itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
+ prop"}, which is defined as identity and hidden from the user. We
+ initialize and finish goal states as follows:
+
+ \[
+ \begin{array}{c@ {\qquad}c}
+ \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
+ \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
+ \end{array}
+ \]
+
+ \noindent Goal states are refined in intermediate proof steps until
+ a finished form is achieved. Here the two main reasoning principles
+ are @{inference resolution}, for back-chaining a rule against a
+ sub-goal (replacing it by zero or more sub-goals), and @{inference
+ assumption}, for solving a sub-goal (finding a short-circuit with
+ local assumptions). Below @{text "\<^vec>x"} stands for @{text
+ "x\<^isub>1, \<dots>, x\<^isub>n"} (@{text "n \<ge> 0"}).
+
+ \[
+ \infer[(@{inference_def resolution})]
+ {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+ {\begin{tabular}{rl}
+ @{text "rule:"} &
+ @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+ @{text "goal:"} &
+ @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+ @{text "goal unifier:"} &
+ @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+ \end{tabular}}
+ \]
+
+ \medskip
+
+ \[
+ \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
+ {\begin{tabular}{rl}
+ @{text "goal:"} &
+ @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
+ @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
+ \end{tabular}}
+ \]
+
+ The following trace illustrates goal-oriented reasoning in
+ Isabelle/Pure:
+
+ {\footnotesize
+ \medskip
+ \begin{tabular}{r@ {\quad}l}
+ @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
+ @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
+ @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
+ @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
+ @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
+ @{text "#\<dots>"} & @{text "(assumption)"} \\
+ @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
+ \end{tabular}
+ \medskip
+ }
+
+ Compositions of @{inference assumption} after @{inference
+  resolution} occur quite often, typically in elimination steps.
+ Traditional Isabelle tactics accommodate this by a combined
+ @{inference_def elim_resolution} principle. In contrast, Isar uses
+ a slightly more refined combination, where the assumptions to be
+ closed are marked explicitly, using again the protective marker
+ @{text "#"}:
+
+ \[
+ \infer[(@{inference refinement})]
+ {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
+ {\begin{tabular}{rl}
+ @{text "sub\<dash>proof:"} &
+ @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
+ @{text "goal:"} &
+ @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
+ @{text "goal unifier:"} &
+ @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
+ @{text "assm unifiers:"} &
+ @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
+ & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
+ \end{tabular}}
+ \]
+
+ \noindent Here the @{text "sub\<dash>proof"} rule stems from the
+ main @{command fix}-@{command assume}-@{command show} outline of
+ Isar (cf.\ \secref{sec:framework-subproof}): each assumption
+ indicated in the text results in a marked premise @{text "G"} above.
+ The marking enforces resolution against one of the sub-goal's
+ premises. Consequently, @{command fix}-@{command assume}-@{command
+  show} makes it possible to fit the result of a sub-proof quite
+  robustly into a pending sub-goal, while maintaining a good measure
+  of flexibility.
+*}
+
+
+section {* The Isar proof language \label{sec:framework-isar} *}
+
+text {*
+ Structured proofs are presented as high-level expressions for
+ composing entities of Pure (propositions, facts, and goals). The
+  Isar proof language allows reasoning to be organized within the
+ underlying rule calculus of Pure, but Isar is not another logical
+ calculus!
+
+ Isar is an exercise in sound minimalism. Approximately half of the
+ language is introduced as primitive, the rest defined as derived
+ concepts. The following grammar describes the core language
+ (category @{text "proof"}), which is embedded into theory
+ specification elements such as @{command theorem}; see also
+ \secref{sec:framework-stmt} for the separate category @{text
+ "statement"}.
+
+ \medskip
+ \begin{tabular}{rcl}
+ @{text "theory\<dash>stmt"} & = & @{command "theorem"}~@{text "statement proof |"}~~@{command "definition"}~@{text "\<dots> | \<dots>"} \\[1ex]
+
+ @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
+
+ @{text prfx} & = & @{command "using"}~@{text "facts"} \\
+ & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
+
+ @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
+ & @{text "|"} & @{command "next"} \\
+ & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
+ & @{text "|"} & @{command "let"}~@{text "term = term"} \\
+ & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
+ & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
+ & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
+ @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
+ & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
+ \end{tabular}
+
+ \medskip Simultaneous propositions or facts may be separated by the
+ @{keyword "and"} keyword.
+
+ \medskip The syntax for terms and propositions is inherited from
+ Pure (and the object-logic). A @{text "pattern"} is a @{text
+ "term"} with schematic variables, to be bound by higher-order
+ matching.
+
+ \medskip Facts may be referenced by name or proposition. For
+ example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
+ becomes available both as @{text "a"} and
+ \isacharbackquoteopen@{text "A"}\isacharbackquoteclose. Moreover,
+ fact expressions may involve attributes that modify either the
+ theorem or the background context. For example, the expression
+ ``@{text "a [OF b]"}'' refers to the composition of two facts
+ according to the @{inference resolution} inference of
+ \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
+ declares a fact as introduction rule in the context.
+
+ The special fact called ``@{fact this}'' always refers to the last
+ result, as produced by @{command note}, @{command assume}, @{command
+ have}, or @{command show}. Since @{command note} occurs
+  frequently together with @{command then}, we provide some
+ abbreviations:
+
+ \medskip
+ \begin{tabular}{rcl}
+ @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
+ @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
+ \end{tabular}
+ \medskip
+
+ The @{text "method"} category is essentially a parameter and may be
+ populated later. Methods use the facts indicated by @{command
+ "then"} or @{command using}, and then operate on the goal state.
+ Some basic methods are predefined: ``@{method "-"}'' leaves the goal
+ unchanged, ``@{method this}'' applies the facts as rules to the
+ goal, ``@{method "rule"}'' applies the facts to another rule and the
+ result to the goal (both ``@{method this}'' and ``@{method rule}''
+ refer to @{inference resolution} of
+ \secref{sec:framework-resolution}). The secondary arguments to
+ ``@{method rule}'' may be specified explicitly as in ``@{text "(rule
+ a)"}'', or picked from the context. In the latter case, the system
+ first tries rules declared as @{attribute (Pure) elim} or
+ @{attribute (Pure) dest}, followed by those declared as @{attribute
+ (Pure) intro}.
+
+ The default method for @{command proof} is ``@{method rule}''
+ (arguments picked from the context), for @{command qed} it is
+ ``@{method "-"}''. Further abbreviations for terminal proof steps
+ are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
+ ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
+ "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
+ "by"}~@{method rule}, and ``@{command "."}'' for ``@{command
+ "by"}~@{method this}''. The @{command unfolding} element operates
+ directly on the current facts and goal by applying equalities.
+
+ \medskip Block structure can be indicated explicitly by ``@{command
+ "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
+ already involves implicit nesting. In any case, @{command next}
+ jumps into the next section of a block, i.e.\ it acts like closing
+ an implicit block scope and opening another one; there is no direct
+ correspondence to subgoals here.
+
+ The remaining elements @{command fix} and @{command assume} build up
+ a local context (see \secref{sec:framework-context}), while
+ @{command show} refines a pending sub-goal by the rule resulting
+ from a nested sub-proof (see \secref{sec:framework-subproof}).
+ Further derived concepts will support calculational reasoning (see
+ \secref{sec:framework-calc}).
+*}
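+
+text {*
+  \noindent By way of illustration, some of these abbreviations may
+  be combined in a trivial proof snippet, with free propositions
+  @{text "A"} and @{text "B"} as before: named facts, @{command
+  from}, and the terminal proof step ``@{command ".."}'':
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+  have a: A sorry %noproof
+  have b: B sorry %noproof
+  from a and b have "A \<and> B" ..
+(*<*)
+qed
+(*>*)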
+
+
+subsection {* Context elements \label{sec:framework-context} *}
+
+text {*
+ In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
+ essentially acts like a proof context. Isar elaborates this idea
+ towards a higher-level notion, with additional information for
+ type-inference, term abbreviations, local facts, hypotheses etc.
+
+ The element @{command fix}~@{text "x :: \<alpha>"} declares a local
+ parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
+ results exported from the context, @{text "x"} may become anything.
+ The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
+ general interface to hypotheses: ``@{command assume}~@{text
+ "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
+ included inference tells how to discharge @{text A} from results
+ @{text "A \<turnstile> B"} later on. There is no user-syntax for @{text
+ "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
+ commands are defined in ML.
+
+ At the user-level, the default inference for @{command assume} is
+ @{inference discharge} as given below. The additional variants
+ @{command presume} and @{command def} are defined as follows:
+
+ \medskip
+ \begin{tabular}{rcl}
+ @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<dash>discharge\<guillemotright> A"} \\
+ @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
+ \end{tabular}
+ \medskip
+
+ \[
+ \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+ \]
+ \[
+ \infer[(@{inference_def "weak\<dash>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
+ \]
+ \[
+ \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
+ \]
+
+ \medskip Note that @{inference discharge} and @{inference
+ "weak\<dash>discharge"} differ in the marker for @{prop A}, which is
+ relevant when the result of a @{command fix}-@{command
+ assume}-@{command show} outline is composed with a pending goal,
+ cf.\ \secref{sec:framework-subproof}.
+
+ The most interesting derived context element in Isar is @{command
+ obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
+ elimination steps in a purely forward manner. The @{command obtain}
+ command takes a specification of parameters @{text "\<^vec>x"} and
+ assumptions @{text "\<^vec>A"} to be added to the context, together
+ with a proof of a case rule stating that this extension is
+ conservative (i.e.\ may be removed from closed results later on):
+
+ \medskip
+ \begin{tabular}{l}
+ @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
+ \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis\<rangle>"} \\
+ \quad @{command proof}~@{method "-"} \\
+ \qquad @{command fix}~@{text thesis} \\
+ \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
+ \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
+ \quad @{command qed} \\
+ \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
+ \end{tabular}
+ \medskip
+
+ \[
+ \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
+ \begin{tabular}{rl}
+ @{text "case:"} &
+ @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
+ @{text "result:"} &
+ @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
+ \end{tabular}}
+ \]
+
+ \noindent Here the name ``@{text thesis}'' is a specific convention
+ for an arbitrary-but-fixed proposition; in the primitive natural
+ deduction rules shown before we have occasionally used @{text C}.
+ The whole statement of ``@{command obtain}~@{text x}~@{keyword
+ "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
+ may be assumed for some arbitrary-but-fixed @{text "x"}. Also note
+ that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
+ is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
+ latter involves multiple sub-goals.
+
+ \medskip The subsequent Isar proof texts explain all context
+ elements introduced above using the formal proof language itself.
+ After finishing a local proof within a block, we indicate the
+ exported result via @{command note}.
+*}
+
+(*<*)
+theorem True
+proof
+(*>*)
+ txt_raw {* \begin{minipage}[t]{0.4\textwidth} *}
+ {
+ fix x
+ have "B x" sorry %noproof
+ }
+ note `\<And>x. B x`
+ txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.4\textwidth} *}(*<*)next(*>*)
+ {
+ assume A
+ have B sorry %noproof
+ }
+ note `A \<Longrightarrow> B`
+ txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth} *}(*<*)next(*>*)
+ {
+ def x \<equiv> a
+ have "B x" sorry %noproof
+ }
+ note `B a`
+ txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.4\textwidth} *}(*<*)next(*>*)
+ {
+ obtain x where "A x" sorry %noproof
+ have B sorry %noproof
+ }
+ note `B`
+ txt_raw {* \end{minipage} *}
+(*<*)
+qed
+(*>*)
+
+text {*
+ \bigskip\noindent This illustrates the meaning of Isar context
+ elements without goals getting in between.
+*}
+
+subsection {* Structured statements \label{sec:framework-stmt} *}
+
+text {*
+ The category @{text "statement"} of top-level theorem specifications
+ is defined as follows:
+
+ \medskip
+ \begin{tabular}{rcl}
+ @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
+ & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
+
+ @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
+ & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
+
+ @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
+ & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
+ & & \quad @{text "\<BBAR> \<dots>"} \\
+ \end{tabular}
+
+ \medskip\noindent A simple @{text "statement"} consists of named
+ propositions. The full form admits local context elements followed
+ by the actual conclusions, such as ``@{keyword "fixes"}~@{text
+ x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
+ x"}''. The final result emerges as a Pure rule after discharging
+ the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
+
+ The @{keyword "obtains"} variant is another abbreviation defined
+ below; unlike @{command obtain} (cf.\
+ \secref{sec:framework-context}) there may be several ``cases''
+ separated by ``@{text "\<BBAR>"}'', each consisting of several
+ parameters (@{text "vars"}) and several premises (@{text "props"}).
+ This specifies multi-branch elimination rules.
+
+ \medskip
+ \begin{tabular}{l}
+ @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x \<BBAR> \<dots> \<equiv>"} \\[0.5ex]
+ \quad @{text "\<FIXES> thesis"} \\
+ \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis \<AND> \<dots>"} \\
+ \quad @{text "\<SHOWS> thesis"} \\
+ \end{tabular}
+ \medskip
+
+ Presenting structured statements in such an ``open'' format usually
+ simplifies the subsequent proof, because the outer structure of the
+ problem is already laid out directly. E.g.\ consider the following
+ canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
+ respectively:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+theorem
+ fixes x and y
+ assumes "A x" and "B y"
+ shows "C x y"
+proof -
+ from `A x` and `B y`
+ show "C x y" sorry %noproof
+qed
+
+text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+theorem
+ obtains x and y
+ where "A x" and "B y"
+proof -
+ have "A a" and "B b" sorry %noproof
+ then show thesis ..
+qed
+
+text_raw {*\end{minipage}*}
+
+text {*
+ \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
+ x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
+ y"}\isacharbackquoteclose\ are referenced immediately; there is no
+ need to decompose the logical rule structure again. In the second
+ proof the final ``@{command then}~@{command show}~@{text
+ thesis}~@{command ".."}'' involves the local rule case @{text "\<And>x
+ y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
+ "a"} and @{text "b"} produced in the body.
+*}
+
+
+subsection {* Structured proof refinement \label{sec:framework-subproof} *}
+
+text {*
+ By breaking up the grammar for the Isar proof language, we may
+ understand a proof text as a linear sequence of individual proof
+ commands. These are interpreted as transitions of the Isar virtual
+ machine (Isar/VM), which operates on a block-structured
+ configuration in single steps. This allows users to write proof
+ texts in an incremental manner, and inspect intermediate
+ configurations for debugging.
+
+ The basic idea is analogous to evaluating algebraic expressions on a
+ stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
+ of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
+ In Isar the algebraic values are facts or goals, and the operations
+ are inferences.
+
+  \medskip The Isar/VM state maintains a stack of nodes; each node
+  contains the local proof context, the linguistic mode, and a pending
+  goal (optional). The mode determines the type of transition that
+  may be performed next; it essentially alternates between forward and
+ backward reasoning, with an intermediate stage for chained facts
+ (see \figref{fig:isar-vm}).
+
+ \begin{figure}[htb]
+ \begin{center}
+ \includegraphics[width=0.8\textwidth]{Thy/document/isar-vm}
+ \end{center}
+ \caption{Isar/VM modes}\label{fig:isar-vm}
+ \end{figure}
+
+ For example, in @{text "state"} mode Isar acts like a mathematical
+ scratch-pad, accepting declarations like @{command fix}, @{command
+ assume}, and claims like @{command have}, @{command show}. A goal
+ statement changes the mode to @{text "prove"}, which means that we
+ may now refine the problem via @{command unfolding} or @{command
+ proof}. Then we are again in @{text "state"} mode of a proof body,
+ which may issue @{command show} statements to solve pending
+ sub-goals. A concluding @{command qed} will return to the original
+ @{text "state"} mode one level upwards. The subsequent Isar/VM
+ trace indicates block structure, linguistic mode, goal state, and
+ inferences:
+*}
+
+text_raw {* \begingroup\footnotesize *}
+(*<*)lemma True
+proof
+(*>*)
+ txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
+ have "A \<longrightarrow> B"
+ proof
+ assume A
+ show B
+ sorry %noproof
+ qed
+ txt_raw {* \end{minipage}\quad
+\begin{minipage}[t]{0.06\textwidth}
+@{text "begin"} \\
+\\
+\\
+@{text "begin"} \\
+@{text "end"} \\
+@{text "end"} \\
+\end{minipage}
+\begin{minipage}[t]{0.08\textwidth}
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+@{text "prove"} \\
+@{text "state"} \\
+@{text "state"} \\
+\end{minipage}\begin{minipage}[t]{0.35\textwidth}
+@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
+\\
+\\
+@{text "#(A \<longrightarrow> B)"} \\
+@{text "A \<longrightarrow> B"} \\
+\end{minipage}\begin{minipage}[t]{0.4\textwidth}
+@{text "(init)"} \\
+@{text "(resolution impI)"} \\
+\\
+\\
+@{text "(refinement #A \<Longrightarrow> B)"} \\
+@{text "(finish)"} \\
+\end{minipage} *}
+(*<*)
+qed
+(*>*)
+text_raw {* \endgroup *}
+
+text {*
+ \noindent Here the @{inference refinement} inference from
+ \secref{sec:framework-resolution} mediates composition of Isar
+ sub-proofs nicely. Observe that this principle incorporates some
+ degree of freedom in proof composition. In particular, the proof
+ body allows parameters and assumptions to be re-ordered, or commuted
+ according to Hereditary Harrop Form. Moreover, context elements
+ that are not used in a sub-proof may be omitted altogether. For
+ example:
+*}
+
+text_raw {*\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+ proof -
+ fix x and y
+ assume "A x" and "B y"
+ show "C x y" sorry %noproof
+ qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+ have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+ proof -
+ fix x assume "A x"
+ fix y assume "B y"
+ show "C x y" sorry %noproof
+ qed
+
+txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
+
+(*<*)
+next
+(*>*)
+ have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+ proof -
+ fix y assume "B y"
+ fix x assume "A x"
+ show "C x y" sorry
+ qed
+
+txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
+(*<*)
+next
+(*>*)
+ have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
+ proof -
+ fix y assume "B y"
+ fix x
+ show "C x y" sorry
+ qed
+(*<*)
+qed
+(*>*)
+
+text_raw {*\end{minipage}*}
+
+text {*
+ \medskip\noindent Such ``peephole optimizations'' of Isar texts are
+ practically important to improve readability, by rearranging
+  context elements according to the natural flow of reasoning in the
+ body, while still observing the overall scoping rules.
+
+ \medskip This illustrates the basic idea of structured proof
+ processing in Isar. The main mechanisms are based on natural
+ deduction rule composition within the Pure framework. In
+ particular, there are no direct operations on goal states within the
+ proof body. Moreover, there is no hidden automated reasoning
+ involved, just plain unification.
+*}
+
+
+subsection {* Calculational reasoning \label{sec:framework-calc} *}
+
+text {*
+ The existing Isar infrastructure is sufficiently flexible to support
+  calculational reasoning (chains of transitivity steps) as a derived
+ concept. The generic proof elements introduced below depend on
+ rules declared as @{attribute trans} in the context. It is left to
+ the object-logic to provide a suitable rule collection for mixed
+ relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
+ @{text "\<subseteq>"} etc. Due to the flexibility of rule composition
+ (\secref{sec:framework-resolution}), substitution of equals by
+ equals is covered as well, even substitution of inequalities
+ involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
+ and \cite{Bauer-Wenzel:2001}.
+
+ The generic calculational mechanism is based on the observation that
+ rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
+ proceed from the premises towards the conclusion in a deterministic
+ fashion. Thus we may reason in forward mode, feeding intermediate
+ results into rules selected from the context. The course of
+ reasoning is organized by maintaining a secondary fact called
+ ``@{fact calculation}'', apart from the primary ``@{fact this}''
+ already provided by the Isar primitives. In the definitions below,
+ @{attribute OF} refers to @{inference resolution}
+ (\secref{sec:framework-resolution}) with multiple rule arguments,
+ and @{text "trans"} represents to a suitable rule from the context:
+
+ \begin{matharray}{rcl}
+ @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
+ @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
+ @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
+ \end{matharray}
+
+  \noindent The start of a calculation is determined implicitly in
+  the text: the first @{command also} sets @{fact calculation} to the
+  current result; any subsequent occurrence updates @{fact calculation} by
+ combination with the next result and a transitivity rule. The
+ calculational sequence is concluded via @{command finally}, where
+ the final result is exposed for use in a concluding claim.
+
+ Here is a canonical proof pattern, using @{command have} to
+ establish the intermediate results:
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+ have "a = b" sorry
+ also have "\<dots> = c" sorry
+ also have "\<dots> = d" sorry
+ finally have "a = d" .
+(*<*)
+qed
+(*>*)
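+
+text {*
+  \noindent Mixed relations work the same way, provided that suitable
+  transitivity rules are declared in the context. Here is a small
+  variation of the above pattern, using type @{typ nat} for
+  concreteness so that the standard order rules apply:
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+  have "(a::nat) \<le> b" sorry
+  also have "\<dots> < c" sorry
+  also have "\<dots> = d" sorry
+  finally have "a < d" .
+(*<*)
+qed
+(*>*)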
+
+text {*
+ \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
+ provided by the Isabelle/Isar syntax layer: it statically refers to
+ the right-hand side argument of the previous statement given in the
+ text. Thus it happens to coincide with relevant sub-expressions in
+  the calculational chain, but the exact correspondence depends on
+  the transitivity rules involved.
+
+ \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
+ transitivities with only one premise. Isar maintains a separate
+ rule collection declared via the @{attribute sym} attribute, to be
+ used in fact expressions ``@{text "a [symmetric]"}'', or single-step
+ proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
+ have}~@{text "y = x"}~@{command ".."}''.
+*}
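+
+text {*
+  \noindent The latter single-step pattern, spelt out as a formal
+  proof text:
+*}
+
+(*<*)
+lemma True
+proof
+(*>*)
+  assume "x = y"
+  then have "y = x" ..
+(*<*)
+qed
+(*>*)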
+
+end
\ No newline at end of file
--- a/doc-src/IsarRef/Thy/Inner_Syntax.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Inner_Syntax.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Inner_Syntax
imports Main
begin
@@ -370,7 +368,7 @@
\end{matharray}
\begin{rail}
- ('notation' | 'no\_notation') target? mode? (nameref structmixfix + 'and')
+ ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and')
;
\end{rail}
@@ -525,13 +523,15 @@
& @{text "|"} & @{text "tid | tvar | "}@{verbatim "_"} \\
& @{text "|"} & @{text "tid"} @{verbatim "::"} @{text "sort | tvar "}@{verbatim "::"} @{text "sort | "}@{verbatim "_"} @{verbatim "::"} @{text "sort"} \\
& @{text "|"} & @{text "id | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) id | "}@{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text id} \\
- & @{text "|"} & @{text "longid | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) longid | "}@{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text longid} \\
+ & @{text "|"} & @{text "longid | type\<^sup>(\<^sup>1\<^sup>0\<^sup>0\<^sup>0\<^sup>) longid"} \\
+ & @{text "|"} & @{verbatim "("} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim ")"} @{text longid} \\
& @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{text "type\<^sup>(\<^sup>1\<^sup>)"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{verbatim "=>"} @{text type} & @{text "(0)"} \\
& @{text "|"} & @{verbatim "["} @{text type} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text type} @{verbatim "]"} @{text "\<Rightarrow>"} @{text type} & @{text "(0)"} \\\\
- @{syntax_def (inner) sort} & = & @{text "id | longid | "}@{verbatim "{}"}@{text " | "}@{verbatim "{"} @{text "(id | longid)"} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text "(id | longid)"} @{verbatim "}"} \\
+ @{syntax_def (inner) sort} & = & @{text "id | longid | "}@{verbatim "{}"} \\
+ & @{text "|"} & @{verbatim "{"} @{text "(id | longid)"} @{verbatim ","} @{text "\<dots>"} @{verbatim ","} @{text "(id | longid)"} @{verbatim "}"} \\
\end{supertabular}
\end{center}
--- a/doc-src/IsarRef/Thy/Introduction.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Introduction.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,3 @@
-(* $Id$ *)
-
theory Introduction
imports Main
begin
@@ -12,27 +10,27 @@
The \emph{Isabelle} system essentially provides a generic
infrastructure for building deductive systems (programmed in
Standard ML), with a special focus on interactive theorem proving in
- higher-order logics. In the olden days even end-users would refer
- to certain ML functions (goal commands, tactics, tacticals etc.) to
- pursue their everyday theorem proving tasks
- \cite{isabelle-intro,isabelle-ref}.
+ higher-order logics. Many years ago, even end-users would refer to
+ certain ML functions (goal commands, tactics, tacticals etc.) to
+ pursue their everyday theorem proving tasks.
In contrast \emph{Isar} provides an interpreted language environment
of its own, which has been specifically tailored for the needs of
theory and proof development. Compared to raw ML, the Isabelle/Isar
top-level provides a more robust and comfortable development
- platform, with proper support for theory development graphs,
- single-step transactions with unlimited undo, etc. The
- Isabelle/Isar version of the \emph{Proof~General} user interface
- \cite{proofgeneral,Aspinall:TACAS:2000} provides an adequate
- front-end for interactive theory and proof development in this
- advanced theorem proving environment.
+ platform, with proper support for theory development graphs, managed
+ transactions with unlimited undo etc. The Isabelle/Isar version of
+ the \emph{Proof~General} user interface
+ \cite{proofgeneral,Aspinall:TACAS:2000} provides a decent front-end
+ for interactive theory and proof development in this advanced
+ theorem proving environment, even though it is somewhat biased
+ towards old-style proof scripts.
\medskip Apart from the technical advances over bare-bones ML
programming, the main purpose of the Isar language is to provide a
conceptually different view on machine-checked proofs
- \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. ``Isar'' stands for
- ``Intelligible semi-automated reasoning''. Drawing from both the
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. \emph{Isar} stands for
+ \emph{Intelligible semi-automated reasoning}. Drawing from both the
traditions of informal mathematical proof texts and high-level
programming languages, Isar offers a versatile environment for
structured formal proof documents. Thus properly written Isar
@@ -47,12 +45,12 @@
Despite its grand design of structured proof texts, Isar is able to
assimilate the old tactical style as an ``improper'' sub-language.
This provides an easy upgrade path for existing tactic scripts, as
- well as additional means for interactive experimentation and
- debugging of structured proofs. Isabelle/Isar supports a broad
- range of proof styles, both readable and unreadable ones.
+ well as some means for interactive experimentation and debugging of
+ structured proofs. Isabelle/Isar supports a broad range of proof
+ styles, both readable and unreadable ones.
- \medskip The Isabelle/Isar framework \cite{Wenzel:2006:Festschrift}
- is generic and should work reasonably well for any Isabelle
+ \medskip The generic Isabelle/Isar framework (see
+ \chref{ch:isar-framework}) works reasonably well for any Isabelle
object-logic that conforms to the natural deduction view of the
Isabelle/Pure framework. Specific language elements introduced by
the major object-logics are described in \chref{ch:hol}
@@ -72,194 +70,4 @@
context; other commands emulate old-style tactical theorem proving.
*}
-
-section {* User interfaces *}
-
-subsection {* Terminal sessions *}
-
-text {*
- The Isabelle \texttt{tty} tool provides a very interface for running
- the Isar interaction loop, with some support for command line
- editing. For example:
-\begin{ttbox}
-isabelle tty\medskip
-{\out Welcome to Isabelle/HOL (Isabelle2008)}\medskip
-theory Foo imports Main begin;
-definition foo :: nat where "foo == 1";
-lemma "0 < foo" by (simp add: foo_def);
-end;
-\end{ttbox}
-
- Any Isabelle/Isar command may be retracted by @{command undo}.
- See the Isabelle/Isar Quick Reference (\appref{ap:refcard}) for a
- comprehensive overview of available commands and other language
- elements.
-*}
-
-
-subsection {* Emacs Proof General *}
-
-text {*
- Plain TTY-based interaction as above used to be quite feasible with
- traditional tactic based theorem proving, but developing Isar
- documents really demands some better user-interface support. The
- Proof~General environment by David Aspinall
- \cite{proofgeneral,Aspinall:TACAS:2000} offers a generic Emacs
- interface for interactive theorem provers that organizes all the
- cut-and-paste and forward-backward walk through the text in a very
- neat way. In Isabelle/Isar, the current position within a partial
- proof document is equally important than the actual proof state.
- Thus Proof~General provides the canonical working environment for
- Isabelle/Isar, both for getting acquainted (e.g.\ by replaying
- existing Isar documents) and for production work.
-*}
-
-
-subsubsection{* Proof~General as default Isabelle interface *}
-
-text {*
- The Isabelle interface wrapper script provides an easy way to invoke
- Proof~General (including XEmacs or GNU Emacs). The default
- configuration of Isabelle is smart enough to detect the
- Proof~General distribution in several canonical places (e.g.\
- @{verbatim "$ISABELLE_HOME/contrib/ProofGeneral"}). Thus the
- capital @{verbatim Isabelle} executable would already refer to the
- @{verbatim "ProofGeneral/isar"} interface without further ado. The
- Isabelle interface script provides several options; pass @{verbatim
- "-?"} to see its usage.
-
- With the proper Isabelle interface setup, Isar documents may now be edited by
- visiting appropriate theory files, e.g.\
-\begin{ttbox}
-Isabelle \({\langle}isabellehome{\rangle}\)/src/HOL/Isar_examples/Summation.thy
-\end{ttbox}
- Beginners may note the tool bar for navigating forward and backward
- through the text (this depends on the local Emacs installation).
- Consult the Proof~General documentation \cite{proofgeneral} for
- further basic command sequences, in particular ``@{verbatim "C-c C-return"}''
- and ``@{verbatim "C-c u"}''.
-
- \medskip Proof~General may be also configured manually by giving
- Isabelle settings like this (see also \cite{isabelle-sys}):
-
-\begin{ttbox}
-ISABELLE_INTERFACE=\$ISABELLE_HOME/contrib/ProofGeneral/isar/interface
-PROOFGENERAL_OPTIONS=""
-\end{ttbox}
- You may have to change @{verbatim
- "$ISABELLE_HOME/contrib/ProofGeneral"} to the actual installation
- directory of Proof~General.
-
- \medskip Apart from the Isabelle command line, defaults for
- interface options may be given by the @{verbatim PROOFGENERAL_OPTIONS}
- setting. For example, the Emacs executable to be used may be
- configured in Isabelle's settings like this:
-\begin{ttbox}
-PROOFGENERAL_OPTIONS="-p xemacs-mule"
-\end{ttbox}
-
- Occasionally, a user's @{verbatim "~/.emacs"} file contains code
- that is incompatible with the (X)Emacs version used by
- Proof~General, causing the interface startup to fail prematurely.
- Here the @{verbatim "-u false"} option helps to get the interface
- process up and running. Note that additional Lisp customization
- code may reside in @{verbatim "proofgeneral-settings.el"} of
- @{verbatim "$ISABELLE_HOME/etc"} or @{verbatim
- "$ISABELLE_HOME_USER/etc"}.
-*}
-
-
-subsubsection {* The X-Symbol package *}
-
-text {*
- Proof~General incorporates a version of the Emacs X-Symbol package
- \cite{x-symbol}, which handles proper mathematical symbols displayed
- on screen. Pass option @{verbatim "-x true"} to the Isabelle
- interface script, or check the appropriate Proof~General menu
- setting by hand. The main challenge of getting X-Symbol to work
- properly is the underlying (semi-automated) X11 font setup.
-
- \medskip Using proper mathematical symbols in Isabelle theories can
- be very convenient for readability of large formulas. On the other
- hand, the plain ASCII sources easily become somewhat unintelligible.
- For example, @{text "\<Longrightarrow>"} would appear as @{verbatim "\<Longrightarrow>"} according
- the default set of Isabelle symbols. Nevertheless, the Isabelle
- document preparation system (see \chref{ch:document-prep}) will be
- happy to print non-ASCII symbols properly. It is even possible to
- invent additional notation beyond the display capabilities of Emacs
- and X-Symbol.
-*}
-
-
-section {* Isabelle/Isar theories *}
-
-text {*
- Isabelle/Isar offers the following main improvements over classic
- Isabelle.
-
- \begin{enumerate}
-
- \item A \emph{theory format} that integrates specifications and
- proofs, supporting interactive development and unlimited undo
- operation.
-
- \item A \emph{formal proof document language} designed to support
- intelligible semi-automated reasoning. Instead of putting together
- unreadable tactic scripts, the author is enabled to express the
- reasoning in way that is close to usual mathematical practice. The
- old tactical style has been assimilated as ``improper'' language
- elements.
-
- \item A simple document preparation system, for typesetting formal
- developments together with informal text. The resulting
- hyper-linked PDF documents are equally well suited for WWW
- presentation and as printed copies.
-
- \end{enumerate}
-
- The Isar proof language is embedded into the new theory format as a
- proper sub-language. Proof mode is entered by stating some
- @{command theorem} or @{command lemma} at the theory level, and
- left again with the final conclusion (e.g.\ via @{command qed}).
- A few theory specification mechanisms also require some proof, such
- as HOL's @{command typedef} which demands non-emptiness of the
- representing sets.
-*}
-
-
-section {* How to write Isar proofs anyway? \label{sec:isar-howto} *}
-
-text {*
- This is one of the key questions, of course. First of all, the
- tactic script emulation of Isabelle/Isar essentially provides a
- clarified version of the very same unstructured proof style of
- classic Isabelle. Old-time users should quickly become acquainted
- with that (slightly degenerative) view of Isar.
-
- Writing \emph{proper} Isar proof texts targeted at human readers is
- quite different, though. Experienced users of the unstructured
- style may even have to unlearn some of their habits to master proof
- composition in Isar. In contrast, new users with less experience in
- old-style tactical proving, but a good understanding of mathematical
- proof in general, often get started easier.
-
- \medskip The present text really is only a reference manual on
- Isabelle/Isar, not a tutorial. Nevertheless, we will attempt to
- give some clues of how the concepts introduced here may be put into
- practice. Especially note that \appref{ap:refcard} provides a quick
- reference card of the most common Isabelle/Isar language elements.
-
- Further issues concerning the Isar concepts are covered in the
- literature
- \cite{Wenzel:1999:TPHOL,Wiedijk:2000:MV,Bauer-Wenzel:2000:HB,Bauer-Wenzel:2001}.
- The author's PhD thesis \cite{Wenzel-PhD} presently provides the
- most complete exposition of Isar foundations, techniques, and
- applications. A number of example applications are distributed with
- Isabelle, and available via the Isabelle WWW library (e.g.\
- \url{http://isabelle.in.tum.de/library/}). The ``Archive of Formal
- Proofs'' \url{http://afp.sourceforge.net/} also provides plenty of
- examples, both in proper Isar proof style and unstructured tactic
- scripts.
-*}
-
end
--- a/doc-src/IsarRef/Thy/Outer_Syntax.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Outer_Syntax.thy Fri Feb 27 18:50:35 2009 +0100
@@ -170,10 +170,10 @@
Isabelle as @{verbatim \<forall>}. There are infinitely many Isabelle
symbols like this, although proper presentation is left to front-end
tools such as {\LaTeX} or Proof~General with the X-Symbol package.
- A list of standard Isabelle symbols that work well with these tools
- is given in \appref{app:symbols}. Note that @{verbatim "\<lambda>"} does
- not belong to the @{text letter} category, since it is already used
- differently in the Pure term language.
+ A list of predefined Isabelle symbols that work well with these
+ tools is given in \appref{app:symbols}. Note that @{verbatim "\<lambda>"}
+ does not belong to the @{text letter} category, since it is already
+ used differently in the Pure term language.
*}
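
For illustration, a minimal sketch of how such symbols occur in plain
theory source, assuming an Isabelle/HOL context (the terms are invented
for this example):

    term "\<forall>x. P x"   (* the ASCII sequence \<forall> is presented as the quantifier symbol *)
    term "\<lambda>x. x"      (* \<lambda> is term syntax, not part of the letter category *)
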
--- a/doc-src/IsarRef/Thy/Proof.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Proof.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,17 +1,15 @@
-(* $Id$ *)
-
theory Proof
imports Main
begin
-chapter {* Proofs *}
+chapter {* Proofs \label{ch:proofs} *}
text {*
Proof commands perform transitions of Isar/VM machine
configurations, which are block-structured, consisting of a stack of
nodes with three main components: logical proof context, current
- facts, and open goals. Isar/VM transitions are \emph{typed}
- according to the following three different modes of operation:
+ facts, and open goals. Isar/VM transitions are typed according to
+ the following three different modes of operation:
\begin{description}
@@ -32,13 +30,17 @@
\end{description}
- The proof mode indicator may be read as a verb telling the writer
- what kind of operation may be performed next. The corresponding
- typings of proof commands restricts the shape of well-formed proof
- texts to particular command sequences. So dynamic arrangements of
- commands eventually turn out as static texts of a certain structure.
- \Appref{ap:refcard} gives a simplified grammar of the overall
- (extensible) language emerging that way.
+ The proof mode indicator may be understood as an instruction to the
+ writer, telling what kind of operation may be performed next. The
+ corresponding typings of proof commands restrict the shape of
+ well-formed proof texts to particular command sequences. So dynamic
+ arrangements of commands eventually turn out as static texts of a
+ certain structure.
+
+ \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+ language emerging that way from the different types of proof
+ commands. The main ideas of the overall Isar framework are
+ explained in \chref{ch:isar-framework}.
*}
@@ -963,7 +965,7 @@
\begin{matharray}{l}
@{text "\<langle>using b\<^sub>1 \<dots> b\<^sub>k\<rangle>"}~~@{command "obtain"}~@{text "x\<^sub>1 \<dots> x\<^sub>m \<WHERE> a: \<phi>\<^sub>1 \<dots> \<phi>\<^sub>n \<langle>proof\<rangle> \<equiv>"} \\[1ex]
\quad @{command "have"}~@{text "\<And>thesis. (\<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\
- \quad @{command "proof"}~@{text succeed} \\
+ \quad @{command "proof"}~@{method succeed} \\
\qquad @{command "fix"}~@{text thesis} \\
\qquad @{command "assume"}~@{text "that [Pure.intro?]: \<And>x\<^sub>1 \<dots> x\<^sub>m. \<phi>\<^sub>1 \<Longrightarrow> \<dots> \<phi>\<^sub>n \<Longrightarrow> thesis"} \\
\qquad @{command "then"}~@{command "show"}~@{text thesis} \\
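
For illustration, a minimal sketch of this @{command obtain} pattern in
action, assuming an Isabelle/HOL context (the predicate P and the
statement are invented for this example):

    lemma
      assumes "\<exists>x. P x"
      shows "\<exists>y. P y"
    proof -
      from assms obtain x where "P x" ..   (* existential elimination *)
      then show ?thesis ..                 (* existential introduction *)
    qed

Each ``..'' performs the single standard rule step that the elaboration
scheme above spells out.
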
--- a/doc-src/IsarRef/Thy/Quick_Reference.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Quick_Reference.thy Fri Feb 27 18:50:35 2009 +0100
@@ -30,7 +30,7 @@
\begin{tabular}{rcl}
@{text "theory\<dash>stmt"} & = & @{command "theorem"}~@{text "name: props proof |"}~~@{command "definition"}~@{text "\<dots> | \<dots>"} \\[1ex]
- @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method stmt\<^sup>*"}~@{command "qed"}~@{text method} \\
+ @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\
& @{text "|"} & @{text "prfx\<^sup>*"}~@{command "done"} \\[1ex]
@{text prfx} & = & @{command "apply"}~@{text method} \\
& @{text "|"} & @{command "using"}~@{text "facts"} \\
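
For illustration, a minimal sketch of both proof forms of this grammar,
assuming an Isabelle/HOL context (the proposition is invented; the
initial and terminal methods are the parts now marked optional):

    lemma "A \<and> B \<longrightarrow> B \<and> A"
    proof   -- {* no initial method: a single standard rule step *}
      assume "A \<and> B"
      then obtain B and A ..
      then show "B \<and> A" ..
    qed

    lemma "A \<and> B \<longrightarrow> B \<and> A"
      apply (rule impI)
      apply (erule conjE)
      apply (rule conjI)
       apply assumption
      apply assumption
      done
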
--- a/doc-src/IsarRef/Thy/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,10 +1,10 @@
-
-(* $Id$ *)
-
+set quick_and_dirty;
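+(* allow "sorry" placeholder proofs, as used in the example theories below *)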
set ThyOutput.source;
use "../../antiquote_setup.ML";
use_thy "Introduction";
+use_thy "Framework";
+use_thy "First_Order_Logic";
use_thy "Outer_Syntax";
use_thy "Document_Preparation";
use_thy "Spec";
--- a/doc-src/IsarRef/Thy/Spec.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Spec.thy Fri Feb 27 18:50:35 2009 +0100
@@ -4,6 +4,24 @@
chapter {* Theory specifications *}
+text {*
+ The Isabelle/Isar theory format integrates specifications and
+ proofs, supporting interactive development with unlimited undo
+ operation. There is an integrated document preparation system (see
+ \chref{ch:document-prep}), for typesetting formal developments
+ together with informal text. The resulting hyper-linked PDF
+ documents can be used both for WWW presentation and printed copies.
+
+ The Isar proof language (see \chref{ch:proofs}) is embedded into the
+ theory language as a proper sub-language. Proof mode is entered by
+ stating some @{command theorem} or @{command lemma} at the theory
+ level, and left again with the final conclusion (e.g.\ via @{command
+ qed}). Some theory specification mechanisms also require a proof,
+ such as @{command typedef} in HOL, which demands non-emptiness of
+ the representing sets.
+*}
+
+
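+(*  A minimal sketch of this format; the theory name "Scratch" is a
+    made-up example:
+
+      theory Scratch
+      imports Main
+      begin
+
+      theorem trivial: "x = x"
+        by (rule refl)
+
+      end
+*)
+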
section {* Defining theories \label{sec:begin-thy} *}
text {*
@@ -106,9 +124,9 @@
@{command (global) "end"} has a different meaning: it concludes the
theory itself (\secref{sec:begin-thy}).
- \item @{text "(\<IN> c)"} given after any local theory command
- specifies an immediate target, e.g.\ ``@{command
- "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
+ \item @{text "("}@{keyword_def "in"}~@{text "c)"} given after any
+ local theory command specifies an immediate target, e.g.\
+ ``@{command "definition"}~@{text "(\<IN> c) \<dots>"}'' or ``@{command
"theorem"}~@{text "(\<IN> c) \<dots>"}''. This works both in a local or
global theory context; the current target context will be suspended
for this command only. Note that ``@{text "(\<IN> -)"}'' will
@@ -1164,7 +1182,7 @@
\end{description}
- See @{"file" "~~/src/FOL/ex/IffOracle.thy"} for a worked example of
+ See @{"file" "~~/src/FOL/ex/Iff_Oracle.thy"} for a worked example of
defining a new primitive rule as oracle, and turning it into a proof
method.
*}
--- a/doc-src/IsarRef/Thy/Symbols.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/Symbols.thy Fri Feb 27 18:50:35 2009 +0100
@@ -4,7 +4,7 @@
imports Pure
begin
-chapter {* Standard Isabelle symbols \label{app:symbols} *}
+chapter {* Predefined Isabelle symbols \label{app:symbols} *}
text {*
Isabelle supports an infinite number of non-ASCII symbols, which are
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/document/First_Order_Logic.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,1417 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{First{\isacharunderscore}Order{\isacharunderscore}Logic}%
+%
+\isamarkupheader{Example: First-Order Logic%
+}
+\isamarkuptrue%
+%
+\isadelimvisible
+%
+\endisadelimvisible
+%
+\isatagvisible
+\isacommand{theory}\isamarkupfalse%
+\ First{\isacharunderscore}Order{\isacharunderscore}Logic\isanewline
+\isakeyword{imports}\ Pure\isanewline
+\isakeyword{begin}%
+\endisatagvisible
+{\isafoldvisible}%
+%
+\isadelimvisible
+%
+\endisadelimvisible
+%
+\begin{isamarkuptext}%
+\noindent In order to commence a new object-logic within
+ Isabelle/Pure we introduce abstract syntactic categories \isa{{\isachardoublequote}i{\isachardoublequote}}
+ for individuals and \isa{{\isachardoublequote}o{\isachardoublequote}} for object-propositions. The latter
+ is embedded into the language of Pure propositions by means of a
+ separate judgment.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{typedecl}\isamarkupfalse%
+\ i\isanewline
+\isacommand{typedecl}\isamarkupfalse%
+\ o\isanewline
+\isanewline
+\isacommand{judgment}\isamarkupfalse%
+\isanewline
+\ \ Trueprop\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}o\ {\isasymRightarrow}\ prop{\isachardoublequoteclose}\ \ \ \ {\isacharparenleft}{\isachardoublequoteopen}{\isacharunderscore}{\isachardoublequoteclose}\ {\isadigit{5}}{\isacharparenright}%
+\begin{isamarkuptext}%
+\noindent Note that the object-logic judgment is implicit in the
+ syntax: writing \isa{A} produces \isa{{\isachardoublequote}Trueprop\ A{\isachardoublequote}} internally.
+ From the Pure perspective this means ``\isa{A} is derivable in the
+ object-logic''.%
+\end{isamarkuptext}%
+\isamarkuptrue%
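+% (For instance, the axiom "refl" below, stated as "x = x", amounts to
+% the Pure proposition "Trueprop (x = x)" internally.)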
+%
+\isamarkupsubsection{Equational reasoning \label{sec:framework-ex-equal}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Equality is axiomatized as a binary predicate on individuals, with
+ reflexivity as introduction, and substitution as elimination
+ principle. Note that the latter is particularly convenient in a
+ framework like Isabelle, because syntactic congruences are
+ implicitly produced by unification of \isa{{\isachardoublequote}B\ x{\isachardoublequote}} against
+ expressions containing occurrences of \isa{x}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ equal\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}i\ {\isasymRightarrow}\ i\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{infix}\ {\isachardoublequoteopen}{\isacharequal}{\isachardoublequoteclose}\ {\isadigit{5}}{\isadigit{0}}{\isacharparenright}\isanewline
+\isakeyword{where}\isanewline
+\ \ refl\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ subst\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isacharequal}\ y\ {\isasymLongrightarrow}\ B\ x\ {\isasymLongrightarrow}\ B\ y{\isachardoublequoteclose}%
+\begin{isamarkuptext}%
+\noindent Substitution is very powerful, but also hard to control in
+ full generality. We derive some common symmetry~/ transitivity
+ schemes as particular consequences.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{theorem}\isamarkupfalse%
+\ sym\ {\isacharbrackleft}sym{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}x\ {\isacharequal}\ y{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}y\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}x\ {\isacharequal}\ y{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}y\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ forw{\isacharunderscore}subst\ {\isacharbrackleft}trans{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}y\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}B\ x{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}y\ {\isacharequal}\ x{\isacharbackquoteclose}\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isacharequal}\ y{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ this\ \isakeyword{and}\ {\isacharbackquoteopen}B\ x{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ back{\isacharunderscore}subst\ {\isacharbrackleft}trans{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}B\ x{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}x\ {\isacharequal}\ y{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}x\ {\isacharequal}\ y{\isacharbackquoteclose}\ \isakeyword{and}\ {\isacharbackquoteopen}B\ x{\isacharbackquoteclose}\isanewline
+\ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ trans\ {\isacharbrackleft}trans{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}x\ {\isacharequal}\ y{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}y\ {\isacharequal}\ z{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}x\ {\isacharequal}\ z{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}y\ {\isacharequal}\ z{\isacharbackquoteclose}\ \isakeyword{and}\ {\isacharbackquoteopen}x\ {\isacharequal}\ y{\isacharbackquoteclose}\isanewline
+\ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isacharequal}\ z{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isamarkupsubsection{Basic group theory%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+As an example of equational reasoning we consider some bits of
+ group theory. The subsequent locale definition postulates group
+ operations and axioms; we also derive some consequences of this
+ specification.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{locale}\isamarkupfalse%
+\ group\ {\isacharequal}\isanewline
+\ \ \isakeyword{fixes}\ prod\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}i\ {\isasymRightarrow}\ i\ {\isasymRightarrow}\ i{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{infix}\ {\isachardoublequoteopen}{\isasymcirc}{\isachardoublequoteclose}\ {\isadigit{7}}{\isadigit{0}}{\isacharparenright}\isanewline
+\ \ \ \ \isakeyword{and}\ inv\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}i\ {\isasymRightarrow}\ i{\isachardoublequoteclose}\ \ {\isacharparenleft}{\isachardoublequoteopen}{\isacharparenleft}{\isacharunderscore}{\isasyminverse}{\isacharparenright}{\isachardoublequoteclose}\ {\isacharbrackleft}{\isadigit{1}}{\isadigit{0}}{\isadigit{0}}{\isadigit{0}}{\isacharbrackright}\ {\isadigit{9}}{\isadigit{9}}{\isadigit{9}}{\isacharparenright}\isanewline
+\ \ \ \ \isakeyword{and}\ unit\ {\isacharcolon}{\isacharcolon}\ i\ \ {\isacharparenleft}{\isachardoublequoteopen}{\isadigit{1}}{\isachardoublequoteclose}{\isacharparenright}\isanewline
+\ \ \isakeyword{assumes}\ assoc{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}x\ {\isasymcirc}\ y{\isacharparenright}\ {\isasymcirc}\ z\ {\isacharequal}\ x\ {\isasymcirc}\ {\isacharparenleft}y\ {\isasymcirc}\ z{\isacharparenright}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isakeyword{and}\ left{\isacharunderscore}unit{\isacharcolon}\ \ {\isachardoublequoteopen}{\isadigit{1}}\ {\isasymcirc}\ x\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isakeyword{and}\ left{\isacharunderscore}inv{\isacharcolon}\ {\isachardoublequoteopen}x{\isasyminverse}\ {\isasymcirc}\ x\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\isanewline
+\isakeyword{begin}\isanewline
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ right{\isacharunderscore}inv{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ {\isadigit{1}}\ {\isasymcirc}\ {\isacharparenleft}x\ {\isasymcirc}\ x{\isasyminverse}{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}unit\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ {\isacharparenleft}{\isadigit{1}}\ {\isasymcirc}\ x{\isacharparenright}\ {\isasymcirc}\ x{\isasyminverse}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ assoc\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isadigit{1}}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminverse}{\isacharparenright}{\isasyminverse}\ {\isasymcirc}\ x{\isasyminverse}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}inv\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isasymcirc}\ x\ {\isacharequal}\ {\isacharparenleft}x{\isasyminverse}{\isacharparenright}{\isasyminverse}\ {\isasymcirc}\ {\isacharparenleft}x{\isasyminverse}\ {\isasymcirc}\ x{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ assoc{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x{\isasyminverse}\ {\isasymcirc}\ x\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}inv{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isacharparenleft}{\isacharparenleft}x{\isasyminverse}{\isacharparenright}{\isasyminverse}\ {\isasymcirc}\ {\isasymdots}{\isacharparenright}\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ {\isacharparenleft}x{\isasyminverse}{\isacharparenright}{\isasyminverse}\ {\isasymcirc}\ {\isacharparenleft}{\isadigit{1}}\ {\isasymcirc}\ x{\isasyminverse}{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ assoc{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isadigit{1}}\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ x{\isasyminverse}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}unit{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isacharparenleft}x{\isasyminverse}{\isacharparenright}{\isasyminverse}\ {\isasymcirc}\ {\isasymdots}\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}inv{\isacharparenright}\isanewline
+\ \ \isacommand{finally}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ right{\isacharunderscore}unit{\isacharcolon}\ {\isachardoublequoteopen}x\ {\isasymcirc}\ {\isadigit{1}}\ {\isacharequal}\ x{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isadigit{1}}\ {\isacharequal}\ x{\isasyminverse}\ {\isasymcirc}\ x{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}inv\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ {\isasymdots}\ {\isacharequal}\ {\isacharparenleft}x\ {\isasymcirc}\ x{\isasyminverse}{\isacharparenright}\ {\isasymcirc}\ x{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ assoc\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ x{\isasyminverse}\ {\isacharequal}\ {\isadigit{1}}{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ right{\isacharunderscore}inv{\isacharparenright}\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isasymcirc}\ x\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ left{\isacharunderscore}unit{\isacharparenright}\isanewline
+\ \ \isacommand{finally}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ {\isadigit{1}}\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent Reasoning from basic axioms is often tedious. Our proofs
+ work by producing various instances of the given rules (potentially
+ the symmetric form) using the pattern ``\hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{eq}~\hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}}~\isa{{\isachardoublequote}{\isacharparenleft}rule\ r{\isacharparenright}{\isachardoublequote}}'' and composing the chain of
+ results via \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}}/\hyperlink{command.finally}{\mbox{\isa{\isacommand{finally}}}}. These steps may
+ involve any of the transitivity rules declared in
+ \secref{sec:framework-ex-equal}, namely \isa{trans} in combining
+ the first two results in \isa{right{\isacharunderscore}inv} and in the final steps of
+ both proofs, \isa{forw{\isacharunderscore}subst} in the first combination of \isa{right{\isacharunderscore}unit}, and \isa{back{\isacharunderscore}subst} in all other calculational steps.
+
+ Occasional substitutions in calculations are adequate, but should
+ not be over-emphasized. The other extreme is to compose a chain by
+ plain transitivity only, with replacements always occurring in
+ topmost position. For example:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ {\isadigit{1}}\ {\isacharequal}\ x\ {\isasymcirc}\ {\isacharparenleft}x{\isasyminverse}\ {\isasymcirc}\ x{\isacharparenright}{\isachardoublequoteclose}\ \isacommand{unfolding}\isamarkupfalse%
+\ left{\isacharunderscore}inv\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ {\isacharparenleft}x\ {\isasymcirc}\ x{\isasyminverse}{\isacharparenright}\ {\isasymcirc}\ x{\isachardoublequoteclose}\ \isacommand{unfolding}\isamarkupfalse%
+\ assoc\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ {\isadigit{1}}\ {\isasymcirc}\ x{\isachardoublequoteclose}\ \isacommand{unfolding}\isamarkupfalse%
+\ right{\isacharunderscore}inv\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{unfolding}\isamarkupfalse%
+\ left{\isacharunderscore}unit\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{finally}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymcirc}\ {\isadigit{1}}\ {\isacharequal}\ x{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent Here we have re-used the built-in mechanism for unfolding
+ definitions in order to normalize each equational problem. A more
+ realistic object-logic would include proper setup for the Simplifier
+ (\secref{sec:simplifier}), the main automated tool for equational
+ reasoning in Isabelle. Then ``\hyperlink{command.unfolding}{\mbox{\isa{\isacommand{unfolding}}}}~\isa{left{\isacharunderscore}inv}~\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' would become ``\hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}}~\isa{{\isachardoublequote}{\isacharparenleft}simp\ only{\isacharcolon}\ left{\isacharunderscore}inv{\isacharparenright}{\isachardoublequote}}'' etc.%
+\end{isamarkuptext}%
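+% A hypothetical rendering of the same chain with such Simplifier
+% setup (not available in this bare Pure object-logic) could read:
+%
+%   have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" by (simp only: left_inv)
+%   also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" by (simp only: assoc)
+%   also have "\<dots> = 1 \<circ> x" by (simp only: right_inv)
+%   also have "\<dots> = x" by (simp only: left_unit)
+%   finally have "x \<circ> 1 = x" .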
+\isamarkuptrue%
+\isacommand{end}\isamarkupfalse%
+%
+\isamarkupsubsection{Propositional logic \label{sec:framework-ex-prop}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+We axiomatize basic connectives of propositional logic: implication,
+ disjunction, and conjunction. The associated rules are modeled
+ after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ imp\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}o\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{infixr}\ {\isachardoublequoteopen}{\isasymlongrightarrow}{\isachardoublequoteclose}\ {\isadigit{2}}{\isadigit{5}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ impI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymlongrightarrow}\ B{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ impD\ {\isacharbrackleft}dest{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}A\ {\isasymlongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ A\ {\isasymLongrightarrow}\ B{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ disj\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}o\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{infixr}\ {\isachardoublequoteopen}{\isasymor}{\isachardoublequoteclose}\ {\isadigit{3}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ disjI\isactrlisub {\isadigit{1}}\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymLongrightarrow}\ A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ disjI\isactrlisub {\isadigit{2}}\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}B\ {\isasymLongrightarrow}\ A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ disjE\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymor}\ B\ {\isasymLongrightarrow}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ conj\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}o\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{infixr}\ {\isachardoublequoteopen}{\isasymand}{\isachardoublequoteclose}\ {\isadigit{3}}{\isadigit{5}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ conjI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isasymand}\ B{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ conjD\isactrlisub {\isadigit{1}}{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ conjD\isactrlisub {\isadigit{2}}{\isacharcolon}\ {\isachardoublequoteopen}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B{\isachardoublequoteclose}%
+\begin{isamarkuptext}%
+\noindent The conjunctive destructions have the disadvantage that
+ decomposing \isa{{\isachardoublequote}A\ {\isasymand}\ B{\isachardoublequote}} involves an immediate decision which
+ component should be projected. The more convenient simultaneous
+ elimination \isa{{\isachardoublequote}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ {\isacharparenleft}A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequote}} can be derived as
+ follows:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{theorem}\isamarkupfalse%
+\ conjE\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{obtains}\ A\ \isakeyword{and}\ B\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}A\ {\isasymand}\ B{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ A\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ conjD\isactrlisub {\isadigit{1}}{\isacharparenright}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}A\ {\isasymand}\ B{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ B\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ conjD\isactrlisub {\isadigit{2}}{\isacharparenright}\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent Here is an example of swapping conjuncts with a single
+ intermediate elimination step:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{obtain}\isamarkupfalse%
+\ B\ \isakeyword{and}\ A\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ {\isasymand}\ A{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent Note that the analogous elimination rule for disjunction
+ ``\isa{{\isachardoublequote}{\isasymASSUMES}\ A\ {\isasymor}\ B\ {\isasymOBTAINS}\ A\ {\isasymBBAR}\ B{\isachardoublequote}}'' coincides with
+ the original axiomatization of \isa{disjE}.
+
+ \medskip We continue propositional logic by introducing absurdity
+ with its characteristic elimination. Plain truth may then be
+ defined as a proposition that is trivially true.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ false\ {\isacharcolon}{\isacharcolon}\ o\ \ {\isacharparenleft}{\isachardoublequoteopen}{\isasymbottom}{\isachardoublequoteclose}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ falseE\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isasymbottom}\ {\isasymLongrightarrow}\ A{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{definition}\isamarkupfalse%
+\isanewline
+\ \ true\ {\isacharcolon}{\isacharcolon}\ o\ \ {\isacharparenleft}{\isachardoublequoteopen}{\isasymtop}{\isachardoublequoteclose}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ {\isachardoublequoteopen}{\isasymtop}\ {\isasymequiv}\ {\isasymbottom}\ {\isasymlongrightarrow}\ {\isasymbottom}{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ trueI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isasymtop}\isanewline
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{unfolding}\isamarkupfalse%
+\ true{\isacharunderscore}def\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\medskip\noindent Now negation represents an implication towards
+ absurdity:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{definition}\isamarkupfalse%
+\isanewline
+\ \ not\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}o\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}{\isachardoublequoteopen}{\isasymnot}\ {\isacharunderscore}{\isachardoublequoteclose}\ {\isacharbrackleft}{\isadigit{4}}{\isadigit{0}}{\isacharbrackright}\ {\isadigit{4}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ {\isachardoublequoteopen}{\isasymnot}\ A\ {\isasymequiv}\ A\ {\isasymlongrightarrow}\ {\isasymbottom}{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ notI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}A\ {\isasymLongrightarrow}\ {\isasymbottom}{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}{\isasymnot}\ A{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{unfolding}\isamarkupfalse%
+\ not{\isacharunderscore}def\isanewline
+\isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ {\isasymbottom}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ {\isacharbackquoteopen}A\ {\isasymLongrightarrow}\ {\isasymbottom}{\isacharbackquoteclose}{\isacharparenright}\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ notE\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}{\isasymnot}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ A\isanewline
+\ \ \isakeyword{shows}\ B\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymnot}\ A{\isacharbackquoteclose}\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymlongrightarrow}\ {\isasymbottom}{\isachardoublequoteclose}\ \isacommand{unfolding}\isamarkupfalse%
+\ not{\isacharunderscore}def\ \isacommand{{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}A\ {\isasymlongrightarrow}\ {\isasymbottom}{\isacharbackquoteclose}\ \isakeyword{and}\ {\isacharbackquoteopen}A{\isacharbackquoteclose}\ \isacommand{have}\isamarkupfalse%
+\ {\isasymbottom}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ B\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isamarkupsubsection{Classical logic%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Subsequently we state the principle of classical contradiction as a
+ local assumption. Thus we refrain from forcing the object-logic
+ into the classical perspective. Within that context, we may derive
+ well-known consequences of the classical principle.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{locale}\isamarkupfalse%
+\ classical\ {\isacharequal}\isanewline
+\ \ \isakeyword{assumes}\ classical{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}{\isasymnot}\ C\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequoteclose}\isanewline
+\isakeyword{begin}\isanewline
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ double{\isacharunderscore}negation{\isacharcolon}\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}{\isasymnot}\ {\isasymnot}\ C{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ C\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharparenleft}rule\ classical{\isacharparenright}\isanewline
+\ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ C{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymnot}\ {\isasymnot}\ C{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ C\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+\isanewline
+\isacommand{theorem}\isamarkupfalse%
+\ tertium{\isacharunderscore}non{\isacharunderscore}datur{\isacharcolon}\ {\isachardoublequoteopen}C\ {\isasymor}\ {\isasymnot}\ C{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharparenleft}rule\ double{\isacharunderscore}negation{\isacharparenright}\isanewline
+\ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ {\isasymnot}\ {\isacharparenleft}C\ {\isasymor}\ {\isasymnot}\ C{\isacharparenright}{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ {\isacharparenleft}C\ {\isasymor}\ {\isasymnot}\ C{\isacharparenright}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ C{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ C\ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ {\isasymor}\ {\isasymnot}\ C{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymnot}\ {\isacharparenleft}C\ {\isasymor}\ {\isasymnot}\ C{\isacharparenright}{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ {\isasymbottom}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{qed}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ {\isasymor}\ {\isasymnot}\ C{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{with}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymnot}\ {\isacharparenleft}C\ {\isasymor}\ {\isasymnot}\ C{\isacharparenright}{\isacharbackquoteclose}\ \isacommand{show}\isamarkupfalse%
+\ {\isasymbottom}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{qed}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent These examples illustrate both classical reasoning and
+ non-trivial propositional proofs in general. All three rules
+ characterize classical logic independently, but the original rule is
+ already the most convenient to use, because it leaves the conclusion
+ unchanged. Note that \isa{{\isachardoublequote}{\isacharparenleft}{\isasymnot}\ C\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequote}} fits again into our
+ format for eliminations, despite the additional twist that the
+ context refers to the main conclusion. So we may write \isa{classical} as the Isar statement ``\isa{{\isachardoublequote}{\isasymOBTAINS}\ {\isasymnot}\ thesis{\isachardoublequote}}''.
+ This also explains nicely how classical reasoning really works:
+ whatever the main \isa{thesis} might be, we may always assume its
+ negation!%
+\end{isamarkuptext}%
+\isamarkuptrue%
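+% A sketch of that restatement, with the invented name classical':
+%
+%   theorem classical':
+%     obtains "\<not> thesis"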
+\isacommand{end}\isamarkupfalse%
+%
+\isamarkupsubsection{Quantifiers \label{sec:framework-ex-quant}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Representing quantifiers is easy, thanks to the higher-order nature
+ of the underlying framework. According to the well-known technique
+ introduced by Church \cite{church40}, quantifiers are operators on
+ predicates, which are syntactically represented as \isa{{\isachardoublequote}{\isasymlambda}{\isachardoublequote}}-terms
+ of type \isa{{\isachardoublequote}i\ {\isasymRightarrow}\ o{\isachardoublequote}}. Binder notation turns \isa{{\isachardoublequote}All\ {\isacharparenleft}{\isasymlambda}x{\isachardot}\ B\ x{\isacharparenright}{\isachardoublequote}} into \isa{{\isachardoublequote}{\isasymforall}x{\isachardot}\ B\ x{\isachardoublequote}} etc.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ All\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}i\ {\isasymRightarrow}\ o{\isacharparenright}\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{binder}\ {\isachardoublequoteopen}{\isasymforall}{\isachardoublequoteclose}\ {\isadigit{1}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ allI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymforall}x{\isachardot}\ B\ x{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ allD\ {\isacharbrackleft}dest{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}{\isasymforall}x{\isachardot}\ B\ x{\isacharparenright}\ {\isasymLongrightarrow}\ B\ a{\isachardoublequoteclose}\isanewline
+\isanewline
+\isacommand{axiomatization}\isamarkupfalse%
+\isanewline
+\ \ Ex\ {\isacharcolon}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}i\ {\isasymRightarrow}\ o{\isacharparenright}\ {\isasymRightarrow}\ o{\isachardoublequoteclose}\ \ {\isacharparenleft}\isakeyword{binder}\ {\isachardoublequoteopen}{\isasymexists}{\isachardoublequoteclose}\ {\isadigit{1}}{\isadigit{0}}{\isacharparenright}\ \isakeyword{where}\isanewline
+\ \ exI\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}B\ a\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymexists}x{\isachardot}\ B\ x{\isacharparenright}{\isachardoublequoteclose}\ \isakeyword{and}\isanewline
+\ \ exE\ {\isacharbrackleft}elim{\isacharbrackright}{\isacharcolon}\ {\isachardoublequoteopen}{\isacharparenleft}{\isasymexists}x{\isachardot}\ B\ x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x\ {\isasymLongrightarrow}\ C{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequoteclose}%
+\begin{isamarkuptext}%
+\noindent The statement of \isa{exE} corresponds to ``\isa{{\isachardoublequote}{\isasymASSUMES}\ {\isasymexists}x{\isachardot}\ B\ x\ {\isasymOBTAINS}\ x\ {\isasymWHERE}\ B\ x{\isachardoublequote}}'' in Isar. In the
+ subsequent example we illustrate quantifier reasoning involving all
+ four rules:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\isacommand{theorem}\isamarkupfalse%
+\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}{\isasymexists}x{\isachardot}\ {\isasymforall}y{\isachardot}\ R\ x\ y{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}{\isasymforall}y{\isachardot}\ {\isasymexists}x{\isachardot}\ R\ x\ y{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ \ \ \ %
+\isamarkupcmt{\isa{{\isachardoublequote}{\isasymforall}{\isachardoublequote}} introduction%
+}
+\isanewline
+\ \ \isacommand{obtain}\isamarkupfalse%
+\ x\ \isakeyword{where}\ {\isachardoublequoteopen}{\isasymforall}y{\isachardot}\ R\ x\ y{\isachardoublequoteclose}\ \isacommand{using}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymexists}x{\isachardot}\ {\isasymforall}y{\isachardot}\ R\ x\ y{\isacharbackquoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\ \ \ \ %
+\isamarkupcmt{\isa{{\isachardoublequote}{\isasymexists}{\isachardoublequote}} elimination%
+}
+\isanewline
+\ \ \isacommand{fix}\isamarkupfalse%
+\ y\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}R\ x\ y{\isachardoublequoteclose}\ \isacommand{using}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymforall}y{\isachardot}\ R\ x\ y{\isacharbackquoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\ \ \ \ %
+\isamarkupcmt{\isa{{\isachardoublequote}{\isasymforall}{\isachardoublequote}} destruction%
+}
+\isanewline
+\ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymexists}x{\isachardot}\ R\ x\ y{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\ \ \ \ %
+\isamarkupcmt{\isa{{\isachardoublequote}{\isasymexists}{\isachardoublequote}} introduction%
+}
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isamarkupsubsection{Canonical reasoning patterns%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The main rules of first-order predicate logic from
+ \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
+ can now be summarized as follows, using the native Isar statement
+ format of \secref{sec:framework-stmt}.
+
+ \medskip
+ \begin{tabular}{l}
+ \isa{{\isachardoublequote}impI{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymLongrightarrow}\ B\ {\isasymSHOWS}\ A\ {\isasymlongrightarrow}\ B{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}impD{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymlongrightarrow}\ B\ {\isasymAND}\ A\ {\isasymSHOWS}\ B{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}disjI\isactrlisub {\isadigit{1}}{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymSHOWS}\ A\ {\isasymor}\ B{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}disjI\isactrlisub {\isadigit{2}}{\isacharcolon}\ {\isasymASSUMES}\ B\ {\isasymSHOWS}\ A\ {\isasymor}\ B{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}disjE{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymor}\ B\ {\isasymOBTAINS}\ A\ {\isasymBBAR}\ B{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}conjI{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymAND}\ B\ {\isasymSHOWS}\ A\ {\isasymand}\ B{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}conjE{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymand}\ B\ {\isasymOBTAINS}\ A\ {\isasymAND}\ B{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}falseE{\isacharcolon}\ {\isasymASSUMES}\ {\isasymbottom}\ {\isasymSHOWS}\ A{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}trueI{\isacharcolon}\ {\isasymSHOWS}\ {\isasymtop}{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}notI{\isacharcolon}\ {\isasymASSUMES}\ A\ {\isasymLongrightarrow}\ {\isasymbottom}\ {\isasymSHOWS}\ {\isasymnot}\ A{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}notE{\isacharcolon}\ {\isasymASSUMES}\ {\isasymnot}\ A\ {\isasymAND}\ A\ {\isasymSHOWS}\ B{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}allI{\isacharcolon}\ {\isasymASSUMES}\ {\isasymAnd}x{\isachardot}\ B\ x\ {\isasymSHOWS}\ {\isasymforall}x{\isachardot}\ B\ x{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}allD{\isacharcolon}\ {\isasymASSUMES}\ {\isasymforall}x{\isachardot}\ B\ x\ {\isasymSHOWS}\ B\ a{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}exI{\isacharcolon}\ {\isasymASSUMES}\ B\ a\ {\isasymSHOWS}\ {\isasymexists}x{\isachardot}\ B\ x{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}exE{\isacharcolon}\ {\isasymASSUMES}\ {\isasymexists}x{\isachardot}\ B\ x\ {\isasymOBTAINS}\ a\ {\isasymWHERE}\ B\ a{\isachardoublequote}}
+ \end{tabular}
+ \medskip
+
+ \noindent This essentially provides a declarative reading of Pure
+ rules as Isar reasoning patterns: the rule statements tell how a
+ canonical proof outline should look. Since the above rules have
+ already been declared as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}, \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}}, \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}} --- each according to its
+ particular shape --- we can immediately write Isar proof texts as
+ follows:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+%
+\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymlongrightarrow}\ B{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymlongrightarrow}\ B{\isachardoublequoteclose}\ \isakeyword{and}\ A%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ B\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ A%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymor}\ B{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ C\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ C%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{next}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ B\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ C%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ A\ \isakeyword{and}\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymand}\ B{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{obtain}\isamarkupfalse%
+\ A\ \isakeyword{and}\ B\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymbottom}{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ A\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymtop}{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ A{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymbottom}{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymnot}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ A%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ B\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymforall}x{\isachardot}\ B\ x{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymforall}x{\isachardot}\ B\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ a{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymexists}x{\isachardot}\ B\ x{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ a{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}
+\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymexists}x{\isachardot}\ B\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{obtain}\isamarkupfalse%
+\ a\ \isakeyword{where}\ {\isachardoublequoteopen}B\ a{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\end{minipage}
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\bigskip\noindent Of course, these proofs are merely examples. As
+ sketched in \secref{sec:framework-subproof}, there is a fair amount
+ of flexibility in expressing Pure deductions in Isar. Here the user
+ is asked to express himself adequately, aiming at proof texts of
+ literary quality.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimvisible
+%
+\endisadelimvisible
+%
+\isatagvisible
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagvisible
+{\isafoldvisible}%
+%
+\isadelimvisible
+%
+\endisadelimvisible
+\isanewline
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/document/Framework.tex Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,1518 @@
+%
+\begin{isabellebody}%
+\def\isabellecontext{Framework}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{theory}\isamarkupfalse%
+\ Framework\isanewline
+\isakeyword{imports}\ Main\isanewline
+\isakeyword{begin}%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isamarkupchapter{The Isabelle/Isar Framework \label{ch:isar-framework}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Isabelle/Isar
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
+ is intended as a generic framework for developing formal
+ mathematical documents with full proof checking. Definitions and
+ proofs are organized as theories. An assembly of theory sources may
+ be presented as a printed document; see also
+ \chref{ch:document-prep}.
+
+ The main objective of Isar is the design of a human-readable
+ structured proof language, which is called the ``primary proof
+ format'' in Isar terminology. Such a primary proof language is
+ somewhere in the middle between the extremes of primitive proof
+ objects and actual natural language. In this respect, Isar is a bit
+ more formalistic than Mizar
+ \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
+ using logical symbols for certain reasoning schemes where Mizar
+ would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
+ further comparisons of these systems.
+
+ So Isar challenges the traditional way of recording informal proofs
+ in mathematical prose, as well as the common tendency to see fully
+ formal proofs directly as objects of some logical calculus (e.g.\
+ \isa{{\isachardoublequote}{\isasymlambda}{\isachardoublequote}}-terms in a version of type theory). In fact, Isar is
+ better understood as an interpreter of a simple block-structured
+ language for describing the data flow of local facts and goals,
+ interspersed with occasional invocations of proof methods.
+ Everything is reduced to logical inferences internally, but these
+ steps are somewhat marginal compared to the overall bookkeeping of
+ the interpretation process. Thanks to careful design of the syntax
+ and semantics of Isar language elements, a formal record of Isar
+ instructions may later appear as an intelligible text to the
+ attentive reader.
+
+ The Isar proof language has emerged from careful analysis of some
+ inherent virtues of the existing logical framework of Isabelle/Pure
+ \cite{paulson-found,paulson700}, notably composition of higher-order
+ natural deduction rules, which is a generalization of Gentzen's
+ original calculus \cite{Gentzen:1935}. The approach of generic
+ inference systems in Pure is continued by Isar towards actual proof
+ texts.
+
+ Concrete applications require another intermediate layer: an
+ object-logic. Isabelle/HOL \cite{isa-tutorial} (simply-typed
+ set-theory) is used most of the time; Isabelle/ZF
+ \cite{isabelle-ZF} is less extensively developed, although it would
+ probably fit better for classical mathematics.
+
+ \medskip In order to illustrate natural deduction in Isar, we shall
+ refer to the background theory and library of Isabelle/HOL. This
+ includes common notions of predicate logic, naive set-theory etc.\
+ using fairly standard mathematical notation. From the perspective
+ of generic natural deduction there is nothing special about the
+ logical connectives of HOL (\isa{{\isachardoublequote}{\isasymand}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymor}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymforall}{\isachardoublequote}},
+ \isa{{\isachardoublequote}{\isasymexists}{\isachardoublequote}}, etc.), only the resulting reasoning principles are
+ relevant to the user. There are similar rules available for
+ set-theory operators (\isa{{\isachardoublequote}{\isasyminter}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymunion}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymInter}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymUnion}{\isachardoublequote}}, etc.), or any other theory developed in the library (lattice
+ theory, topology etc.).
+
+ Subsequently we briefly review fragments of Isar proof texts
+ corresponding directly to such general deduction schemes. The
+ examples shall refer to set-theory, to avoid the impression that
+ connectives of predicate logic are something special.
+
+ \medskip The following deduction performs \isa{{\isachardoublequote}{\isasyminter}{\isachardoublequote}}-introduction,
+ working forwards from assumptions towards the conclusion. We give
+ both the Isar text and a depiction of the primitive rule involved, as
+ determined by unification of the problem against rules that are
+ declared in the library context.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\medskip\begin{minipage}{0.6\textwidth}
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}x\ {\isasymin}\ B{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A\ {\isasyminter}\ B{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}\begin{minipage}{0.4\textwidth}
+%
+\begin{isamarkuptext}%
+\infer{\isa{{\isachardoublequote}x\ {\isasymin}\ A\ {\isasyminter}\ B{\isachardoublequote}}}{\isa{{\isachardoublequote}x\ {\isasymin}\ A{\isachardoublequote}} & \isa{{\isachardoublequote}x\ {\isasymin}\ B{\isachardoublequote}}}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\end{minipage}
+%
+\begin{isamarkuptext}%
+\medskip\noindent Note that \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}} augments the proof
+ context, \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}} indicates that the current fact shall be
+ used in the next step, and \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}} states an intermediate
+ goal. The two dots ``\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' refer to a complete proof of
+ this claim, using the indicated facts and a canonical rule from the
+ context. We could have been more explicit here by spelling out the
+ final proof step via the \hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}} command:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}x\ {\isasymin}\ B{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A\ {\isasyminter}\ B{\isachardoublequoteclose}\ \isacommand{by}\isamarkupfalse%
+\ {\isacharparenleft}rule\ IntI{\isacharparenright}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent The format of the \isa{{\isachardoublequote}{\isasyminter}{\isachardoublequote}}-introduction rule represents
+ the most basic inference, which proceeds from given premises to a
+ conclusion, without any nested proof context involved.
+
+ The next example performs backwards introduction on \isa{{\isachardoublequote}{\isasymInter}{\isasymA}{\isachardoublequote}},
+ the intersection of all sets within a given set. This requires a
+ nested proof of set membership within a local context, where \isa{A} is an arbitrary-but-fixed member of the collection:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\medskip\begin{minipage}{0.6\textwidth}
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymInter}{\isasymA}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymin}\ {\isasymA}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ \ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}\begin{minipage}{0.4\textwidth}
+%
+\begin{isamarkuptext}%
+\infer{\isa{{\isachardoublequote}x\ {\isasymin}\ {\isasymInter}{\isasymA}{\isachardoublequote}}}{\infer*{\isa{{\isachardoublequote}x\ {\isasymin}\ A{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isacharbrackleft}A{\isacharbrackright}{\isacharbrackleft}A\ {\isasymin}\ {\isasymA}{\isacharbrackright}{\isachardoublequote}}}}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\end{minipage}
+%
+\begin{isamarkuptext}%
+\medskip\noindent This Isar reasoning pattern again refers to the
+ primitive rule depicted above. The system determines it in the
+ ``\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}'' step, which could have been spelt out more
+ explicitly as ``\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}{\isacharparenleft}rule\ InterI{\isacharparenright}{\isachardoublequote}}''. Note
+ that the rule involves both a local parameter \isa{{\isachardoublequote}A{\isachardoublequote}} and an
+ assumption \isa{{\isachardoublequote}A\ {\isasymin}\ {\isasymA}{\isachardoublequote}} in the nested reasoning. This kind of
+ compound rule typically demands a genuine sub-proof in Isar, working
+ backwards rather than forwards as seen before. In the proof body we
+ encounter the \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}-\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}-\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}
+ outline of nested sub-proofs that is typical for Isar. The final
+ \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} is like \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}} followed by an additional
+ refinement of the enclosing claim, using the rule derived from the
+ proof body.
+
+ \medskip The next example involves \isa{{\isachardoublequote}{\isasymUnion}{\isasymA}{\isachardoublequote}}, which can be
+ characterized as the set of all \isa{{\isachardoublequote}x{\isachardoublequote}} such that \isa{{\isachardoublequote}{\isasymexists}A{\isachardot}\ x\ {\isasymin}\ A\ {\isasymand}\ A\ {\isasymin}\ {\isasymA}{\isachardoublequote}}. The elimination rule for \isa{{\isachardoublequote}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequote}} does
+ not mention \isa{{\isachardoublequote}{\isasymexists}{\isachardoublequote}} and \isa{{\isachardoublequote}{\isasymand}{\isachardoublequote}} at all, but allows us to obtain
+ directly a local \isa{{\isachardoublequote}A{\isachardoublequote}} such that \isa{{\isachardoublequote}x\ {\isasymin}\ A{\isachardoublequote}} and \isa{{\isachardoublequote}A\ {\isasymin}\ {\isasymA}{\isachardoublequote}} hold. This corresponds to the following Isar proof and
+ inference rule, respectively:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\medskip\begin{minipage}{0.6\textwidth}
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ C\isanewline
+\ \ \ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}A\ {\isasymin}\ {\isasymA}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \ \ \isacommand{show}\isamarkupfalse%
+\ C%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ \ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}\begin{minipage}{0.4\textwidth}
+%
+\begin{isamarkuptext}%
+\infer{\isa{{\isachardoublequote}C{\isachardoublequote}}}{\isa{{\isachardoublequote}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequote}} & \infer*{\isa{{\isachardoublequote}C{\isachardoublequote}}~}{\isa{{\isachardoublequote}{\isacharbrackleft}A{\isacharbrackright}{\isacharbrackleft}x\ {\isasymin}\ A{\isacharcomma}\ A\ {\isasymin}\ {\isasymA}{\isacharbrackright}{\isachardoublequote}}}}%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\end{minipage}
+%
+\begin{isamarkuptext}%
+\medskip\noindent Although the Isar proof follows the natural
+ deduction rule closely, the text does not read as naturally as
+ anticipated. There is a double occurrence of an arbitrary
+ conclusion \isa{{\isachardoublequote}C{\isachardoublequote}}, which represents the final result but is
+ irrelevant for now. This issue arises for any elimination rule
+ involving local parameters. Isar provides the derived language
+ element \hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}, which is able to perform the same
+ elimination proof more conveniently:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}x\ {\isasymin}\ {\isasymUnion}{\isasymA}{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{then}\isamarkupfalse%
+\ \isacommand{obtain}\isamarkupfalse%
+\ A\ \isakeyword{where}\ {\isachardoublequoteopen}x\ {\isasymin}\ A{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}A\ {\isasymin}\ {\isasymA}{\isachardoublequoteclose}\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent Here we avoid mentioning the final conclusion \isa{{\isachardoublequote}C{\isachardoublequote}}
+ and return to plain forward reasoning. The rule involved in the
+ ``\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' proof is the same as before.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsection{The Pure framework \label{sec:framework-pure}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
+ fragment of higher-order logic \cite{church40}. In type-theoretic
+ parlance, there are three levels of \isa{{\isachardoublequote}{\isasymlambda}{\isachardoublequote}}-calculus with
+ corresponding arrows \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}}/\isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}}/\isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}}:
+
+ \medskip
+ \begin{tabular}{ll}
+ \isa{{\isachardoublequote}{\isasymalpha}\ {\isasymRightarrow}\ {\isasymbeta}{\isachardoublequote}} & syntactic function space (terms depending on terms) \\
+ \isa{{\isachardoublequote}{\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}} & universal quantification (proofs depending on terms) \\
+ \isa{{\isachardoublequote}A\ {\isasymLongrightarrow}\ B{\isachardoublequote}} & implication (proofs depending on proofs) \\
+ \end{tabular}
+ \medskip
+
+ \noindent Here only the types of syntactic terms and the
+ propositions of proof terms have been shown. The \isa{{\isachardoublequote}{\isasymlambda}{\isachardoublequote}}-structure of proofs can be recorded as an optional feature of
+ the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
+ the formal system can never depend on proof terms, due to \emph{proof
+ irrelevance}.
+
+ On top of this most primitive layer of proofs, Pure implements a
+ generic calculus for nested natural deduction rules, similar to
+ \cite{Schroeder-Heister:1984}. Here object-logic inferences are
+ internalized as formulae over \isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}} and \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}}.
+ Combining such rule statements may involve higher-order unification
+ \cite{paulson-natural}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
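+\begin{isamarkuptext}%
+\medskip\noindent As a concrete illustration (our reading of the table
+  above, anticipating the object-logic rules quoted below), a quantifier
+  rule such as \isa{{\isachardoublequote}allI{\isacharcolon}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymforall}x{\isachardot}\ B\ x{\isachardoublequote}} exercises all three
+  levels at once: the body \isa{{\isachardoublequote}B\ x{\isachardoublequote}} is a syntactic term (level
+  \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}}), the outer \isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}} makes the proof depend on the
+  term \isa{{\isachardoublequote}x{\isachardoublequote}}, and \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} makes the conclusion depend on a
+  proof of the premise.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%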
+\isamarkupsubsection{Primitive inferences%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Term syntax provides explicit notation for abstraction \isa{{\isachardoublequote}{\isasymlambda}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ b{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}} and application \isa{{\isachardoublequote}b\ a{\isachardoublequote}}, while types are usually
+ implicit thanks to type-inference; terms of type \isa{{\isachardoublequote}prop{\isachardoublequote}} are
+ called propositions. Logical statements are composed via \isa{{\isachardoublequote}{\isasymAnd}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}} and \isa{{\isachardoublequote}A\ {\isasymLongrightarrow}\ B{\isachardoublequote}}. Primitive reasoning operates on
+ judgments of the form \isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}{\isachardoublequote}}, with standard introduction
+ and elimination rules for \isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}} and \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} that refer to
+ fixed parameters \isa{{\isachardoublequote}x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub m{\isachardoublequote}} and hypotheses
+ \isa{{\isachardoublequote}A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n{\isachardoublequote}} from the context \isa{{\isachardoublequote}{\isasymGamma}{\isachardoublequote}};
+ the corresponding proof terms are left implicit. The subsequent
+ inference rules define \isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}{\isachardoublequote}} inductively, relative to a
+ collection of axioms:
+
+ \[
+ \infer{\isa{{\isachardoublequote}{\isasymturnstile}\ A{\isachardoublequote}}}{(\isa{{\isachardoublequote}A{\isachardoublequote}} \text{~axiom})}
+ \qquad
+ \infer{\isa{{\isachardoublequote}A\ {\isasymturnstile}\ A{\isachardoublequote}}}{}
+ \]
+
+ \[
+ \infer{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ B{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}} & \isa{{\isachardoublequote}x\ {\isasymnotin}\ {\isasymGamma}{\isachardoublequote}}}
+ \qquad
+ \infer{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ B{\isacharparenleft}a{\isacharparenright}{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}x{\isachardot}\ B{\isacharparenleft}x{\isacharparenright}{\isachardoublequote}}}
+ \]
+
+ \[
+ \infer{\isa{{\isachardoublequote}{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ B{\isachardoublequote}}}
+ \qquad
+ \infer{\isa{{\isachardoublequote}{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymunion}\ {\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ B{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymGamma}\isactrlsub {\isadigit{1}}\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymGamma}\isactrlsub {\isadigit{2}}\ {\isasymturnstile}\ A{\isachardoublequote}}}
+ \]
+
+ Furthermore, Pure provides a built-in equality \isa{{\isachardoublequote}{\isasymequiv}\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}\ {\isasymRightarrow}\ {\isasymalpha}\ {\isasymRightarrow}\ prop{\isachardoublequote}} with axioms for reflexivity, substitution, extensionality,
+ and \isa{{\isachardoublequote}{\isasymalpha}{\isasymbeta}{\isasymeta}{\isachardoublequote}}-conversion on \isa{{\isachardoublequote}{\isasymlambda}{\isachardoublequote}}-terms.
+
+ \medskip An object-logic introduces another layer on top of Pure,
+ e.g.\ with types \isa{{\isachardoublequote}i{\isachardoublequote}} for individuals and \isa{{\isachardoublequote}o{\isachardoublequote}} for
+ propositions, term constants \isa{{\isachardoublequote}Trueprop\ {\isacharcolon}{\isacharcolon}\ o\ {\isasymRightarrow}\ prop{\isachardoublequote}} as
+ (implicit) derivability judgment and connectives like \isa{{\isachardoublequote}{\isasymand}\ {\isacharcolon}{\isacharcolon}\ o\ {\isasymRightarrow}\ o\ {\isasymRightarrow}\ o{\isachardoublequote}} or \isa{{\isachardoublequote}{\isasymforall}\ {\isacharcolon}{\isacharcolon}\ {\isacharparenleft}i\ {\isasymRightarrow}\ o{\isacharparenright}\ {\isasymRightarrow}\ o{\isachardoublequote}}, and axioms for object-level
+ rules such as \isa{{\isachardoublequote}conjI{\isacharcolon}\ A\ {\isasymLongrightarrow}\ B\ {\isasymLongrightarrow}\ A\ {\isasymand}\ B{\isachardoublequote}} or \isa{{\isachardoublequote}allI{\isacharcolon}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}\ {\isasymLongrightarrow}\ {\isasymforall}x{\isachardot}\ B\ x{\isachardoublequote}}. Derived object rules are represented as theorems of
+ Pure. After the initial object-logic setup, further axiomatizations
+ are usually avoided; plain definitions and derived principles are
+ used exclusively.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
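+\begin{isamarkuptext}%
+\medskip\noindent For illustration (a minimal worked example, not part
+  of the original text), the rules above already suffice to derive
+  trivial theorems: starting from the assumption judgment and applying
+  \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}}-introduction with \isa{{\isachardoublequote}{\isasymGamma}\ {\isacharequal}\ {\isacharbraceleft}A{\isacharbraceright}{\isachardoublequote}} discharges the
+  hypothesis and yields the theorem \isa{{\isachardoublequote}{\isasymturnstile}\ A\ {\isasymLongrightarrow}\ A{\isachardoublequote}}:
+  \[
+  \infer{\isa{{\isachardoublequote}{\isasymturnstile}\ A\ {\isasymLongrightarrow}\ A{\isachardoublequote}}}{\infer{\isa{{\isachardoublequote}A\ {\isasymturnstile}\ A{\isachardoublequote}}}{}}
+  \]%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%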
+\isamarkupsubsection{Reasoning with rules \label{sec:framework-resolution}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Primitive inferences mostly serve foundational purposes. The main
+ reasoning mechanisms of Pure operate on nested natural deduction
+ rules expressed as formulae, using \isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}} to bind local
+ parameters and \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} to express entailment. Multiple
+ parameters and premises are represented by repeating these
+ connectives in a right-associative manner.
+
+ Since \isa{{\isachardoublequote}{\isasymAnd}{\isachardoublequote}} and \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} commute thanks to the theorem
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymLongrightarrow}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ B\ x{\isacharparenright}{\isacharparenright}\ {\isasymequiv}\ {\isacharparenleft}{\isasymAnd}x{\isachardot}\ A\ {\isasymLongrightarrow}\ B\ x{\isacharparenright}{\isachardoublequote}}, we may assume w.l.o.g.\
+ that rule statements always observe the normal form where
+ quantifiers are pulled in front of implications at each level of
+ nesting. This means that any Pure proposition may be presented as a
+ \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
+ form \isa{{\isachardoublequote}{\isasymAnd}x\isactrlisub {\isadigit{1}}\ {\isasymdots}\ x\isactrlisub m{\isachardot}\ H\isactrlisub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ H\isactrlisub n\ {\isasymLongrightarrow}\ A{\isachardoublequote}} for \isa{{\isachardoublequote}m{\isacharcomma}\ n\ {\isasymge}\ {\isadigit{0}}{\isachardoublequote}}, with \isa{{\isachardoublequote}A{\isachardoublequote}} atomic and \isa{{\isachardoublequote}H\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ H\isactrlisub n{\isachardoublequote}} recursively of the same format.
+ Following the convention that outermost quantifiers are implicit,
+ Horn clauses \isa{{\isachardoublequote}A\isactrlisub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ A\isactrlisub n\ {\isasymLongrightarrow}\ A{\isachardoublequote}} are a special
+ case of this.
+
+ For example, the \isa{{\isachardoublequote}{\isasyminter}{\isachardoublequote}}-introduction rule encountered before is
+ represented as a Pure theorem as follows:
+ \[
+ \isa{{\isachardoublequote}IntI{\isacharcolon}{\isachardoublequote}}~\isa{{\isachardoublequote}x\ {\isasymin}\ A\ {\isasymLongrightarrow}\ x\ {\isasymin}\ B\ {\isasymLongrightarrow}\ x\ {\isasymin}\ A\ {\isasyminter}\ B{\isachardoublequote}}
+ \]
+
+ \noindent This is a plain Horn clause, since no further nesting on
+ the left is involved. The general \isa{{\isachardoublequote}{\isasymInter}{\isachardoublequote}}-introduction
+ corresponds to a Hereditary Harrop Formula with one additional level
+ of nesting:
+ \[
+ \isa{{\isachardoublequote}InterI{\isacharcolon}{\isachardoublequote}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}A{\isachardot}\ A\ {\isasymin}\ {\isasymA}\ {\isasymLongrightarrow}\ x\ {\isasymin}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ x\ {\isasymin}\ {\isasymInter}{\isasymA}{\isachardoublequote}}
+ \]
+
+ \medskip Goals are also represented as rules: \isa{{\isachardoublequote}A\isactrlisub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ A\isactrlisub n\ {\isasymLongrightarrow}\ C{\isachardoublequote}} states that the sub-goals \isa{{\isachardoublequote}A\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ A\isactrlisub n{\isachardoublequote}} entail the result \isa{{\isachardoublequote}C{\isachardoublequote}}; for \isa{{\isachardoublequote}n\ {\isacharequal}\ {\isadigit{0}}{\isachardoublequote}} the
+ goal is finished. To allow \isa{{\isachardoublequote}C{\isachardoublequote}} to be a rule statement
+ itself, we introduce the protective marker \isa{{\isachardoublequote}{\isacharhash}\ {\isacharcolon}{\isacharcolon}\ prop\ {\isasymRightarrow}\ prop{\isachardoublequote}}, which is defined as identity and hidden from the user. We
+ initialize and finish goal states as follows:
+
+ \[
+ \begin{array}{c@ {\qquad}c}
+ \infer[(\indexdef{}{inference}{init}\hypertarget{inference.init}{\hyperlink{inference.init}{\mbox{\isa{init}}}})]{\isa{{\isachardoublequote}C\ {\isasymLongrightarrow}\ {\isacharhash}C{\isachardoublequote}}}{} &
+ \infer[(\indexdef{}{inference}{finish}\hypertarget{inference.finish}{\hyperlink{inference.finish}{\mbox{\isa{finish}}}})]{\isa{C}}{\isa{{\isachardoublequote}{\isacharhash}C{\isachardoublequote}}}
+ \end{array}
+ \]
+
+ \noindent Goal states are refined in intermediate proof steps until
+ a finished form is achieved. Here the two main reasoning principles
+ are \hyperlink{inference.resolution}{\mbox{\isa{resolution}}}, for back-chaining a rule against a
+ sub-goal (replacing it by zero or more sub-goals), and \hyperlink{inference.assumption}{\mbox{\isa{assumption}}}, for solving a sub-goal (finding a short-circuit with
+ local assumptions). Below \isa{{\isachardoublequote}\isactrlvec x{\isachardoublequote}} stands for \isa{{\isachardoublequote}x\isactrlisub {\isadigit{1}}{\isacharcomma}\ {\isasymdots}{\isacharcomma}\ x\isactrlisub n{\isachardoublequote}} (\isa{{\isachardoublequote}n\ {\isasymge}\ {\isadigit{0}}{\isachardoublequote}}).
+
+ \[
+ \infer[(\indexdef{}{inference}{resolution}\hypertarget{inference.resolution}{\hyperlink{inference.resolution}{\mbox{\isa{resolution}}}})]
+ {\isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ \isactrlvec A\ {\isacharparenleft}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}{\isachardoublequote}}}
+ {\begin{tabular}{rl}
+ \isa{{\isachardoublequote}rule{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}\isactrlvec A\ \isactrlvec a\ {\isasymLongrightarrow}\ B\ \isactrlvec a{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}goal{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}goal\ unifier{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}{\isachardoublequote}} \\
+ \end{tabular}}
+ \]
+
+ \medskip
+
+ \[
+ \infer[(\indexdef{}{inference}{assumption}\hypertarget{inference.assumption}{\hyperlink{inference.assumption}{\mbox{\isa{assumption}}}})]{\isa{{\isachardoublequote}C{\isasymvartheta}{\isachardoublequote}}}
+ {\begin{tabular}{rl}
+ \isa{{\isachardoublequote}goal{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ A\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}assm\ unifier{\isacharcolon}{\isachardoublequote}} & \isa{{\isachardoublequote}A{\isasymvartheta}\ {\isacharequal}\ H\isactrlsub i{\isasymvartheta}{\isachardoublequote}}~~\text{(for some~\isa{{\isachardoublequote}H\isactrlsub i{\isachardoublequote}})} \\
+ \end{tabular}}
+ \]
+
+ The following trace illustrates goal-oriented reasoning in
+ Isabelle/Pure:
+
+ {\footnotesize
+ \medskip
+ \begin{tabular}{r@ {\quad}l}
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B\ {\isasymand}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B\ {\isasymand}\ A{\isacharparenright}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}init{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isasymdots}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}resolution\ B\ {\isasymLongrightarrow}\ A\ {\isasymLongrightarrow}\ B\ {\isasymand}\ A{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A\ {\isasymand}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isasymdots}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}resolution\ A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isasymdots}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}assumption{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B\ {\isasymand}\ A{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isasymdots}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}resolution\ A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ A{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}{\isacharhash}{\isasymdots}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}assumption{\isacharparenright}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}A\ {\isasymand}\ B\ {\isasymLongrightarrow}\ B\ {\isasymand}\ A{\isachardoublequote}} & \isa{{\isachardoublequote}{\isacharparenleft}finish{\isacharparenright}{\isachardoublequote}} \\
+ \end{tabular}
+ \medskip
+ }
+
+ The composition of \hyperlink{inference.assumption}{\mbox{\isa{assumption}}} after \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} occurs quite often, typically in elimination steps.
+ Traditional Isabelle tactics accommodate this by a combined
+ \indexdef{}{inference}{elim\_resolution}\hypertarget{inference.elim-resolution}{\hyperlink{inference.elim-resolution}{\mbox{\isa{elim{\isacharunderscore}resolution}}}} principle. In contrast, Isar uses
+ a slightly more refined combination, where the assumptions to be
+ closed are marked explicitly, using again the protective marker
+ \isa{{\isachardoublequote}{\isacharhash}{\isachardoublequote}}:
+
+ \[
+ \infer[(\hyperlink{inference.refinement}{\mbox{\isa{refinement}}})]
+ {\isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ \isactrlvec G{\isacharprime}\ {\isacharparenleft}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isasymLongrightarrow}\ C{\isasymvartheta}{\isachardoublequote}}}
+ {\begin{tabular}{rl}
+ \isa{{\isachardoublequote}sub{\isasymdash}proof{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}\isactrlvec G\ \isactrlvec a\ {\isasymLongrightarrow}\ B\ \isactrlvec a{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}goal{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec H\ \isactrlvec x\ {\isasymLongrightarrow}\ B{\isacharprime}\ \isactrlvec x{\isacharparenright}\ {\isasymLongrightarrow}\ C{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}goal\ unifier{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ B\ {\isacharparenleft}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ B{\isacharprime}{\isasymvartheta}{\isachardoublequote}} \\
+ \isa{{\isachardoublequote}assm\ unifiers{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isacharparenleft}{\isasymlambda}\isactrlvec x{\isachardot}\ G\isactrlsub j\ {\isacharparenleft}\isactrlvec a\ \isactrlvec x{\isacharparenright}{\isacharparenright}{\isasymvartheta}\ {\isacharequal}\ {\isacharhash}H\isactrlsub i{\isasymvartheta}{\isachardoublequote}} \\
+ & \quad (for each marked \isa{{\isachardoublequote}G\isactrlsub j{\isachardoublequote}} some \isa{{\isachardoublequote}{\isacharhash}H\isactrlsub i{\isachardoublequote}}) \\
+ \end{tabular}}
+ \]
+
+ \noindent Here the \isa{{\isachardoublequote}sub{\isasymdash}proof{\isachardoublequote}} rule stems from the
+ main \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}-\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}-\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} outline of
+ Isar (cf.\ \secref{sec:framework-subproof}): each assumption
+ indicated in the text results in a marked premise \isa{{\isachardoublequote}G{\isachardoublequote}} above.
+ The marking enforces resolution against one of the sub-goal's
+ premises. Consequently, \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}-\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}-\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} makes it possible to fit the result of a sub-proof quite robustly into a
+ pending sub-goal, while maintaining a good measure of flexibility.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
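+\begin{isamarkuptext}%
+\medskip\noindent As a small illustration (our sketch, re-using the
+  \isa{InterI} example from above), the following sub-proof produces one
+  marked premise for its \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}
+  element, which the \hyperlink{inference.refinement}{\mbox{\isa{refinement}}}
+  step at \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}} closes against
+  the corresponding premise of the pending sub-goal:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\begin{verbatim}
+    have "x \<in> \<Inter>\<A>"
+    proof (rule InterI)  (* goal unifier against the rule statement *)
+      fix A              (* local parameter of the nested rule *)
+      assume "A \<in> \<A>"  (* becomes a marked premise #(A \<in> \<A>) *)
+      show "x \<in> A" sorry
+    qed
+\end{verbatim}
+%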
+\isamarkupsection{The Isar proof language \label{sec:framework-isar}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+Structured proofs are presented as high-level expressions for
+ composing entities of Pure (propositions, facts, and goals). The
+ Isar proof language allows reasoning to be organized within the
+ underlying rule calculus of Pure, but Isar is not another logical
+ calculus!
+
+ Isar is an exercise in sound minimalism. Approximately half of the
+ language is introduced as primitive; the rest is defined as derived
+ concepts. The following grammar describes the core language
+ (category \isa{{\isachardoublequote}proof{\isachardoublequote}}), which is embedded into theory
+ specification elements such as \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}; see also
+ \secref{sec:framework-stmt} for the separate category \isa{{\isachardoublequote}statement{\isachardoublequote}}.
+
+ \medskip
+ \begin{tabular}{rcl}
+ \isa{{\isachardoublequote}theory{\isasymdash}stmt{\isachardoublequote}} & = & \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}statement\ proof\ \ {\isacharbar}{\isachardoublequote}}~~\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isasymdots}\ \ {\isacharbar}\ \ {\isasymdots}{\isachardoublequote}} \\[1ex]
+
+ \isa{{\isachardoublequote}proof{\isachardoublequote}} & = & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}\ stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}{\isachardoublequote}} \\[1ex]
+
+ \isa{prfx} & = & \hyperlink{command.using}{\mbox{\isa{\isacommand{using}}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.unfolding}{\mbox{\isa{\isacommand{unfolding}}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} \\
+
+ \isa{stmt} & = & \hyperlink{command.braceleft}{\mbox{\isa{\isacommand{{\isacharbraceleft}}}}}~\isa{{\isachardoublequote}stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.braceright}{\mbox{\isa{\isacommand{{\isacharbraceright}}}}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.next}{\mbox{\isa{\isacommand{next}}}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}~\isa{{\isachardoublequote}name\ {\isacharequal}\ facts{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.let}{\mbox{\isa{\isacommand{let}}}}~\isa{{\isachardoublequote}term\ {\isacharequal}\ term{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{{\isachardoublequote}var\isactrlsup {\isacharplus}{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}inference{\isasymguillemotright}\ name{\isacharcolon}\ props{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}}\isa{{\isachardoublequote}\isactrlsup {\isacharquery}{\isachardoublequote}}~\isa{goal} \\
+ \isa{goal} & = & \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}name{\isacharcolon}\ props\ proof{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}~\isa{{\isachardoublequote}name{\isacharcolon}\ props\ proof{\isachardoublequote}} \\
+ \end{tabular}
+
+ \medskip Simultaneous propositions or facts may be separated by the
+ \hyperlink{keyword.and}{\mbox{\isa{\isakeyword{and}}}} keyword.
+
+ \medskip The syntax for terms and propositions is inherited from
+ Pure (and the object-logic). A \isa{{\isachardoublequote}pattern{\isachardoublequote}} is a \isa{{\isachardoublequote}term{\isachardoublequote}} with schematic variables, to be bound by higher-order
+ matching.
+
+ \medskip Facts may be referenced by name or proposition. For
+ example, the result of ``\hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}a{\isacharcolon}\ A\ {\isasymlangle}proof{\isasymrangle}{\isachardoublequote}}''
+ becomes available both as \isa{{\isachardoublequote}a{\isachardoublequote}} and
+ \isacharbackquoteopen\isa{{\isachardoublequote}A{\isachardoublequote}}\isacharbackquoteclose. Moreover,
+ fact expressions may involve attributes that modify either the
+ theorem or the background context. For example, the expression
+ ``\isa{{\isachardoublequote}a\ {\isacharbrackleft}OF\ b{\isacharbrackright}{\isachardoublequote}}'' refers to the composition of two facts
+ according to the \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} inference of
+ \secref{sec:framework-resolution}, while ``\isa{{\isachardoublequote}a\ {\isacharbrackleft}intro{\isacharbrackright}{\isachardoublequote}}''
+ declares a fact as introduction rule in the context.
+
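+ For example, if \isa{{\isachardoublequote}b{\isachardoublequote}} states \isa{{\isachardoublequote}A{\isachardoublequote}} and \isa{{\isachardoublequote}a{\isachardoublequote}} states
+ \isa{{\isachardoublequote}A\ {\isasymLongrightarrow}\ B{\isachardoublequote}}, then \isa{{\isachardoublequote}a\ {\isacharbrackleft}OF\ b{\isacharbrackright}{\isachardoublequote}} denotes the composed
+ fact \isa{{\isachardoublequote}B{\isachardoublequote}}.
+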
+ The special fact called ``\hyperlink{fact.this}{\mbox{\isa{this}}}'' always refers to the last
+ result, as produced by \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}, \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}, \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}, or \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}. Since \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}} occurs
+ frequently together with \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}} we provide some
+ abbreviations:
+
+ \medskip
+ \begin{tabular}{rcl}
+ \hyperlink{command.from}{\mbox{\isa{\isacommand{from}}}}~\isa{a} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}~\isa{a}~\hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}} \\
+ \hyperlink{command.with}{\mbox{\isa{\isacommand{with}}}}~\isa{a} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \hyperlink{command.from}{\mbox{\isa{\isacommand{from}}}}~\isa{{\isachardoublequote}a\ {\isasymAND}\ this{\isachardoublequote}} \\
+ \end{tabular}
+ \medskip
+
+ The \isa{{\isachardoublequote}method{\isachardoublequote}} category is essentially a parameter and may be
+ populated later. Methods use the facts indicated by \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}} or \hyperlink{command.using}{\mbox{\isa{\isacommand{using}}}}, and then operate on the goal state.
+ Some basic methods are predefined: ``\hyperlink{method.-}{\mbox{\isa{{\isacharminus}}}}'' leaves the goal
+ unchanged, ``\hyperlink{method.this}{\mbox{\isa{this}}}'' applies the facts as rules to the
+ goal, ``\hyperlink{method.rule}{\mbox{\isa{rule}}}'' applies the facts to another rule and the
+ result to the goal (both ``\hyperlink{method.this}{\mbox{\isa{this}}}'' and ``\hyperlink{method.rule}{\mbox{\isa{rule}}}''
+ refer to \hyperlink{inference.resolution}{\mbox{\isa{resolution}}} of
+ \secref{sec:framework-resolution}). The secondary arguments to
+ ``\hyperlink{method.rule}{\mbox{\isa{rule}}}'' may be specified explicitly as in ``\isa{{\isachardoublequote}{\isacharparenleft}rule\ a{\isacharparenright}{\isachardoublequote}}'', or picked from the context. In the latter case, the system
+ first tries rules declared as \hyperlink{attribute.Pure.elim}{\mbox{\isa{elim}}} or
+ \hyperlink{attribute.Pure.dest}{\mbox{\isa{dest}}}, followed by those declared as \hyperlink{attribute.Pure.intro}{\mbox{\isa{intro}}}.
+
+ The default method for \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}} is ``\hyperlink{method.rule}{\mbox{\isa{rule}}}''
+ (arguments picked from the context), for \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}} it is
+ ``\hyperlink{method.-}{\mbox{\isa{{\isacharminus}}}}''. Further abbreviations for terminal proof steps
+ are ``\hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}}~\isa{{\isachardoublequote}method\isactrlsub {\isadigit{1}}\ method\isactrlsub {\isadigit{2}}{\isachardoublequote}}'' for
+ ``\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\isactrlsub {\isadigit{1}}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{{\isachardoublequote}method\isactrlsub {\isadigit{2}}{\isachardoublequote}}'', and ``\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' for ``\hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}}~\hyperlink{method.rule}{\mbox{\isa{rule}}}'', and ``\hyperlink{command.dot}{\mbox{\isa{\isacommand{{\isachardot}}}}}'' for ``\hyperlink{command.by}{\mbox{\isa{\isacommand{by}}}}~\hyperlink{method.this}{\mbox{\isa{this}}}''. The \hyperlink{command.unfolding}{\mbox{\isa{\isacommand{unfolding}}}} element operates
+ directly on the current facts and goal by applying equalities.
+
+ \medskip Block structure can be indicated explicitly by ``\hyperlink{command.braceleft}{\mbox{\isa{\isacommand{{\isacharbraceleft}}}}}~\isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}}~\hyperlink{command.braceright}{\mbox{\isa{\isacommand{{\isacharbraceright}}}}}'', although the body of a sub-proof
+ already involves implicit nesting. In any case, \hyperlink{command.next}{\mbox{\isa{\isacommand{next}}}}
+ jumps into the next section of a block, i.e.\ it acts like closing
+ an implicit block scope and opening another one; there is no direct
+ correspondence to subgoals here.
+
+ The remaining elements \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}} and \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}} build up
+ a local context (see \secref{sec:framework-context}), while
+ \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} refines a pending sub-goal by the rule resulting
+ from a nested sub-proof (see \secref{sec:framework-subproof}).
+ Further derived concepts will support calculational reasoning (see
+ \secref{sec:framework-calc}).%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
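+\begin{isamarkuptext}%
+\medskip\noindent The following schematic proof text (our sketch, with
+  \isa{A} as a hypothetical proposition of the object-logic) exercises
+  the core grammar above: a \isacommand{proof}/\isacommand{qed} block
+  with an explicit initial method, and \isacommand{assume},
+  \isacommand{from}, \isacommand{show} as statements:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+\begin{verbatim}
+    theorem "A --> A"
+    proof (rule impI)   (* initial refinement by a named rule *)
+      assume a: A       (* augment the proof context *)
+      from a show A .   (* terminal proof by "this" *)
+    qed
+\end{verbatim}
+%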
+\isamarkupsubsection{Context elements \label{sec:framework-context}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+In judgments \isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymphi}{\isachardoublequote}} of the primitive framework, \isa{{\isachardoublequote}{\isasymGamma}{\isachardoublequote}}
+ essentially acts like a proof context. Isar elaborates this idea
+ towards a higher-level notion, with additional information for
+ type-inference, term abbreviations, local facts, hypotheses etc.
+
+ The element \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{{\isachardoublequote}x\ {\isacharcolon}{\isacharcolon}\ {\isasymalpha}{\isachardoublequote}} declares a local
+ parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
+ results exported from the context, \isa{{\isachardoublequote}x{\isachardoublequote}} may become anything.
+ The \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}inference{\isasymguillemotright}{\isachardoublequote}} element provides a
+ general interface to hypotheses: ``\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}inference{\isasymguillemotright}\ A{\isachardoublequote}}'' produces \isa{{\isachardoublequote}A\ {\isasymturnstile}\ A{\isachardoublequote}} locally, while the
+ included inference tells how to discharge \isa{A} from results
+ \isa{{\isachardoublequote}A\ {\isasymturnstile}\ B{\isachardoublequote}} later on. There is no user-syntax for \isa{{\isachardoublequote}{\isasymguillemotleft}inference{\isasymguillemotright}{\isachardoublequote}}, i.e.\ it may only occur internally when derived
+ commands are defined in ML.
+
+ At the user-level, the default inference for \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}} is
+ \hyperlink{inference.discharge}{\mbox{\isa{discharge}}} as given below. The additional variants
+ \hyperlink{command.presume}{\mbox{\isa{\isacommand{presume}}}} and \hyperlink{command.def}{\mbox{\isa{\isacommand{def}}}} are defined as follows:
+
+ \medskip
+ \begin{tabular}{rcl}
+ \hyperlink{command.presume}{\mbox{\isa{\isacommand{presume}}}}~\isa{A} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}weak{\isasymdash}discharge{\isasymguillemotright}\ A{\isachardoublequote}} \\
+ \hyperlink{command.def}{\mbox{\isa{\isacommand{def}}}}~\isa{{\isachardoublequote}x\ {\isasymequiv}\ a{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{x}~\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}expansion{\isasymguillemotright}\ x\ {\isasymequiv}\ a{\isachardoublequote}} \\
+ \end{tabular}
+ \medskip
+
+ \[
+ \infer[(\indexdef{}{inference}{discharge}\hypertarget{inference.discharge}{\hyperlink{inference.discharge}{\mbox{\isa{discharge}}}})]{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ {\isacharhash}A\ {\isasymLongrightarrow}\ B{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isasymturnstile}\ B{\isachardoublequote}}}
+ \]
+ \[
+ \infer[(\indexdef{}{inference}{weak-discharge}\hypertarget{inference.weak-discharge}{\hyperlink{inference.weak-discharge}{\mbox{\isa{weak{\isasymdash}discharge}}}})]{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isacharminus}\ A\ {\isasymturnstile}\ A\ {\isasymLongrightarrow}\ B{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isasymturnstile}\ B{\isachardoublequote}}}
+ \]
+ \[
+ \infer[(\indexdef{}{inference}{expansion}\hypertarget{inference.expansion}{\hyperlink{inference.expansion}{\mbox{\isa{expansion}}}})]{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isacharminus}\ {\isacharparenleft}x\ {\isasymequiv}\ a{\isacharparenright}\ {\isasymturnstile}\ B\ a{\isachardoublequote}}}{\isa{{\isachardoublequote}{\isasymstrut}{\isasymGamma}\ {\isasymturnstile}\ B\ x{\isachardoublequote}}}
+ \]
+
+ \medskip Note that \hyperlink{inference.discharge}{\mbox{\isa{discharge}}} and \hyperlink{inference.weak-discharge}{\mbox{\isa{weak{\isasymdash}discharge}}} differ in the marker for \isa{A}, which is
+ relevant when the result of a \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}-\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}-\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} outline is composed with a pending goal,
+ cf.\ \secref{sec:framework-subproof}.
+
+ The most interesting derived context element in Isar is \hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
+ elimination steps in a purely forward manner. The \hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}
+ command takes a specification of parameters \isa{{\isachardoublequote}\isactrlvec x{\isachardoublequote}} and
+ assumptions \isa{{\isachardoublequote}\isactrlvec A{\isachardoublequote}} to be added to the context, together
+ with a proof of a case rule stating that this extension is
+ conservative (i.e.\ may be removed from closed results later on):
+
+ \medskip
+ \begin{tabular}{l}
+ \isa{{\isachardoublequote}{\isasymlangle}facts{\isasymrangle}{\isachardoublequote}}~~\hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}~\isa{{\isachardoublequote}\isactrlvec x\ {\isasymWHERE}\ \isactrlvec A\ \isactrlvec x\ \ {\isasymlangle}proof{\isasymrangle}\ {\isasymequiv}{\isachardoublequote}} \\[0.5ex]
+ \quad \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}case{\isacharcolon}\ {\isasymAnd}thesis{\isachardot}\ {\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ \isactrlvec x\ {\isasymLongrightarrow}\ thesis{\isacharparenright}\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
+ \quad \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\hyperlink{method.-}{\mbox{\isa{{\isacharminus}}}} \\
+ \qquad \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{thesis} \\
+ \qquad \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ \isactrlvec x\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
+ \qquad \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}~\isa{thesis}~\hyperlink{command.using}{\mbox{\isa{\isacommand{using}}}}~\isa{{\isachardoublequote}{\isasymlangle}facts{\isasymrangle}\ {\isasymlangle}proof{\isasymrangle}{\isachardoublequote}} \\
+ \quad \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}} \\
+ \quad \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{{\isachardoublequote}\isactrlvec x{\isachardoublequote}}~\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}{\isasymguillemotleft}elimination\ case{\isasymguillemotright}\ \isactrlvec A\ \isactrlvec x{\isachardoublequote}} \\
+ \end{tabular}
+ \medskip
+
+ \[
+ \infer[(\hyperlink{inference.elimination}{\mbox{\isa{elimination}}})]{\isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ B{\isachardoublequote}}}{
+ \begin{tabular}{rl}
+ \isa{{\isachardoublequote}case{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isasymGamma}\ {\isasymturnstile}\ {\isasymAnd}thesis{\isachardot}\ {\isacharparenleft}{\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ \isactrlvec x\ {\isasymLongrightarrow}\ thesis{\isacharparenright}\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\[0.2ex]
+ \isa{{\isachardoublequote}result{\isacharcolon}{\isachardoublequote}} &
+ \isa{{\isachardoublequote}{\isasymGamma}\ {\isasymunion}\ \isactrlvec A\ \isactrlvec y\ {\isasymturnstile}\ B{\isachardoublequote}} \\[0.2ex]
+ \end{tabular}}
+ \]
+
+ \noindent Here the name ``\isa{thesis}'' is a specific convention
+ for an arbitrary-but-fixed proposition; in the primitive natural
+ deduction rules shown before we have occasionally used \isa{C}.
+ The whole statement of ``\hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}~\isa{x}~\hyperlink{keyword.where}{\mbox{\isa{\isakeyword{where}}}}~\isa{{\isachardoublequote}A\ x{\isachardoublequote}}'' may be read as a claim that \isa{{\isachardoublequote}A\ x{\isachardoublequote}}
+ may be assumed for some arbitrary-but-fixed \isa{{\isachardoublequote}x{\isachardoublequote}}. Also note
+ that ``\hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}~\isa{{\isachardoublequote}A\ {\isasymAND}\ B{\isachardoublequote}}'' without parameters
+ is similar to ``\hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}A\ {\isasymAND}\ B{\isachardoublequote}}'', but the
+ latter involves multiple sub-goals.
+
+ \medskip The subsequent Isar proof texts explain all context
+ elements introduced above using the formal proof language itself.
+ After finishing a local proof within a block, we indicate the
+ exported result via \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+%
+\begin{minipage}[t]{0.4\textwidth}
+\ \ \isacommand{{\isacharbraceleft}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\isanewline
+\ \ \ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{{\isacharbraceright}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{note}\isamarkupfalse%
+\ {\isacharbackquoteopen}{\isasymAnd}x{\isachardot}\ B\ x{\isacharbackquoteclose}%
+\end{minipage}\quad\begin{minipage}[t]{0.4\textwidth}
+\ \ \isacommand{{\isacharbraceleft}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \isacommand{have}\isamarkupfalse%
+\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{{\isacharbraceright}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{note}\isamarkupfalse%
+\ {\isacharbackquoteopen}A\ {\isasymLongrightarrow}\ B{\isacharbackquoteclose}%
+\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}
+\ \ \isacommand{{\isacharbraceleft}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{def}\isamarkupfalse%
+\ x\ {\isasymequiv}\ a\isanewline
+\ \ \ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{{\isacharbraceright}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{note}\isamarkupfalse%
+\ {\isacharbackquoteopen}B\ a{\isacharbackquoteclose}%
+\end{minipage}\quad\begin{minipage}[t]{0.4\textwidth}
+\ \ \isacommand{{\isacharbraceleft}}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{obtain}\isamarkupfalse%
+\ x\ \isakeyword{where}\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ \ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{have}\isamarkupfalse%
+\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{{\isacharbraceright}}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{note}\isamarkupfalse%
+\ {\isacharbackquoteopen}B{\isacharbackquoteclose}%
+\end{minipage}
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\bigskip\noindent This illustrates the meaning of Isar context
+ elements without goals getting in between.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Structured statements \label{sec:framework-stmt}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The category \isa{{\isachardoublequote}statement{\isachardoublequote}} of top-level theorem specifications
+ is defined as follows:
+
+ \medskip
+ \begin{tabular}{rcl}
+ \isa{{\isachardoublequote}statement{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \isa{{\isachardoublequote}name{\isacharcolon}\ props\ {\isasymAND}\ {\isasymdots}{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}context\isactrlsup {\isacharasterisk}\ conclusion{\isachardoublequote}} \\[0.5ex]
+
+ \isa{{\isachardoublequote}context{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymFIXES}\ vars\ {\isasymAND}\ {\isasymdots}{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymASSUMES}\ name{\isacharcolon}\ props\ {\isasymAND}\ {\isasymdots}{\isachardoublequote}} \\
+
+ \isa{{\isachardoublequote}conclusion{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymequiv}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymSHOWS}\ name{\isacharcolon}\ props\ {\isasymAND}\ {\isasymdots}{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}{\isasymOBTAINS}\ vars\ {\isasymAND}\ {\isasymdots}\ {\isasymWHERE}\ name{\isacharcolon}\ props\ {\isasymAND}\ {\isasymdots}{\isachardoublequote}} \\
+ & & \quad \isa{{\isachardoublequote}{\isasymBBAR}\ {\isasymdots}{\isachardoublequote}} \\
+ \end{tabular}
+
+ \medskip\noindent A simple \isa{{\isachardoublequote}statement{\isachardoublequote}} consists of named
+ propositions. The full form admits local context elements followed
+ by the actual conclusions, such as ``\hyperlink{keyword.fixes}{\mbox{\isa{\isakeyword{fixes}}}}~\isa{x}~\hyperlink{keyword.assumes}{\mbox{\isa{\isakeyword{assumes}}}}~\isa{{\isachardoublequote}A\ x{\isachardoublequote}}~\hyperlink{keyword.shows}{\mbox{\isa{\isakeyword{shows}}}}~\isa{{\isachardoublequote}B\ x{\isachardoublequote}}''. The final result emerges as a Pure rule after discharging
+ the context: \isa{{\isachardoublequote}{\isasymAnd}x{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ x{\isachardoublequote}}.
+
+ The \hyperlink{keyword.obtains}{\mbox{\isa{\isakeyword{obtains}}}} variant is another abbreviation defined
+ below; unlike \hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}} (cf.\
+ \secref{sec:framework-context}) there may be several ``cases''
+ separated by ``\isa{{\isachardoublequote}{\isasymBBAR}{\isachardoublequote}}'', each consisting of several
+ parameters (\isa{{\isachardoublequote}vars{\isachardoublequote}}) and several premises (\isa{{\isachardoublequote}props{\isachardoublequote}}).
+ This specifies multi-branch elimination rules.
+
+ \medskip
+ \begin{tabular}{l}
+ \isa{{\isachardoublequote}{\isasymOBTAINS}\ \isactrlvec x\ {\isasymWHERE}\ \isactrlvec A\ \isactrlvec x\ \ \ {\isasymBBAR}\ \ \ {\isasymdots}\ \ \ {\isasymequiv}{\isachardoublequote}} \\[0.5ex]
+ \quad \isa{{\isachardoublequote}{\isasymFIXES}\ thesis{\isachardoublequote}} \\
+ \quad \isa{{\isachardoublequote}{\isasymASSUMES}\ {\isacharbrackleft}intro{\isacharbrackright}{\isacharcolon}\ {\isasymAnd}\isactrlvec x{\isachardot}\ \isactrlvec A\ \isactrlvec x\ {\isasymLongrightarrow}\ thesis\ \ {\isasymAND}\ \ {\isasymdots}{\isachardoublequote}} \\
+ \quad \isa{{\isachardoublequote}{\isasymSHOWS}\ thesis{\isachardoublequote}} \\
+ \end{tabular}
+ \medskip
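+
+ For example, a multi-branch case analysis on natural numbers may be
+ stated as follows (a hypothetical HOL fragment, with the proof
+ omitted via \texttt{sorry} as elsewhere in this chapter):
+
+\begin{ttbox}
+theorem
+  fixes n :: nat
+  obtains "n = 0" | m where "n = Suc m"
+  sorry
+\end{ttbox}
+
+ Following the expansion above, this statement corresponds to the
+ familiar exhaustion rule on natural numbers, with one premise per
+ case.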
+
+ Presenting structured statements in such an ``open'' format usually
+ simplifies the subsequent proof, because the outer structure of the
+ problem is already laid out directly. E.g.\ consider the following
+ canonical patterns for \isa{{\isachardoublequote}{\isasymSHOWS}{\isachardoublequote}} and \isa{{\isachardoublequote}{\isasymOBTAINS}{\isachardoublequote}},
+ respectively:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\begin{minipage}{0.5\textwidth}
+\isacommand{theorem}\isamarkupfalse%
+\isanewline
+\ \ \isakeyword{fixes}\ x\ \isakeyword{and}\ y\isanewline
+\ \ \isakeyword{assumes}\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+\ \ \isakeyword{shows}\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{from}\isamarkupfalse%
+\ {\isacharbackquoteopen}A\ x{\isacharbackquoteclose}\ \isakeyword{and}\ {\isacharbackquoteopen}B\ y{\isacharbackquoteclose}\isanewline
+\ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}\begin{minipage}{0.5\textwidth}
+\isacommand{theorem}\isamarkupfalse%
+\isanewline
+\ \ \isakeyword{obtains}\ x\ \isakeyword{and}\ y\isanewline
+\ \ \isakeyword{where}\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ a{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}B\ b{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{then}\isamarkupfalse%
+\ \isacommand{show}\isamarkupfalse%
+\ thesis\ \isacommand{{\isachardot}{\isachardot}}\isamarkupfalse%
+\isanewline
+\isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}
+%
+\begin{isamarkuptext}%
+\medskip\noindent Here local facts \isacharbackquoteopen\isa{{\isachardoublequote}A\ x{\isachardoublequote}}\isacharbackquoteclose\ and \isacharbackquoteopen\isa{{\isachardoublequote}B\ y{\isachardoublequote}}\isacharbackquoteclose\ are referenced immediately; there is no
+ need to decompose the logical rule structure again. In the second
+ proof the final ``\hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}}~\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}~\isa{thesis}~\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}'' involves the local rule case \isa{{\isachardoublequote}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} for the particular instance of terms \isa{{\isachardoublequote}a{\isachardoublequote}} and \isa{{\isachardoublequote}b{\isachardoublequote}} produced in the body.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Structured proof refinement \label{sec:framework-subproof}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+By breaking up the grammar for the Isar proof language, we may
+ understand a proof text as a linear sequence of individual proof
+ commands. These are interpreted as transitions of the Isar virtual
+ machine (Isar/VM), which operates on a block-structured
+ configuration in single steps. This allows users to write proof
+ texts in an incremental manner, and inspect intermediate
+ configurations for debugging.
+
+ The basic idea is analogous to evaluating algebraic expressions on a
+ stack machine: \isa{{\isachardoublequote}{\isacharparenleft}a\ {\isacharplus}\ b{\isacharparenright}\ {\isasymcdot}\ c{\isachardoublequote}} then corresponds to a sequence
+ of single transitions for each symbol \isa{{\isachardoublequote}{\isacharparenleft}{\isacharcomma}\ a{\isacharcomma}\ {\isacharplus}{\isacharcomma}\ b{\isacharcomma}\ {\isacharparenright}{\isacharcomma}\ {\isasymcdot}{\isacharcomma}\ c{\isachardoublequote}}.
+ In Isar the algebraic values are facts or goals, and the operations
+ are inferences.
+
+ \medskip The Isar/VM state maintains a stack of nodes, each of
+ which contains the local proof context, the linguistic mode, and an
+ optional pending goal. The mode determines the type of transition
+ that may be performed next; it essentially alternates between
+ forward and backward reasoning, with an intermediate stage for
+ chained facts (see \figref{fig:isar-vm}).
+
+ \begin{figure}[htb]
+ \begin{center}
+ \includegraphics[width=0.8\textwidth]{Thy/document/isar-vm}
+ \end{center}
+ \caption{Isar/VM modes}\label{fig:isar-vm}
+ \end{figure}
+
+ For example, in \isa{{\isachardoublequote}state{\isachardoublequote}} mode Isar acts like a mathematical
+ scratch-pad, accepting declarations like \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}, \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}, and claims like \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}, \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}. A goal
+ statement changes the mode to \isa{{\isachardoublequote}prove{\isachardoublequote}}, which means that we
+ may now refine the problem via \hyperlink{command.unfolding}{\mbox{\isa{\isacommand{unfolding}}}} or \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}. Then we are again in \isa{{\isachardoublequote}state{\isachardoublequote}} mode of a proof body,
+ which may issue \hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}} statements to solve pending
+ sub-goals. A concluding \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}} will return to the original
+ \isa{{\isachardoublequote}state{\isachardoublequote}} mode one level upwards. The subsequent Isar/VM
+ trace indicates block structure, linguistic mode, goal state, and
+ inferences:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\begingroup\footnotesize
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+%
+\begin{minipage}[t]{0.18\textwidth}
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ {\isasymlongrightarrow}\ B{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ A\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ B%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+\isanewline
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ \ \ \ \ \ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\quad
+\begin{minipage}[t]{0.06\textwidth}
+\isa{{\isachardoublequote}begin{\isachardoublequote}} \\
+\\
+\\
+\isa{{\isachardoublequote}begin{\isachardoublequote}} \\
+\isa{{\isachardoublequote}end{\isachardoublequote}} \\
+\isa{{\isachardoublequote}end{\isachardoublequote}} \\
+\end{minipage}
+\begin{minipage}[t]{0.08\textwidth}
+\isa{{\isachardoublequote}prove{\isachardoublequote}} \\
+\isa{{\isachardoublequote}state{\isachardoublequote}} \\
+\isa{{\isachardoublequote}state{\isachardoublequote}} \\
+\isa{{\isachardoublequote}prove{\isachardoublequote}} \\
+\isa{{\isachardoublequote}state{\isachardoublequote}} \\
+\isa{{\isachardoublequote}state{\isachardoublequote}} \\
+\end{minipage}\begin{minipage}[t]{0.35\textwidth}
+\isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymlongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isacharparenleft}A\ {\isasymlongrightarrow}\ B{\isacharparenright}{\isachardoublequote}} \\
+\isa{{\isachardoublequote}{\isacharparenleft}A\ {\isasymLongrightarrow}\ B{\isacharparenright}\ {\isasymLongrightarrow}\ {\isacharhash}{\isacharparenleft}A\ {\isasymlongrightarrow}\ B{\isacharparenright}{\isachardoublequote}} \\
+\\
+\\
+\isa{{\isachardoublequote}{\isacharhash}{\isacharparenleft}A\ {\isasymlongrightarrow}\ B{\isacharparenright}{\isachardoublequote}} \\
+\isa{{\isachardoublequote}A\ {\isasymlongrightarrow}\ B{\isachardoublequote}} \\
+\end{minipage}\begin{minipage}[t]{0.4\textwidth}
+\isa{{\isachardoublequote}{\isacharparenleft}init{\isacharparenright}{\isachardoublequote}} \\
+\isa{{\isachardoublequote}{\isacharparenleft}resolution\ impI{\isacharparenright}{\isachardoublequote}} \\
+\\
+\\
+\isa{{\isachardoublequote}{\isacharparenleft}refinement\ {\isacharhash}A\ {\isasymLongrightarrow}\ B{\isacharparenright}{\isachardoublequote}} \\
+\isa{{\isachardoublequote}{\isacharparenleft}finish{\isacharparenright}{\isachardoublequote}} \\
+\end{minipage}
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\endgroup
+%
+\begin{isamarkuptext}%
+\noindent Here the \hyperlink{inference.refinement}{\mbox{\isa{refinement}}} inference from
+ \secref{sec:framework-resolution} mediates composition of Isar
+ sub-proofs nicely. Observe that this principle incorporates some
+ degree of freedom in proof composition. In particular, the proof
+ body allows parameters and assumptions to be re-ordered, or commuted
+ according to Hereditary Harrop Form. Moreover, context elements
+ that are not used in a sub-proof may be omitted altogether. For
+ example:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\begin{minipage}{0.5\textwidth}
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ C\ x\ y{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\ \isakeyword{and}\ y\isanewline
+\ \ \ \ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}\ \isakeyword{and}\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\begin{minipage}{0.5\textwidth}
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ C\ x\ y{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ y\ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isadelimnoproof
+\ %
+\endisadelimnoproof
+%
+\isatagnoproof
+\isacommand{sorry}\isamarkupfalse%
+%
+\endisatagnoproof
+{\isafoldnoproof}%
+%
+\isadelimnoproof
+\isanewline
+%
+\endisadelimnoproof
+%
+\isadelimproof
+\ \ %
+\endisadelimproof
+%
+\isatagproof
+\isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ C\ x\ y{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ y\ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}A\ x{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{qed}\isamarkupfalse%
+%
+\end{minipage}\begin{minipage}{0.5\textwidth}
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymAnd}x\ y{\isachardot}\ A\ x\ {\isasymLongrightarrow}\ B\ y\ {\isasymLongrightarrow}\ C\ x\ y{\isachardoublequoteclose}\isanewline
+\ \ \isacommand{proof}\isamarkupfalse%
+\ {\isacharminus}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ y\ \isacommand{assume}\isamarkupfalse%
+\ {\isachardoublequoteopen}B\ y{\isachardoublequoteclose}\isanewline
+\ \ \ \ \isacommand{fix}\isamarkupfalse%
+\ x\isanewline
+\ \ \ \ \isacommand{show}\isamarkupfalse%
+\ {\isachardoublequoteopen}C\ x\ y{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{qed}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\end{minipage}
+%
+\begin{isamarkuptext}%
+\medskip\noindent Such ``peephole optimizations'' of Isar texts are
+ practically important for readability: they rearrange context
+ elements according to the natural flow of reasoning in the body,
+ while still observing the overall scoping rules.
+
+ \medskip This illustrates the basic idea of structured proof
+ processing in Isar. The main mechanisms are based on natural
+ deduction rule composition within the Pure framework. In
+ particular, there are no direct operations on goal states within the
+ proof body. Moreover, there is no hidden automated reasoning
+ involved, just plain unification.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isamarkupsubsection{Calculational reasoning \label{sec:framework-calc}%
+}
+\isamarkuptrue%
+%
+\begin{isamarkuptext}%
+The existing Isar infrastructure is sufficiently flexible to support
+ calculational reasoning (chains of transitivity steps) as a derived
+ concept. The generic proof elements introduced below depend on
+ rules declared as \hyperlink{attribute.trans}{\mbox{\isa{trans}}} in the context. It is left to
+ the object-logic to provide a suitable rule collection for mixed
+ relations of \isa{{\isachardoublequote}{\isacharequal}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isacharless}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymle}{\isachardoublequote}}, \isa{{\isachardoublequote}{\isasymsubset}{\isachardoublequote}},
+ \isa{{\isachardoublequote}{\isasymsubseteq}{\isachardoublequote}} etc. Due to the flexibility of rule composition
+ (\secref{sec:framework-resolution}), substitution of equals by
+ equals is covered as well, even substitution of inequalities
+ involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
+ and \cite{Bauer-Wenzel:2001}.
+
+ The generic calculational mechanism is based on the observation that
+ rules such as \isa{{\isachardoublequote}trans{\isacharcolon}{\isachardoublequote}}~\isa{{\isachardoublequote}x\ {\isacharequal}\ y\ {\isasymLongrightarrow}\ y\ {\isacharequal}\ z\ {\isasymLongrightarrow}\ x\ {\isacharequal}\ z{\isachardoublequote}}
+ proceed from the premises towards the conclusion in a deterministic
+ fashion. Thus we may reason in forward mode, feeding intermediate
+ results into rules selected from the context. The course of
+ reasoning is organized by maintaining a secondary fact called
+ ``\hyperlink{fact.calculation}{\mbox{\isa{calculation}}}'', apart from the primary ``\hyperlink{fact.this}{\mbox{\isa{this}}}''
+ already provided by the Isar primitives. In the definitions below,
+ \hyperlink{attribute.OF}{\mbox{\isa{OF}}} refers to \hyperlink{inference.resolution}{\mbox{\isa{resolution}}}
+ (\secref{sec:framework-resolution}) with multiple rule arguments,
+ and \isa{{\isachardoublequote}trans{\isachardoublequote}} refers to a suitable rule from the context:
+
+ \begin{matharray}{rcl}
+ \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}}\isa{{\isachardoublequote}\isactrlsub {\isadigit{0}}{\isachardoublequote}} & \equiv & \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}~\isa{{\isachardoublequote}calculation\ {\isacharequal}\ this{\isachardoublequote}} \\
+ \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}}\isa{{\isachardoublequote}\isactrlsub n\isactrlsub {\isacharplus}\isactrlsub {\isadigit{1}}{\isachardoublequote}} & \equiv & \hyperlink{command.note}{\mbox{\isa{\isacommand{note}}}}~\isa{{\isachardoublequote}calculation\ {\isacharequal}\ trans\ {\isacharbrackleft}OF\ calculation\ this{\isacharbrackright}{\isachardoublequote}} \\[0.5ex]
+ \hyperlink{command.finally}{\mbox{\isa{\isacommand{finally}}}} & \equiv & \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}}~\hyperlink{command.from}{\mbox{\isa{\isacommand{from}}}}~\isa{calculation} \\
+ \end{matharray}
+
+ \noindent The start of a calculation is determined implicitly in the
+ text: here \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}} sets \hyperlink{fact.calculation}{\mbox{\isa{calculation}}} to the current
+ result; any subsequent occurrence will update \hyperlink{fact.calculation}{\mbox{\isa{calculation}}} by
+ combination with the next result and a transitivity rule. The
+ calculational sequence is concluded via \hyperlink{command.finally}{\mbox{\isa{\isacommand{finally}}}}, where
+ the final result is exposed for use in a concluding claim.
+
+ Here is a canonical proof pattern, using \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}} to
+ establish the intermediate results:%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\isatagproof
+\ \ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}a\ {\isacharequal}\ b{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ c{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{also}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}{\isasymdots}\ {\isacharequal}\ d{\isachardoublequoteclose}\ \isacommand{sorry}\isamarkupfalse%
+\isanewline
+\ \ \isacommand{finally}\isamarkupfalse%
+\ \isacommand{have}\isamarkupfalse%
+\ {\isachardoublequoteopen}a\ {\isacharequal}\ d{\isachardoublequoteclose}\ \isacommand{{\isachardot}}\isamarkupfalse%
+%
+\endisatagproof
+{\isafoldproof}%
+%
+\isadelimproof
+%
+\endisadelimproof
+%
+\begin{isamarkuptext}%
+\noindent The term ``\isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}}'' above is a special abbreviation
+ provided by the Isabelle/Isar syntax layer: it statically refers to
+ the right-hand side argument of the previous statement given in the
+ text. Thus it happens to coincide with relevant sub-expressions in
+ the calculational chain, but the exact correspondence depends on
+ the transitivity rules involved.
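+
+ For example, in HOL the same pattern covers calculations over mixed
+ relations (a hypothetical fragment; the intermediate claims are
+ left as \texttt{sorry}):
+
+\begin{ttbox}
+  have "a < b" sorry
+  also have "b <= c" sorry
+  also have "c = d" sorry
+  finally have "a < d" .
+\end{ttbox}
+
+ Here the second \hyperlink{command.also}{\mbox{\isa{\isacommand{also}}}} combines the facts via a mixed
+ transitivity rule, while the last step amounts to substitution of
+ equals by equals.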
+
+ \medskip Symmetry rules such as \isa{{\isachardoublequote}x\ {\isacharequal}\ y\ {\isasymLongrightarrow}\ y\ {\isacharequal}\ x{\isachardoublequote}} are like
+ transitivities with only one premise. Isar maintains a separate
+ rule collection declared via the \hyperlink{attribute.sym}{\mbox{\isa{sym}}} attribute, to be
+ used in fact expressions ``\isa{{\isachardoublequote}a\ {\isacharbrackleft}symmetric{\isacharbrackright}{\isachardoublequote}}'', or single-step
+ proofs ``\hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}x\ {\isacharequal}\ y{\isachardoublequote}}~\hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}}~\hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}y\ {\isacharequal}\ x{\isachardoublequote}}~\hyperlink{command.ddot}{\mbox{\isa{\isacommand{{\isachardot}{\isachardot}}}}}''.%
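+
+ For instance (a hypothetical fragment), the fact expression variant
+ may be written as follows:
+
+\begin{ttbox}
+  have eq: "x = y" sorry
+  note eq [symmetric]
+\end{ttbox}
+
+ \noindent The resulting fact is \isa{{\isachardoublequote}y\ {\isacharequal}\ x{\isachardoublequote}}.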
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+%
+\isatagtheory
+\isacommand{end}\isamarkupfalse%
+%
+\endisatagtheory
+{\isafoldtheory}%
+%
+\isadelimtheory
+%
+\endisadelimtheory
+\end{isabellebody}%
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "root"
+%%% End:
--- a/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Inner_Syntax.tex Fri Feb 27 18:50:35 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Inner{\isacharunderscore}Syntax}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -120,19 +118,19 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{show\_types}\verb|show_types: bool ref| & default \verb|false| \\
- \indexml{show\_sorts}\verb|show_sorts: bool ref| & default \verb|false| \\
- \indexml{show\_consts}\verb|show_consts: bool ref| & default \verb|false| \\
- \indexml{long\_names}\verb|long_names: bool ref| & default \verb|false| \\
- \indexml{short\_names}\verb|short_names: bool ref| & default \verb|false| \\
- \indexml{unique\_names}\verb|unique_names: bool ref| & default \verb|true| \\
- \indexml{show\_brackets}\verb|show_brackets: bool ref| & default \verb|false| \\
- \indexml{eta\_contract}\verb|eta_contract: bool ref| & default \verb|true| \\
- \indexml{goals\_limit}\verb|goals_limit: int ref| & default \verb|10| \\
- \indexml{Proof.show\_main\_goal}\verb|Proof.show_main_goal: bool ref| & default \verb|false| \\
- \indexml{show\_hyps}\verb|show_hyps: bool ref| & default \verb|false| \\
- \indexml{show\_tags}\verb|show_tags: bool ref| & default \verb|false| \\
- \indexml{show\_question\_marks}\verb|show_question_marks: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{show\_types}\verb|show_types: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_sorts}\verb|show_sorts: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_consts}\verb|show_consts: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{long\_names}\verb|long_names: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{short\_names}\verb|short_names: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{unique\_names}\verb|unique_names: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{show\_brackets}\verb|show_brackets: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{eta\_contract}\verb|eta_contract: bool ref| & default \verb|true| \\
+ \indexdef{}{ML}{goals\_limit}\verb|goals_limit: int ref| & default \verb|10| \\
+ \indexdef{}{ML}{Proof.show\_main\_goal}\verb|Proof.show_main_goal: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_hyps}\verb|show_hyps: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_tags}\verb|show_tags: bool ref| & default \verb|false| \\
+ \indexdef{}{ML}{show\_question\_marks}\verb|show_question_marks: bool ref| & default \verb|true| \\
\end{mldecls}
These global ML variables control the detail of information that is
@@ -233,9 +231,9 @@
%
\begin{isamarkuptext}%
\begin{mldecls}
- \indexml{Pretty.setdepth}\verb|Pretty.setdepth: int -> unit| \\
- \indexml{Pretty.setmargin}\verb|Pretty.setmargin: int -> unit| \\
- \indexml{print\_depth}\verb|print_depth: int -> unit| \\
+ \indexdef{}{ML}{Pretty.setdepth}\verb|Pretty.setdepth: int -> unit| \\
+ \indexdef{}{ML}{Pretty.setmargin}\verb|Pretty.setmargin: int -> unit| \\
+ \indexdef{}{ML}{print\_depth}\verb|print_depth: int -> unit| \\
\end{mldecls}
These ML functions set limits for pretty printed text.
@@ -392,7 +390,7 @@
\end{matharray}
\begin{rail}
- ('notation' | 'no\_notation') target? mode? (nameref structmixfix + 'and')
+ ('notation' | 'no\_notation') target? mode? \\ (nameref structmixfix + 'and')
;
\end{rail}
@@ -551,13 +549,15 @@
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}tid\ \ {\isacharbar}\ \ tvar\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|_| \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}tid{\isachardoublequote}} \verb|::| \isa{{\isachardoublequote}sort\ \ {\isacharbar}\ \ tvar\ \ {\isachardoublequote}}\verb|::| \isa{{\isachardoublequote}sort\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|_| \verb|::| \isa{{\isachardoublequote}sort{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ id\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{id} \\
- & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}longid\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{longid} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}longid\ \ {\isacharbar}\ \ type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isadigit{0}}\isactrlsup {\isacharparenright}\ longid{\isachardoublequote}} \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|(| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|)| \isa{longid} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isacharparenright}{\isachardoublequote}} \verb|=>| \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}type\isactrlsup {\isacharparenleft}\isactrlsup {\isadigit{1}}\isactrlsup {\isacharparenright}{\isachardoublequote}} \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}} \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|[| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|]| \verb|=>| \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|[| \isa{type} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{type} \verb|]| \isa{{\isachardoublequote}{\isasymRightarrow}{\isachardoublequote}} \isa{type} & \isa{{\isachardoublequote}{\isacharparenleft}{\isadigit{0}}{\isacharparenright}{\isachardoublequote}} \\\\
- \indexdef{inner}{syntax}{sort}\hypertarget{syntax.inner.sort}{\hyperlink{syntax.inner.sort}{\mbox{\isa{sort}}}} & = & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{}|\isa{{\isachardoublequote}\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|}| \\
+ \indexdef{inner}{syntax}{sort}\hypertarget{syntax.inner.sort}{\hyperlink{syntax.inner.sort}{\mbox{\isa{sort}}}} & = & \isa{{\isachardoublequote}id\ \ {\isacharbar}\ \ longid\ \ {\isacharbar}\ \ {\isachardoublequote}}\verb|{}| \\
+ & \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \verb|{| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isasymdots}{\isachardoublequote}} \verb|,| \isa{{\isachardoublequote}{\isacharparenleft}id\ {\isacharbar}\ longid{\isacharparenright}{\isachardoublequote}} \verb|}| \\
\end{supertabular}
\end{center}
--- a/doc-src/IsarRef/Thy/document/Introduction.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Introduction.tex Fri Feb 27 18:50:35 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Introduction}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -32,27 +30,27 @@
The \emph{Isabelle} system essentially provides a generic
infrastructure for building deductive systems (programmed in
Standard ML), with a special focus on interactive theorem proving in
- higher-order logics. In the olden days even end-users would refer
- to certain ML functions (goal commands, tactics, tacticals etc.) to
- pursue their everyday theorem proving tasks
- \cite{isabelle-intro,isabelle-ref}.
+ higher-order logics. Many years ago, even end-users would refer to
+ certain ML functions (goal commands, tactics, tacticals etc.) to
+ pursue their everyday theorem proving tasks.
In contrast \emph{Isar} provides an interpreted language environment
of its own, which has been specifically tailored for the needs of
theory and proof development. Compared to raw ML, the Isabelle/Isar
top-level provides a more robust and comfortable development
- platform, with proper support for theory development graphs,
- single-step transactions with unlimited undo, etc. The
- Isabelle/Isar version of the \emph{Proof~General} user interface
- \cite{proofgeneral,Aspinall:TACAS:2000} provides an adequate
- front-end for interactive theory and proof development in this
- advanced theorem proving environment.
+ platform, with proper support for theory development graphs, managed
+ transactions with unlimited undo etc. The Isabelle/Isar version of
+ the \emph{Proof~General} user interface
+ \cite{proofgeneral,Aspinall:TACAS:2000} provides a decent front-end
+ for interactive theory and proof development in this advanced
+ theorem proving environment, even though it is somewhat biased
+ towards old-style proof scripts.
\medskip Apart from the technical advances over bare-bones ML
programming, the main purpose of the Isar language is to provide a
conceptually different view on machine-checked proofs
- \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. ``Isar'' stands for
- ``Intelligible semi-automated reasoning''. Drawing from both the
+ \cite{Wenzel:1999:TPHOL,Wenzel-PhD}. \emph{Isar} stands for
+ \emph{Intelligible semi-automated reasoning}. Drawing from both the
traditions of informal mathematical proof texts and high-level
programming languages, Isar offers a versatile environment for
structured formal proof documents. Thus properly written Isar
@@ -67,12 +65,12 @@
Despite its grand design of structured proof texts, Isar is able to
assimilate the old tactical style as an ``improper'' sub-language.
This provides an easy upgrade path for existing tactic scripts, as
- well as additional means for interactive experimentation and
- debugging of structured proofs. Isabelle/Isar supports a broad
- range of proof styles, both readable and unreadable ones.
+ well as some means for interactive experimentation and debugging of
+ structured proofs. Isabelle/Isar supports a broad range of proof
+ styles, both readable and unreadable ones.
- \medskip The Isabelle/Isar framework \cite{Wenzel:2006:Festschrift}
- is generic and should work reasonably well for any Isabelle
+ \medskip The generic Isabelle/Isar framework (see
+ \chref{ch:isar-framework}) works reasonably well for any Isabelle
object-logic that conforms to the natural deduction view of the
Isabelle/Pure framework. Specific language elements introduced by
the major object-logics are described in \chref{ch:hol}
@@ -92,207 +90,6 @@
\end{isamarkuptext}%
\isamarkuptrue%
%
-\isamarkupsection{User interfaces%
-}
-\isamarkuptrue%
-%
-\isamarkupsubsection{Terminal sessions%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The Isabelle \texttt{tty} tool provides a very interface for running
- the Isar interaction loop, with some support for command line
- editing. For example:
-\begin{ttbox}
-isabelle tty\medskip
-{\out Welcome to Isabelle/HOL (Isabelle2008)}\medskip
-theory Foo imports Main begin;
-definition foo :: nat where "foo == 1";
-lemma "0 < foo" by (simp add: foo_def);
-end;
-\end{ttbox}
-
- Any Isabelle/Isar command may be retracted by \hyperlink{command.undo}{\mbox{\isa{\isacommand{undo}}}}.
- See the Isabelle/Isar Quick Reference (\appref{ap:refcard}) for a
- comprehensive overview of available commands and other language
- elements.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsection{Emacs Proof General%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Plain TTY-based interaction as above used to be quite feasible with
- traditional tactic based theorem proving, but developing Isar
- documents really demands some better user-interface support. The
- Proof~General environment by David Aspinall
- \cite{proofgeneral,Aspinall:TACAS:2000} offers a generic Emacs
- interface for interactive theorem provers that organizes all the
- cut-and-paste and forward-backward walk through the text in a very
- neat way. In Isabelle/Isar, the current position within a partial
- proof document is equally important than the actual proof state.
- Thus Proof~General provides the canonical working environment for
- Isabelle/Isar, both for getting acquainted (e.g.\ by replaying
- existing Isar documents) and for production work.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsubsection{Proof~General as default Isabelle interface%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-The Isabelle interface wrapper script provides an easy way to invoke
- Proof~General (including XEmacs or GNU Emacs). The default
- configuration of Isabelle is smart enough to detect the
- Proof~General distribution in several canonical places (e.g.\
- \verb|$ISABELLE_HOME/contrib/ProofGeneral|). Thus the
- capital \verb|Isabelle| executable would already refer to the
- \verb|ProofGeneral/isar| interface without further ado. The
- Isabelle interface script provides several options; pass \verb|-?| to see its usage.
-
- With the proper Isabelle interface setup, Isar documents may now be edited by
- visiting appropriate theory files, e.g.\
-\begin{ttbox}
-Isabelle \({\langle}isabellehome{\rangle}\)/src/HOL/Isar_examples/Summation.thy
-\end{ttbox}
- Beginners may note the tool bar for navigating forward and backward
- through the text (this depends on the local Emacs installation).
- Consult the Proof~General documentation \cite{proofgeneral} for
- further basic command sequences, in particular ``\verb|C-c C-return|''
- and ``\verb|C-c u|''.
-
- \medskip Proof~General may be also configured manually by giving
- Isabelle settings like this (see also \cite{isabelle-sys}):
-
-\begin{ttbox}
-ISABELLE_INTERFACE=\$ISABELLE_HOME/contrib/ProofGeneral/isar/interface
-PROOFGENERAL_OPTIONS=""
-\end{ttbox}
- You may have to change \verb|$ISABELLE_HOME/contrib/ProofGeneral| to the actual installation
- directory of Proof~General.
-
- \medskip Apart from the Isabelle command line, defaults for
- interface options may be given by the \verb|PROOFGENERAL_OPTIONS|
- setting. For example, the Emacs executable to be used may be
- configured in Isabelle's settings like this:
-\begin{ttbox}
-PROOFGENERAL_OPTIONS="-p xemacs-mule"
-\end{ttbox}
-
- Occasionally, a user's \verb|~/.emacs| file contains code
- that is incompatible with the (X)Emacs version used by
- Proof~General, causing the interface startup to fail prematurely.
- Here the \verb|-u false| option helps to get the interface
- process up and running. Note that additional Lisp customization
- code may reside in \verb|proofgeneral-settings.el| of
- \verb|$ISABELLE_HOME/etc| or \verb|$ISABELLE_HOME_USER/etc|.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsubsubsection{The X-Symbol package%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Proof~General incorporates a version of the Emacs X-Symbol package
- \cite{x-symbol}, which handles proper mathematical symbols displayed
- on screen. Pass option \verb|-x true| to the Isabelle
- interface script, or check the appropriate Proof~General menu
- setting by hand. The main challenge of getting X-Symbol to work
- properly is the underlying (semi-automated) X11 font setup.
-
- \medskip Using proper mathematical symbols in Isabelle theories can
- be very convenient for readability of large formulas. On the other
- hand, the plain ASCII sources easily become somewhat unintelligible.
- For example, \isa{{\isachardoublequote}{\isasymLongrightarrow}{\isachardoublequote}} would appear as \verb|\<Longrightarrow>| according
- the default set of Isabelle symbols. Nevertheless, the Isabelle
- document preparation system (see \chref{ch:document-prep}) will be
- happy to print non-ASCII symbols properly. It is even possible to
- invent additional notation beyond the display capabilities of Emacs
- and X-Symbol.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{Isabelle/Isar theories%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-Isabelle/Isar offers the following main improvements over classic
- Isabelle.
-
- \begin{enumerate}
-
- \item A \emph{theory format} that integrates specifications and
- proofs, supporting interactive development and unlimited undo
- operation.
-
- \item A \emph{formal proof document language} designed to support
- intelligible semi-automated reasoning. Instead of putting together
- unreadable tactic scripts, the author is enabled to express the
- reasoning in way that is close to usual mathematical practice. The
- old tactical style has been assimilated as ``improper'' language
- elements.
-
- \item A simple document preparation system, for typesetting formal
- developments together with informal text. The resulting
- hyper-linked PDF documents are equally well suited for WWW
- presentation and as printed copies.
-
- \end{enumerate}
-
- The Isar proof language is embedded into the new theory format as a
- proper sub-language. Proof mode is entered by stating some
- \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}} or \hyperlink{command.lemma}{\mbox{\isa{\isacommand{lemma}}}} at the theory level, and
- left again with the final conclusion (e.g.\ via \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}).
- A few theory specification mechanisms also require some proof, such
- as HOL's \hyperlink{command.typedef}{\mbox{\isa{\isacommand{typedef}}}} which demands non-emptiness of the
- representing sets.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
-\isamarkupsection{How to write Isar proofs anyway? \label{sec:isar-howto}%
-}
-\isamarkuptrue%
-%
-\begin{isamarkuptext}%
-This is one of the key questions, of course. First of all, the
- tactic script emulation of Isabelle/Isar essentially provides a
- clarified version of the very same unstructured proof style of
- classic Isabelle. Old-time users should quickly become acquainted
- with that (slightly degenerative) view of Isar.
-
- Writing \emph{proper} Isar proof texts targeted at human readers is
- quite different, though. Experienced users of the unstructured
- style may even have to unlearn some of their habits to master proof
- composition in Isar. In contrast, new users with less experience in
- old-style tactical proving, but a good understanding of mathematical
- proof in general, often get started easier.
-
- \medskip The present text really is only a reference manual on
- Isabelle/Isar, not a tutorial. Nevertheless, we will attempt to
- give some clues of how the concepts introduced here may be put into
- practice. Especially note that \appref{ap:refcard} provides a quick
- reference card of the most common Isabelle/Isar language elements.
-
- Further issues concerning the Isar concepts are covered in the
- literature
- \cite{Wenzel:1999:TPHOL,Wiedijk:2000:MV,Bauer-Wenzel:2000:HB,Bauer-Wenzel:2001}.
- The author's PhD thesis \cite{Wenzel-PhD} presently provides the
- most complete exposition of Isar foundations, techniques, and
- applications. A number of example applications are distributed with
- Isabelle, and available via the Isabelle WWW library (e.g.\
- \url{http://isabelle.in.tum.de/library/}). The ``Archive of Formal
- Proofs'' \url{http://afp.sourceforge.net/} also provides plenty of
- examples, both in proper Isar proof style and unstructured tactic
- scripts.%
-\end{isamarkuptext}%
-\isamarkuptrue%
-%
\isadelimtheory
%
\endisadelimtheory
--- a/doc-src/IsarRef/Thy/document/Outer_Syntax.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Outer_Syntax.tex Fri Feb 27 18:50:35 2009 +0100
@@ -185,10 +185,10 @@
Isabelle as \verb|\<forall>|. There are infinitely many Isabelle
symbols like this, although proper presentation is left to front-end
tools such as {\LaTeX} or Proof~General with the X-Symbol package.
- A list of standard Isabelle symbols that work well with these tools
- is given in \appref{app:symbols}. Note that \verb|\<lambda>| does
- not belong to the \isa{letter} category, since it is already used
- differently in the Pure term language.%
+ A list of predefined Isabelle symbols that work well with these
+ tools is given in \appref{app:symbols}. Note that \verb|\<lambda>|
+ does not belong to the \isa{letter} category, since it is already
+ used differently in the Pure term language.%
\end{isamarkuptext}%
\isamarkuptrue%
%
--- a/doc-src/IsarRef/Thy/document/Proof.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Proof.tex Fri Feb 27 18:50:35 2009 +0100
@@ -3,8 +3,6 @@
\def\isabellecontext{Proof}%
%
\isadelimtheory
-\isanewline
-\isanewline
%
\endisadelimtheory
%
@@ -20,7 +18,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Proofs%
+\isamarkupchapter{Proofs \label{ch:proofs}%
}
\isamarkuptrue%
%
@@ -28,8 +26,8 @@
Proof commands perform transitions of Isar/VM machine
configurations, which are block-structured, consisting of a stack of
nodes with three main components: logical proof context, current
- facts, and open goals. Isar/VM transitions are \emph{typed}
- according to the following three different modes of operation:
+ facts, and open goals. Isar/VM transitions are typed according to
+ the following three different modes of operation:
\begin{description}
@@ -49,13 +47,17 @@
\end{description}
- The proof mode indicator may be read as a verb telling the writer
- what kind of operation may be performed next. The corresponding
- typings of proof commands restricts the shape of well-formed proof
- texts to particular command sequences. So dynamic arrangements of
- commands eventually turn out as static texts of a certain structure.
- \Appref{ap:refcard} gives a simplified grammar of the overall
- (extensible) language emerging that way.%
+ The proof mode indicator may be understood as an instruction to the
+ writer, telling what kind of operation may be performed next. The
+ corresponding typings of proof commands restrict the shape of
+ well-formed proof texts to particular command sequences. So dynamic
+ arrangements of commands eventually turn out as static texts of a
+ certain structure.
+
+ \Appref{ap:refcard} gives a simplified grammar of the (extensible)
+ language emerging that way from the different types of proof
+ commands. The main ideas of the overall Isar framework are
+ explained in \chref{ch:isar-framework}.%
\end{isamarkuptext}%
\isamarkuptrue%
%
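For illustration, a minimal Isar proof with the mode after each
command indicated in comments (a sketch, assuming plain HOL):

  lemma "A \<longrightarrow> A"   (* goal stated: mode "prove" *)
  proof (rule impI)               (* initial method: mode "state" *)
    assume a: A                   (* still "state" *)
    from a                        (* chaining facts: mode "chain" *)
    show A .                      (* "prove" again; "." closes it *)
  qed
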
@@ -966,7 +968,7 @@
\begin{matharray}{l}
\isa{{\isachardoublequote}{\isasymlangle}using\ b\isactrlsub {\isadigit{1}}\ {\isasymdots}\ b\isactrlsub k{\isasymrangle}{\isachardoublequote}}~~\hyperlink{command.obtain}{\mbox{\isa{\isacommand{obtain}}}}~\isa{{\isachardoublequote}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m\ {\isasymWHERE}\ a{\isacharcolon}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ \ {\isasymlangle}proof{\isasymrangle}\ {\isasymequiv}{\isachardoublequote}} \\[1ex]
\quad \hyperlink{command.have}{\mbox{\isa{\isacommand{have}}}}~\isa{{\isachardoublequote}{\isasymAnd}thesis{\isachardot}\ {\isacharparenleft}{\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ {\isasymLongrightarrow}\ thesis{\isacharparenright}\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
- \quad \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{succeed} \\
+ \quad \hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\hyperlink{method.succeed}{\mbox{\isa{succeed}}} \\
\qquad \hyperlink{command.fix}{\mbox{\isa{\isacommand{fix}}}}~\isa{thesis} \\
\qquad \hyperlink{command.assume}{\mbox{\isa{\isacommand{assume}}}}~\isa{{\isachardoublequote}that\ {\isacharbrackleft}Pure{\isachardot}intro{\isacharquery}{\isacharbrackright}{\isacharcolon}\ {\isasymAnd}x\isactrlsub {\isadigit{1}}\ {\isasymdots}\ x\isactrlsub m{\isachardot}\ {\isasymphi}\isactrlsub {\isadigit{1}}\ {\isasymLongrightarrow}\ {\isasymdots}\ {\isasymphi}\isactrlsub n\ {\isasymLongrightarrow}\ thesis{\isachardoublequote}} \\
\qquad \hyperlink{command.then}{\mbox{\isa{\isacommand{then}}}}~\hyperlink{command.show}{\mbox{\isa{\isacommand{show}}}}~\isa{thesis} \\
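A minimal sketch of this elimination pattern in practice (assuming
plain HOL):

  lemma assumes "\<exists>x. P x" shows "\<exists>y. P y"
  proof -
    from assms obtain x where "P x" ..
    then show ?thesis ..
  qed

Here the first ".." performs exactly the existential elimination that
the expansion above spells out explicitly.
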
--- a/doc-src/IsarRef/Thy/document/Quick_Reference.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Quick_Reference.tex Fri Feb 27 18:50:35 2009 +0100
@@ -52,7 +52,7 @@
\begin{tabular}{rcl}
\isa{{\isachardoublequote}theory{\isasymdash}stmt{\isachardoublequote}} & = & \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}name{\isacharcolon}\ props\ proof\ \ {\isacharbar}{\isachardoublequote}}~~\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isasymdots}\ \ {\isacharbar}\ \ {\isasymdots}{\isachardoublequote}} \\[1ex]
- \isa{{\isachardoublequote}proof{\isachardoublequote}} & = & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\ stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{method} \\
+ \isa{{\isachardoublequote}proof{\isachardoublequote}} & = & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.proof}{\mbox{\isa{\isacommand{proof}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}\ stmt\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}~\isa{{\isachardoublequote}method\isactrlsup {\isacharquery}{\isachardoublequote}} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \isa{{\isachardoublequote}prfx\isactrlsup {\isacharasterisk}{\isachardoublequote}}~\hyperlink{command.done}{\mbox{\isa{\isacommand{done}}}} \\[1ex]
\isa{prfx} & = & \hyperlink{command.apply}{\mbox{\isa{\isacommand{apply}}}}~\isa{method} \\
& \isa{{\isachardoublequote}{\isacharbar}{\isachardoublequote}} & \hyperlink{command.using}{\mbox{\isa{\isacommand{using}}}}~\isa{{\isachardoublequote}facts{\isachardoublequote}} \\
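The two productions correspond to structured proofs and to
unstructured proof scripts; a minimal sketch of each (assuming plain
HOL):

  lemma "A \<longrightarrow> A"
  proof                     (* method? omitted: default rule step *)
    assume A
    then show A .
  qed                       (* method? omitted *)

  lemma "A \<longrightarrow> A"
    apply (rule impI)       (* prfx = apply method *)
    apply assumption
    done
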
--- a/doc-src/IsarRef/Thy/document/Spec.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Spec.tex Fri Feb 27 18:50:35 2009 +0100
@@ -22,6 +22,23 @@
}
\isamarkuptrue%
%
+\begin{isamarkuptext}%
+The Isabelle/Isar theory format integrates specifications and
+ proofs, supporting interactive development with unlimited undo
+ operations. There is an integrated document preparation system
+ (see \chref{ch:document-prep}) for typesetting formal developments
+ together with informal text. The resulting hyper-linked PDF
+ documents can be used both for WWW presentation and for printed
+ copies.
+
+ The Isar proof language (see \chref{ch:proofs}) is embedded into the
+ theory language as a proper sub-language. Proof mode is entered by
+ stating some \hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}} or \hyperlink{command.lemma}{\mbox{\isa{\isacommand{lemma}}}} at the theory
+ level, and left again with the final conclusion (e.g.\ via \hyperlink{command.qed}{\mbox{\isa{\isacommand{qed}}}}). Some theory specification mechanisms also require a proof,
+ such as \hyperlink{command.typedef}{\mbox{\isa{\isacommand{typedef}}}} in HOL, which demands non-emptiness of
+ the representing set.%
+\end{isamarkuptext}%
+\isamarkuptrue%
+%
\isamarkupsection{Defining theories \label{sec:begin-thy}%
}
\isamarkuptrue%
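A minimal theory skeleton illustrating the round trip between theory
and proof mode (a sketch, assuming the standard Main entry point):

  theory Scratch
  imports Main
  begin

  lemma triv: "x = x"
  proof -                   (* proof mode entered *)
    show "x = x" by (rule refl)
  qed                       (* back at the theory level *)

  end
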
@@ -127,8 +144,9 @@
\hyperlink{command.global.end}{\mbox{\isa{\isacommand{end}}}} has a different meaning: it concludes the
theory itself (\secref{sec:begin-thy}).
- \item \isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}{\isachardoublequote}} given after any local theory command
- specifies an immediate target, e.g.\ ``\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}'' or ``\hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}''. This works both in a local or
+ \item \isa{{\isachardoublequote}{\isacharparenleft}{\isachardoublequote}}\indexdef{}{keyword}{in}\hypertarget{keyword.in}{\hyperlink{keyword.in}{\mbox{\isa{\isakeyword{in}}}}}~\isa{{\isachardoublequote}c{\isacharparenright}{\isachardoublequote}} given after any
+ local theory command specifies an immediate target, e.g.\
+ ``\hyperlink{command.definition}{\mbox{\isa{\isacommand{definition}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}'' or ``\hyperlink{command.theorem}{\mbox{\isa{\isacommand{theorem}}}}~\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ c{\isacharparenright}\ {\isasymdots}{\isachardoublequote}}''. This works in both a local and
global theory context; the current target context will be suspended
for this command only. Note that ``\isa{{\isachardoublequote}{\isacharparenleft}{\isasymIN}\ {\isacharminus}{\isacharparenright}{\isachardoublequote}}'' will
always produce a global result independently of the current target
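A small sketch of such target specifications, assuming a hypothetical
locale "semi":

  locale semi =
    fixes prod :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "\<odot>" 70)
    assumes assoc: "(x \<odot> y) \<odot> z = x \<odot> (y \<odot> z)"

  definition (in semi) sq where "sq x = x \<odot> x"

  theorem (in semi) sq_assoc: "sq x \<odot> y = x \<odot> (x \<odot> y)"
    by (simp add: sq_def assoc)

Both "definition (in semi)" and "theorem (in semi)" work at the
global theory level; the locale context is in effect for the single
command only.
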
@@ -792,8 +810,8 @@
\end{matharray}
\begin{mldecls}
- \indexml{bind\_thms}\verb|bind_thms: string * thm list -> unit| \\
- \indexml{bind\_thm}\verb|bind_thm: string * thm -> unit| \\
+ \indexdef{}{ML}{bind\_thms}\verb|bind_thms: string * thm list -> unit| \\
+ \indexdef{}{ML}{bind\_thm}\verb|bind_thm: string * thm -> unit| \\
\end{mldecls}
\begin{rail}
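For illustration, a hypothetical snippet that uses only the
signatures listed above (the theorem names are arbitrary examples
from HOL):

  ML {*
    bind_thm ("my_sym", @{thm sym});
    bind_thms ("my_congs", [@{thm arg_cong}, @{thm fun_cong}]);
  *}

Afterwards the bound names my_sym and my_congs can be used like
ordinary theorem names in Isar text.
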
@@ -1178,7 +1196,7 @@
\end{description}
- See \hyperlink{file.~~/src/FOL/ex/IffOracle.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}FOL{\isacharslash}ex{\isacharslash}IffOracle{\isachardot}thy}}}} for a worked example of
+ See \hyperlink{file.~~/src/FOL/ex/Iff-Oracle.thy}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}src{\isacharslash}FOL{\isacharslash}ex{\isacharslash}Iff{\isacharunderscore}Oracle{\isachardot}thy}}}} for a worked example of
defining a new primitive rule as oracle, and turning it into a proof
method.%
\end{isamarkuptext}%
--- a/doc-src/IsarRef/Thy/document/Symbols.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/Thy/document/Symbols.tex Fri Feb 27 18:50:35 2009 +0100
@@ -20,7 +20,7 @@
%
\endisadelimtheory
%
-\isamarkupchapter{Standard Isabelle symbols \label{app:symbols}%
+\isamarkupchapter{Predefined Isabelle symbols \label{app:symbols}%
}
\isamarkuptrue%
%
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/document/isar-vm.eps Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,2694 @@
+%!PS-Adobe-3.0 EPSF-3.0
+%%Creator: inkscape 0.46
+%%Pages: 1
+%%Orientation: Portrait
+%%BoundingBox: 0 0 435 173
+%%HiResBoundingBox: 0 0 435 173
+%%EndComments
+%%BeginSetup
+%%EndSetup
+%%Page: 1 1
[... ca. 2680 further lines of machine-generated PostScript path data
omitted: the vector drawing of the Isar/VM diagram, produced with
inkscape 0.46 as recorded in the EPS header above ...]
+582.59973 197.68629 582.84062 196.97665 583.32239 196.45256 curveto
+583.80741 195.92522 584.46008 195.66155 585.2804 195.66154 curveto
+586.01607 195.66155 586.59712 195.89918 587.02356 196.37444 curveto
+587.45324 196.84645 587.66809 197.48935 587.66809 198.30315 curveto
+586.76965 198.03947 moveto
+586.76314 197.54794 586.62479 197.15569 586.35461 196.86272 curveto
+586.08768 196.56975 585.73287 196.42327 585.29016 196.42326 curveto
+584.78886 196.42327 584.38684 196.56487 584.08411 196.84807 curveto
+583.78463 197.13128 583.6121 197.53004 583.56653 198.04436 curveto
+586.76965 198.03947 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+553.82532 147.89853 moveto
+553.82532 148.60165 lineto
+553.52258 148.60165 lineto
+552.71203 148.60165 552.16841 148.48121 551.89172 148.24033 curveto
+551.61828 147.99944 551.48156 147.5193 551.48157 146.7999 curveto
+551.48157 145.6329 lineto
+551.48156 145.14137 551.39367 144.8012 551.2179 144.6124 curveto
+551.04211 144.4236 550.7231 144.3292 550.26086 144.32919 curveto
+549.96301 144.32919 lineto
+549.96301 143.63095 lineto
+550.26086 143.63095 lineto
+550.72636 143.63095 551.04537 143.53818 551.2179 143.35263 curveto
+551.39367 143.16383 551.48156 142.82692 551.48157 142.34189 curveto
+551.48157 141.17001 lineto
+551.48156 140.45062 551.61828 139.9721 551.89172 139.73447 curveto
+552.16841 139.49359 552.71203 139.37315 553.52258 139.37314 curveto
+553.82532 139.37314 lineto
+553.82532 140.07138 lineto
+553.49329 140.07138 lineto
+553.0343 140.07139 552.73482 140.143 552.59485 140.28622 curveto
+552.45487 140.42946 552.38488 140.73057 552.38489 141.18954 curveto
+552.38489 142.40048 lineto
+552.38488 142.91155 552.31001 143.28265 552.16028 143.51376 curveto
+552.01379 143.74489 551.76151 143.90114 551.40344 143.98251 curveto
+551.76477 144.07041 552.01867 144.22991 552.16516 144.46103 curveto
+552.31164 144.69215 552.38488 145.06162 552.38489 145.56943 curveto
+552.38489 146.78036 lineto
+552.38488 147.23935 552.45487 147.54046 552.59485 147.68369 curveto
+552.73482 147.82691 553.0343 147.89853 553.49329 147.89853 curveto
+553.82532 147.89853 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+559.51379 147.89853 moveto
+559.85559 147.89853 lineto
+560.31132 147.89853 560.60754 147.82854 560.74426 147.68857 curveto
+560.88423 147.54859 560.95422 147.24586 560.95422 146.78036 curveto
+560.95422 145.56943 lineto
+560.95422 145.06162 561.02746 144.69215 561.17395 144.46103 curveto
+561.32043 144.22991 561.57434 144.07041 561.93567 143.98251 curveto
+561.57434 143.90114 561.32043 143.74489 561.17395 143.51376 curveto
+561.02746 143.28265 560.95422 142.91155 560.95422 142.40048 curveto
+560.95422 141.18954 lineto
+560.95422 140.72731 560.88423 140.4262 560.74426 140.28622 curveto
+560.60754 140.143 560.31132 140.07139 559.85559 140.07138 curveto
+559.51379 140.07138 lineto
+559.51379 139.37314 lineto
+559.82141 139.37314 lineto
+560.63196 139.37315 561.17232 139.49359 561.4425 139.73447 curveto
+561.71594 139.9721 561.85266 140.45062 561.85266 141.17001 curveto
+561.85266 142.34189 lineto
+561.85266 142.82692 561.94055 143.16383 562.11633 143.35263 curveto
+562.29211 143.53818 562.61112 143.63095 563.07336 143.63095 curveto
+563.3761 143.63095 lineto
+563.3761 144.32919 lineto
+563.07336 144.32919 lineto
+562.61112 144.3292 562.29211 144.4236 562.11633 144.6124 curveto
+561.94055 144.8012 561.85266 145.14137 561.85266 145.6329 curveto
+561.85266 146.7999 lineto
+561.85266 147.5193 561.71594 147.99944 561.4425 148.24033 curveto
+561.17232 148.48121 560.63196 148.60165 559.82141 148.60165 curveto
+559.51379 148.60165 lineto
+559.51379 147.89853 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+554.20129 153.67001 moveto
+554.20129 156.97079 lineto
+553.30286 156.97079 lineto
+553.30286 153.69931 lineto
+553.30285 153.18174 553.20194 152.79437 553.00012 152.5372 curveto
+552.7983 152.28004 552.49556 152.15146 552.09192 152.15146 curveto
+551.60689 152.15146 551.2244 152.30609 550.94446 152.61533 curveto
+550.66451 152.92457 550.52453 153.34612 550.52454 153.87997 curveto
+550.52454 156.97079 lineto
+549.62122 156.97079 lineto
+549.62122 151.50204 lineto
+550.52454 151.50204 lineto
+550.52454 152.35165 lineto
+550.73938 152.02288 550.99166 151.77711 551.28137 151.61435 curveto
+551.57434 151.45159 551.91125 151.37021 552.29211 151.37021 curveto
+552.92037 151.37021 553.39563 151.56553 553.7179 151.95615 curveto
+554.04016 152.34352 554.20129 152.91481 554.20129 153.67001 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+560.68079 154.01181 moveto
+560.68079 154.45126 lineto
+556.54993 154.45126 lineto
+556.58899 155.06975 556.77453 155.54176 557.10657 155.86728 curveto
+557.44185 156.18955 557.90735 156.35068 558.50305 156.35068 curveto
+558.8481 156.35068 559.18176 156.30836 559.50403 156.22372 curveto
+559.82954 156.13909 560.15181 156.01214 560.47083 155.84286 curveto
+560.47083 156.69247 lineto
+560.14855 156.82919 559.81815 156.93336 559.47961 157.00497 curveto
+559.14107 157.07659 558.79764 157.1124 558.44934 157.1124 curveto
+557.57694 157.1124 556.88521 156.85849 556.37415 156.35068 curveto
+555.86633 155.84287 555.61243 155.15602 555.61243 154.29013 curveto
+555.61243 153.39495 555.85331 152.68532 556.33508 152.16122 curveto
+556.82011 151.63389 557.47278 151.37021 558.29309 151.37021 curveto
+559.02876 151.37021 559.60982 151.60784 560.03625 152.0831 curveto
+560.46594 152.55511 560.68078 153.19801 560.68079 154.01181 curveto
+559.78235 153.74814 moveto
+559.77583 153.25661 559.63749 152.86435 559.36731 152.57138 curveto
+559.10038 152.27842 558.74556 152.13193 558.30286 152.13193 curveto
+557.80155 152.13193 557.39953 152.27353 557.0968 152.55673 curveto
+556.79732 152.83994 556.62479 153.2387 556.57922 153.75302 curveto
+559.78235 153.74814 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+566.52551 151.50204 moveto
+564.54797 154.16318 lineto
+566.62805 156.97079 lineto
+565.56848 156.97079 lineto
+563.97668 154.82236 lineto
+562.38489 156.97079 lineto
+561.32532 156.97079 lineto
+563.44934 154.10947 lineto
+561.50598 151.50204 lineto
+562.56555 151.50204 lineto
+564.01575 153.45029 lineto
+565.46594 151.50204 lineto
+566.52551 151.50204 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+568.78625 149.94931 moveto
+568.78625 151.50204 lineto
+570.63684 151.50204 lineto
+570.63684 152.20029 lineto
+568.78625 152.20029 lineto
+568.78625 155.16904 lineto
+568.78625 155.615 568.84647 155.90146 568.96692 156.02841 curveto
+569.09061 156.15537 569.33964 156.21884 569.71399 156.21884 curveto
+570.63684 156.21884 lineto
+570.63684 156.97079 lineto
+569.71399 156.97079 lineto
+569.02063 156.97079 568.54211 156.84221 568.27844 156.58505 curveto
+568.01477 156.32464 567.88293 155.85263 567.88293 155.16904 curveto
+567.88293 152.20029 lineto
+567.22375 152.20029 lineto
+567.22375 151.50204 lineto
+567.88293 151.50204 lineto
+567.88293 149.94931 lineto
+568.78625 149.94931 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+483.33514 94.963516 moveto
+483.33514 98.264297 lineto
+482.43671 98.264297 lineto
+482.43671 94.992813 lineto
+482.4367 94.475239 482.33579 94.087869 482.13397 93.830704 curveto
+481.93215 93.573547 481.62941 93.444966 481.22577 93.444962 curveto
+480.74074 93.444966 480.35825 93.599589 480.07831 93.908829 curveto
+479.79836 94.218078 479.65838 94.639627 479.65839 95.173477 curveto
+479.65839 98.264297 lineto
+478.75507 98.264297 lineto
+478.75507 92.795547 lineto
+479.65839 92.795547 lineto
+479.65839 93.645157 lineto
+479.87323 93.316386 480.12551 93.070618 480.41522 92.907852 curveto
+480.70819 92.745097 481.0451 92.663717 481.42596 92.663712 curveto
+482.05422 92.663717 482.52948 92.859029 482.85175 93.249649 curveto
+483.17401 93.637023 483.33514 94.208312 483.33514 94.963516 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+487.25604 93.42543 moveto
+486.77427 93.425435 486.39341 93.614237 486.11346 93.991837 curveto
+485.83351 94.366189 485.69354 94.880512 485.69354 95.534805 curveto
+485.69354 96.189104 485.83189 96.705054 486.10858 97.082657 curveto
+486.38853 97.457007 486.77101 97.644181 487.25604 97.64418 curveto
+487.73455 97.644181 488.11379 97.455379 488.39374 97.077774 curveto
+488.67368 96.700171 488.81366 96.185849 488.81366 95.534805 curveto
+488.81366 94.887022 488.67368 94.374327 488.39374 93.996719 curveto
+488.11379 93.615865 487.73455 93.425435 487.25604 93.42543 curveto
+487.25604 92.663712 moveto
+488.03729 92.663717 488.65089 92.917623 489.09686 93.42543 curveto
+489.54282 93.933247 489.7658 94.636371 489.76581 95.534805 curveto
+489.7658 96.429989 489.54282 97.133114 489.09686 97.64418 curveto
+488.65089 98.151993 488.03729 98.405899 487.25604 98.405899 curveto
+486.47153 98.405899 485.8563 98.151993 485.41034 97.64418 curveto
+484.96763 97.133114 484.74628 96.429989 484.74628 95.534805 curveto
+484.74628 94.636371 484.96763 93.933247 485.41034 93.42543 curveto
+485.8563 92.917623 486.47153 92.663717 487.25604 92.663712 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+492.13885 91.242813 moveto
+492.13885 92.795547 lineto
+493.98944 92.795547 lineto
+493.98944 93.49379 lineto
+492.13885 93.49379 lineto
+492.13885 96.46254 lineto
+492.13885 96.908505 492.19907 97.194963 492.31952 97.321915 curveto
+492.44321 97.448869 492.69224 97.512345 493.06659 97.512344 curveto
+493.98944 97.512344 lineto
+493.98944 98.264297 lineto
+493.06659 98.264297 lineto
+492.37323 98.264297 491.89471 98.135717 491.63104 97.878555 curveto
+491.36737 97.618139 491.23553 97.146135 491.23553 96.46254 curveto
+491.23553 93.49379 lineto
+490.57635 93.49379 lineto
+490.57635 92.795547 lineto
+491.23553 92.795547 lineto
+491.23553 91.242813 lineto
+492.13885 91.242813 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+499.8537 95.305313 moveto
+499.8537 95.744766 lineto
+495.72284 95.744766 lineto
+495.7619 96.363258 495.94745 96.835262 496.27948 97.160782 curveto
+496.61476 97.483048 497.08026 97.644181 497.67596 97.64418 curveto
+498.02101 97.644181 498.35467 97.601863 498.67694 97.517227 curveto
+499.00246 97.432593 499.32472 97.30564 499.64374 97.136368 curveto
+499.64374 97.985977 lineto
+499.32147 98.122696 498.99106 98.226863 498.65253 98.298477 curveto
+498.31398 98.370092 497.97056 98.405899 497.62225 98.405899 curveto
+496.74986 98.405899 496.05812 98.151993 495.54706 97.64418 curveto
+495.03924 97.136369 494.78534 96.449521 494.78534 95.583633 curveto
+494.78534 94.688455 495.02622 93.97882 495.508 93.454727 curveto
+495.99302 92.927389 496.64569 92.663717 497.466 92.663712 curveto
+498.20168 92.663717 498.78273 92.901347 499.20917 93.376602 curveto
+499.63885 93.848612 499.85369 94.491515 499.8537 95.305313 curveto
+498.95526 95.041641 moveto
+498.94875 94.550108 498.8104 94.157856 498.54022 93.864883 curveto
+498.27329 93.571919 497.91847 93.425435 497.47577 93.42543 curveto
+496.97446 93.425435 496.57245 93.567037 496.26971 93.850235 curveto
+495.97023 94.133442 495.79771 94.532205 495.75214 95.046524 curveto
+498.95526 95.041641 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+478.78925 100.66664 moveto
+479.68768 100.66664 lineto
+479.68768 108.2643 lineto
+478.78925 108.2643 lineto
+478.78925 100.66664 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+486.24042 105.30531 moveto
+486.24042 105.74477 lineto
+482.10956 105.74477 lineto
+482.14862 106.36326 482.33417 106.83526 482.6662 107.16078 curveto
+483.00148 107.48305 483.46698 107.64418 484.06268 107.64418 curveto
+484.40773 107.64418 484.74139 107.60186 485.06366 107.51723 curveto
+485.38918 107.43259 485.71144 107.30564 486.03046 107.13637 curveto
+486.03046 107.98598 lineto
+485.70819 108.1227 485.37778 108.22686 485.03925 108.29848 curveto
+484.7007 108.37009 484.35728 108.4059 484.00897 108.4059 curveto
+483.13657 108.4059 482.44484 108.15199 481.93378 107.64418 curveto
+481.42596 107.13637 481.17206 106.44952 481.17206 105.58363 curveto
+481.17206 104.68845 481.41294 103.97882 481.89471 103.45473 curveto
+482.37974 102.92739 483.03241 102.66372 483.85272 102.66371 curveto
+484.5884 102.66372 485.16945 102.90135 485.59589 103.3766 curveto
+486.02557 103.84861 486.24041 104.49151 486.24042 105.30531 curveto
+485.34198 105.04164 moveto
+485.33546 104.55011 485.19712 104.15786 484.92694 103.86488 curveto
+484.66001 103.57192 484.30519 103.42544 483.86249 103.42543 curveto
+483.36118 103.42544 482.95917 103.56704 482.65643 103.85023 curveto
+482.35695 104.13344 482.18443 104.5322 482.13885 105.04652 curveto
+485.34198 105.04164 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+488.6037 101.24281 moveto
+488.6037 102.79555 lineto
+490.45428 102.79555 lineto
+490.45428 103.49379 lineto
+488.6037 103.49379 lineto
+488.6037 106.46254 lineto
+488.6037 106.9085 488.66392 107.19496 488.78436 107.32191 curveto
+488.90806 107.44887 489.15708 107.51235 489.53143 107.51234 curveto
+490.45428 107.51234 lineto
+490.45428 108.2643 lineto
+489.53143 108.2643 lineto
+488.83807 108.2643 488.35956 108.13572 488.09589 107.87856 curveto
+487.83221 107.61814 487.70038 107.14613 487.70038 106.46254 curveto
+487.70038 103.49379 lineto
+487.0412 103.49379 lineto
+487.0412 102.79555 lineto
+487.70038 102.79555 lineto
+487.70038 101.24281 lineto
+488.6037 101.24281 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+44.641342 188.13469 moveto
+44.641342 184.82414 lineto
+45.53978 184.82414 lineto
+45.53978 188.10051 lineto
+45.539778 188.61809 45.640689 189.00709 45.842514 189.2675 curveto
+46.044335 189.52466 46.347069 189.65324 46.750717 189.65324 curveto
+47.23574 189.65324 47.618226 189.49862 47.898178 189.18938 curveto
+48.181377 188.88013 48.322978 188.45858 48.322983 187.92473 curveto
+48.322983 184.82414 lineto
+49.22142 184.82414 lineto
+49.22142 190.29289 lineto
+48.322983 190.29289 lineto
+48.322983 189.45305 lineto
+48.10488 189.78508 47.850974 190.03248 47.561264 190.19524 curveto
+47.274802 190.35474 46.941144 190.43449 46.560287 190.43449 curveto
+45.93203 190.43449 45.455143 190.23918 45.129623 189.84856 curveto
+44.804102 189.45793 44.641341 188.88664 44.641342 188.13469 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+54.5681 184.98528 moveto
+54.5681 185.83488 lineto
+54.31419 185.70468 54.050518 185.60702 53.777084 185.54192 curveto
+53.503643 185.47682 53.220441 185.44426 52.927475 185.44426 curveto
+52.481509 185.44426 52.146223 185.51262 51.921616 185.64934 curveto
+51.70026 185.78606 51.589583 185.99114 51.589584 186.26457 curveto
+51.589583 186.47291 51.669335 186.6373 51.828842 186.75774 curveto
+51.988346 186.87493 52.308983 186.98723 52.790756 187.09465 curveto
+53.098373 187.16301 lineto
+53.736391 187.29973 54.188864 187.49342 54.455795 187.74406 curveto
+54.725973 187.99146 54.861064 188.33814 54.861069 188.7841 curveto
+54.861064 189.29192 54.659241 189.69393 54.2556 189.99016 curveto
+53.855206 190.28638 53.303448 190.43449 52.600327 190.43449 curveto
+52.307356 190.43449 52.001366 190.4052 51.682358 190.3466 curveto
+51.366601 190.29126 51.032943 190.20663 50.681381 190.0927 curveto
+50.681381 189.16496 lineto
+51.013412 189.33749 51.34056 189.4677 51.662827 189.55559 curveto
+51.98509 189.64022 52.3041 189.68254 52.619858 189.68254 curveto
+53.043032 189.68254 53.368552 189.61093 53.59642 189.4677 curveto
+53.824281 189.32121 53.938213 189.11614 53.938217 188.85246 curveto
+53.938213 188.60832 53.855206 188.42115 53.689194 188.29094 curveto
+53.52643 188.16073 53.16673 188.03541 52.610092 187.91496 curveto
+52.297592 187.84172 lineto
+51.74095 187.72454 51.338932 187.5455 51.091537 187.30461 curveto
+50.844141 187.06047 50.720443 186.72682 50.720444 186.30363 curveto
+50.720443 185.78932 50.902735 185.39218 51.267319 185.11223 curveto
+51.631901 184.83229 52.149478 184.69231 52.820053 184.69231 curveto
+53.152081 184.69231 53.464581 184.71673 53.757553 184.76555 curveto
+54.050518 184.81438 54.3207 184.88762 54.5681 184.98528 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+56.296616 184.82414 moveto
+57.195053 184.82414 lineto
+57.195053 190.29289 lineto
+56.296616 190.29289 lineto
+56.296616 184.82414 lineto
+56.296616 182.69524 moveto
+57.195053 182.69524 lineto
+57.195053 183.83293 lineto
+56.296616 183.83293 lineto
+56.296616 182.69524 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+63.615952 186.99211 moveto
+63.615952 190.29289 lineto
+62.717514 190.29289 lineto
+62.717514 187.02141 lineto
+62.717509 186.50383 62.616598 186.11646 62.41478 185.8593 curveto
+62.212953 185.60214 61.910219 185.47356 61.506577 185.47356 curveto
+61.021548 185.47356 60.639061 185.62818 60.359116 185.93742 curveto
+60.079166 186.24667 59.939192 186.66822 59.939194 187.20207 curveto
+59.939194 190.29289 lineto
+59.035873 190.29289 lineto
+59.035873 184.82414 lineto
+59.939194 184.82414 lineto
+59.939194 185.67375 lineto
+60.154035 185.34498 60.406314 185.09921 60.69603 184.93645 curveto
+60.988996 184.77369 61.325909 184.69231 61.706772 184.69231 curveto
+62.335023 184.69231 62.810283 184.88762 63.132553 185.27824 curveto
+63.454813 185.66562 63.615946 186.23691 63.615952 186.99211 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+69.016342 187.49504 moveto
+69.016338 186.844 68.881247 186.33945 68.611069 185.98137 curveto
+68.344138 185.6233 67.968162 185.44426 67.483139 185.44426 curveto
+67.001366 185.44426 66.625389 185.6233 66.355209 185.98137 curveto
+66.088281 186.33945 65.954817 186.844 65.954819 187.49504 curveto
+65.954817 188.14283 66.088281 188.64576 66.355209 189.00383 curveto
+66.625389 189.3619 67.001366 189.54094 67.483139 189.54094 curveto
+67.968162 189.54094 68.344138 189.3619 68.611069 189.00383 curveto
+68.881247 188.64576 69.016338 188.14283 69.016342 187.49504 curveto
+69.91478 189.61418 moveto
+69.914774 190.54517 69.708069 191.2369 69.294662 191.68938 curveto
+68.881247 192.1451 68.248109 192.37297 67.395248 192.37297 curveto
+67.079491 192.37297 66.781639 192.34855 66.501694 192.29973 curveto
+66.221744 192.25415 65.949934 192.18254 65.686264 192.08488 curveto
+65.686264 191.21086 lineto
+65.949934 191.35409 66.210351 191.45988 66.467514 191.52824 curveto
+66.724673 191.5966 66.986717 191.63078 67.253647 191.63078 curveto
+67.842836 191.63078 68.283916 191.47616 68.576889 191.16692 curveto
+68.869853 190.86093 69.016338 190.39706 69.016342 189.77531 curveto
+69.016342 189.33098 lineto
+68.830791 189.65324 68.593161 189.89413 68.303452 190.05363 curveto
+68.013734 190.21314 67.667055 190.29289 67.263412 190.29289 curveto
+66.592837 190.29289 66.052473 190.03736 65.642319 189.52629 curveto
+65.232162 189.01522 65.027084 188.33814 65.027084 187.49504 curveto
+65.027084 186.64869 65.232162 185.96998 65.642319 185.45891 curveto
+66.052473 184.94785 66.592837 184.69231 67.263412 184.69231 curveto
+67.667055 184.69231 68.013734 184.77206 68.303452 184.93156 curveto
+68.593161 185.09107 68.830791 185.33196 69.016342 185.65422 curveto
+69.016342 184.82414 lineto
+69.91478 184.82414 lineto
+69.91478 189.61418 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+44.641342 198.13469 moveto
+44.641342 194.82414 lineto
+45.53978 194.82414 lineto
+45.53978 198.10051 lineto
+45.539778 198.61809 45.640689 199.00709 45.842514 199.2675 curveto
+46.044335 199.52466 46.347069 199.65324 46.750717 199.65324 curveto
+47.23574 199.65324 47.618226 199.49862 47.898178 199.18938 curveto
+48.181377 198.88013 48.322978 198.45858 48.322983 197.92473 curveto
+48.322983 194.82414 lineto
+49.22142 194.82414 lineto
+49.22142 200.29289 lineto
+48.322983 200.29289 lineto
+48.322983 199.45305 lineto
+48.10488 199.78508 47.850974 200.03248 47.561264 200.19524 curveto
+47.274802 200.35474 46.941144 200.43449 46.560287 200.43449 curveto
+45.93203 200.43449 45.455143 200.23918 45.129623 199.84856 curveto
+44.804102 199.45793 44.641341 198.88664 44.641342 198.13469 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+55.62767 196.99211 moveto
+55.62767 200.29289 lineto
+54.729233 200.29289 lineto
+54.729233 197.02141 lineto
+54.729228 196.50383 54.628317 196.11646 54.426498 195.8593 curveto
+54.224671 195.60214 53.921937 195.47356 53.518295 195.47356 curveto
+53.033266 195.47356 52.65078 195.62818 52.370834 195.93742 curveto
+52.090884 196.24667 51.950911 196.66822 51.950912 197.20207 curveto
+51.950912 200.29289 lineto
+51.047592 200.29289 lineto
+51.047592 194.82414 lineto
+51.950912 194.82414 lineto
+51.950912 195.67375 lineto
+52.165754 195.34498 52.418033 195.09921 52.707748 194.93645 curveto
+53.000714 194.77369 53.337628 194.69231 53.718491 194.69231 curveto
+54.346742 194.69231 54.822002 194.88762 55.144272 195.27824 curveto
+55.466532 195.66562 55.627665 196.23691 55.62767 196.99211 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+60.197983 192.69524 moveto
+60.197983 193.44231 lineto
+59.338608 193.44231 lineto
+59.01634 193.44231 58.79173 193.50742 58.66478 193.63762 curveto
+58.54108 193.76783 58.479231 194.00221 58.479233 194.34074 curveto
+58.479233 194.82414 lineto
+59.958725 194.82414 lineto
+59.958725 195.52238 lineto
+58.479233 195.52238 lineto
+58.479233 200.29289 lineto
+57.575912 200.29289 lineto
+57.575912 195.52238 lineto
+56.716537 195.52238 lineto
+56.716537 194.82414 lineto
+57.575912 194.82414 lineto
+57.575912 194.44328 lineto
+57.575911 193.83457 57.717513 193.39186 58.000717 193.11516 curveto
+58.283918 192.83522 58.733137 192.69524 59.348373 192.69524 curveto
+60.197983 192.69524 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+63.064194 195.45403 moveto
+62.58242 195.45403 62.201561 195.64283 61.921616 196.02043 curveto
+61.641666 196.39478 61.501692 196.90911 61.501694 197.5634 curveto
+61.501692 198.2177 61.640038 198.73365 61.916733 199.11125 curveto
+62.196679 199.4856 62.579165 199.67278 63.064194 199.67278 curveto
+63.542706 199.67278 63.921937 199.48397 64.201889 199.10637 curveto
+64.481832 198.72877 64.621806 198.21444 64.621811 197.5634 curveto
+64.621806 196.91562 64.481832 196.40292 64.201889 196.02531 curveto
+63.921937 195.64446 63.542706 195.45403 63.064194 195.45403 curveto
+63.064194 194.69231 moveto
+63.84544 194.69231 64.459046 194.94622 64.905014 195.45403 curveto
+65.350972 195.96184 65.573954 196.66497 65.573959 197.5634 curveto
+65.573954 198.45858 65.350972 199.16171 64.905014 199.67278 curveto
+64.459046 200.18059 63.84544 200.43449 63.064194 200.43449 curveto
+62.279686 200.43449 61.664452 200.18059 61.218491 199.67278 curveto
+60.775781 199.16171 60.554428 198.45858 60.554428 197.5634 curveto
+60.554428 196.66497 60.775781 195.96184 61.218491 195.45403 curveto
+61.664452 194.94622 62.279686 194.69231 63.064194 194.69231 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+67.058334 192.69524 moveto
+67.956772 192.69524 lineto
+67.956772 200.29289 lineto
+67.058334 200.29289 lineto
+67.058334 192.69524 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+73.430405 195.65422 moveto
+73.430405 192.69524 lineto
+74.328842 192.69524 lineto
+74.328842 200.29289 lineto
+73.430405 200.29289 lineto
+73.430405 199.47258 lineto
+73.241598 199.7981 73.002341 200.04061 72.712631 200.20012 curveto
+72.426169 200.35637 72.081118 200.43449 71.677475 200.43449 curveto
+71.016666 200.43449 70.477929 200.17082 70.061264 199.64348 curveto
+69.647852 199.11614 69.441146 198.42278 69.441147 197.5634 curveto
+69.441146 196.70403 69.647852 196.01067 70.061264 195.48332 curveto
+70.477929 194.95598 71.016666 194.69231 71.677475 194.69231 curveto
+72.081118 194.69231 72.426169 194.77206 72.712631 194.93156 curveto
+73.002341 195.08782 73.241598 195.3287 73.430405 195.65422 curveto
+70.368881 197.5634 moveto
+70.36888 198.22421 70.503971 198.74341 70.774155 199.12102 curveto
+71.04759 199.49537 71.421939 199.68254 71.897202 199.68254 curveto
+72.372458 199.68254 72.746807 199.49537 73.020248 199.12102 curveto
+73.293682 198.74341 73.4304 198.22421 73.430405 197.5634 curveto
+73.4304 196.9026 73.293682 196.38502 73.020248 196.01067 curveto
+72.746807 195.63307 72.372458 195.44426 71.897202 195.44426 curveto
+71.421939 195.44426 71.04759 195.63307 70.774155 196.01067 curveto
+70.503971 196.38502 70.36888 196.9026 70.368881 197.5634 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+76.179428 194.82414 moveto
+77.077866 194.82414 lineto
+77.077866 200.29289 lineto
+76.179428 200.29289 lineto
+76.179428 194.82414 lineto
+76.179428 192.69524 moveto
+77.077866 192.69524 lineto
+77.077866 193.83293 lineto
+76.179428 193.83293 lineto
+76.179428 192.69524 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+83.498764 196.99211 moveto
+83.498764 200.29289 lineto
+82.600327 200.29289 lineto
+82.600327 197.02141 lineto
+82.600322 196.50383 82.499411 196.11646 82.297592 195.8593 curveto
+82.095765 195.60214 81.793031 195.47356 81.389389 195.47356 curveto
+80.90436 195.47356 80.521874 195.62818 80.241928 195.93742 curveto
+79.961978 196.24667 79.822004 196.66822 79.822006 197.20207 curveto
+79.822006 200.29289 lineto
+78.918686 200.29289 lineto
+78.918686 194.82414 lineto
+79.822006 194.82414 lineto
+79.822006 195.67375 lineto
+80.036848 195.34498 80.289126 195.09921 80.578842 194.93645 curveto
+80.871808 194.77369 81.208722 194.69231 81.589584 194.69231 curveto
+82.217835 194.69231 82.693095 194.88762 83.015366 195.27824 curveto
+83.337626 195.66562 83.498759 196.23691 83.498764 196.99211 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+88.899155 197.49504 moveto
+88.89915 196.844 88.764059 196.33945 88.493881 195.98137 curveto
+88.22695 195.6233 87.850974 195.44426 87.365952 195.44426 curveto
+86.884178 195.44426 86.508202 195.6233 86.238022 195.98137 curveto
+85.971093 196.33945 85.83763 196.844 85.837631 197.49504 curveto
+85.83763 198.14283 85.971093 198.64576 86.238022 199.00383 curveto
+86.508202 199.3619 86.884178 199.54094 87.365952 199.54094 curveto
+87.850974 199.54094 88.22695 199.3619 88.493881 199.00383 curveto
+88.764059 198.64576 88.89915 198.14283 88.899155 197.49504 curveto
+89.797592 199.61418 moveto
+89.797587 200.54517 89.590881 201.2369 89.177475 201.68938 curveto
+88.764059 202.1451 88.130922 202.37297 87.278061 202.37297 curveto
+86.962303 202.37297 86.664452 202.34855 86.384506 202.29973 curveto
+86.104557 202.25415 85.832747 202.18254 85.569077 202.08488 curveto
+85.569077 201.21086 lineto
+85.832747 201.35409 86.093163 201.45988 86.350327 201.52824 curveto
+86.607486 201.5966 86.86953 201.63078 87.136459 201.63078 curveto
+87.725649 201.63078 88.166729 201.47616 88.459702 201.16692 curveto
+88.752666 200.86093 88.89915 200.39706 88.899155 199.77531 curveto
+88.899155 199.33098 lineto
+88.713603 199.65324 88.475973 199.89413 88.186264 200.05363 curveto
+87.896547 200.21314 87.549868 200.29289 87.146225 200.29289 curveto
+86.47565 200.29289 85.935286 200.03736 85.525131 199.52629 curveto
+85.114974 199.01522 84.909896 198.33814 84.909897 197.49504 curveto
+84.909896 196.64869 85.114974 195.96998 85.525131 195.45891 curveto
+85.935286 194.94785 86.47565 194.69231 87.146225 194.69231 curveto
+87.549868 194.69231 87.896547 194.77206 88.186264 194.93156 curveto
+88.475973 195.09107 88.713603 195.33196 88.899155 195.65422 curveto
+88.899155 194.82414 lineto
+89.797592 194.82414 lineto
+89.797592 199.61418 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+380.48996 223.50369 moveto
+380.48996 225.05643 lineto
+382.34055 225.05643 lineto
+382.34055 225.75467 lineto
+380.48996 225.75467 lineto
+380.48996 228.72342 lineto
+380.48996 229.16938 380.55018 229.45584 380.67062 229.58279 curveto
+380.79432 229.70975 381.04334 229.77322 381.41769 229.77322 curveto
+382.34055 229.77322 lineto
+382.34055 230.52518 lineto
+381.41769 230.52518 lineto
+380.72433 230.52518 380.24582 230.3966 379.98215 230.13943 curveto
+379.71847 229.87902 379.58664 229.40701 379.58664 228.72342 curveto
+379.58664 225.75467 lineto
+378.92746 225.75467 lineto
+378.92746 225.05643 lineto
+379.58664 225.05643 lineto
+379.58664 223.50369 lineto
+380.48996 223.50369 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+388.07297 227.2244 moveto
+388.07297 230.52518 lineto
+387.17453 230.52518 lineto
+387.17453 227.25369 lineto
+387.17453 226.73612 387.07361 226.34875 386.8718 226.09158 curveto
+386.66997 225.83443 386.36723 225.70585 385.96359 225.70584 curveto
+385.47856 225.70585 385.09608 225.86047 384.81613 226.16971 curveto
+384.53618 226.47896 384.39621 226.90051 384.39621 227.43436 curveto
+384.39621 230.52518 lineto
+383.49289 230.52518 lineto
+383.49289 222.92752 lineto
+384.39621 222.92752 lineto
+384.39621 225.90604 lineto
+384.61105 225.57727 384.86333 225.3315 385.15305 225.16873 curveto
+385.44601 225.00598 385.78293 224.9246 386.16379 224.92459 curveto
+386.79204 224.9246 387.2673 225.11991 387.58957 225.51053 curveto
+387.91183 225.8979 388.07296 226.46919 388.07297 227.2244 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+394.55246 227.56619 moveto
+394.55246 228.00565 lineto
+390.4216 228.00565 lineto
+390.46066 228.62414 390.64621 229.09614 390.97824 229.42166 curveto
+391.31353 229.74393 391.77902 229.90506 392.37473 229.90506 curveto
+392.71977 229.90506 393.05343 229.86274 393.3757 229.77811 curveto
+393.70122 229.69347 394.02348 229.56652 394.3425 229.39725 curveto
+394.3425 230.24686 lineto
+394.02023 230.38358 393.68982 230.48774 393.35129 230.55936 curveto
+393.01274 230.63097 392.66932 230.66678 392.32101 230.66678 curveto
+391.44862 230.66678 390.75688 230.41287 390.24582 229.90506 curveto
+389.73801 229.39725 389.4841 228.7104 389.4841 227.84451 curveto
+389.4841 226.94933 389.72498 226.2397 390.20676 225.71561 curveto
+390.69178 225.18827 391.34445 224.9246 392.16476 224.92459 curveto
+392.90044 224.9246 393.48149 225.16223 393.90793 225.63748 curveto
+394.33761 226.10949 394.55245 226.75239 394.55246 227.56619 curveto
+393.65402 227.30252 moveto
+393.64751 226.81099 393.50916 226.41874 393.23898 226.12576 curveto
+392.97205 225.8328 392.61723 225.68631 392.17453 225.68631 curveto
+391.67323 225.68631 391.27121 225.82792 390.96848 226.11111 curveto
+390.66899 226.39432 390.49647 226.79308 390.4509 227.3074 curveto
+393.65402 227.30252 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+400.57297 227.2244 moveto
+400.57297 230.52518 lineto
+399.67453 230.52518 lineto
+399.67453 227.25369 lineto
+399.67453 226.73612 399.57361 226.34875 399.3718 226.09158 curveto
+399.16997 225.83443 398.86723 225.70585 398.46359 225.70584 curveto
+397.97856 225.70585 397.59608 225.86047 397.31613 226.16971 curveto
+397.03618 226.47896 396.89621 226.90051 396.89621 227.43436 curveto
+396.89621 230.52518 lineto
+395.99289 230.52518 lineto
+395.99289 225.05643 lineto
+396.89621 225.05643 lineto
+396.89621 225.90604 lineto
+397.11105 225.57727 397.36333 225.3315 397.65305 225.16873 curveto
+397.94601 225.00598 398.28293 224.9246 398.66379 224.92459 curveto
+399.29204 224.9246 399.7673 225.11991 400.08957 225.51053 curveto
+400.41183 225.8979 400.57296 226.46919 400.57297 227.2244 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+239.47623 229.75269 moveto
+239.47623 233.05347 lineto
+238.57779 233.05347 lineto
+238.57779 229.78198 lineto
+238.57778 229.26441 238.47687 228.87704 238.27505 228.61987 curveto
+238.07323 228.36272 237.77049 228.23414 237.36685 228.23413 curveto
+236.88182 228.23414 236.49934 228.38876 236.21939 228.698 curveto
+235.93944 229.00725 235.79947 229.4288 235.79947 229.96265 curveto
+235.79947 233.05347 lineto
+234.89615 233.05347 lineto
+234.89615 225.45581 lineto
+235.79947 225.45581 lineto
+235.79947 228.43433 lineto
+236.01431 228.10556 236.26659 227.85979 236.5563 227.69702 curveto
+236.84927 227.53427 237.18618 227.45289 237.56705 227.45288 curveto
+238.1953 227.45289 238.67056 227.6482 238.99283 228.03882 curveto
+239.31509 228.42619 239.47622 228.99748 239.47623 229.75269 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+243.76334 230.30444 moveto
+243.03742 230.30445 242.53449 230.38745 242.25455 230.55347 curveto
+241.9746 230.71948 241.83462 231.00269 241.83463 231.40308 curveto
+241.83462 231.72209 241.93879 231.97599 242.14713 232.16479 curveto
+242.35871 232.35034 242.64517 232.44312 243.0065 232.44312 curveto
+243.50454 232.44312 243.90331 232.26733 244.20279 231.91577 curveto
+244.50552 231.56096 244.65689 231.09058 244.65689 230.50464 curveto
+244.65689 230.30444 lineto
+243.76334 230.30444 lineto
+245.55533 229.93335 moveto
+245.55533 233.05347 lineto
+244.65689 233.05347 lineto
+244.65689 232.22339 lineto
+244.45181 232.55542 244.19628 232.80119 243.89029 232.96069 curveto
+243.5843 233.11694 243.20995 233.19507 242.76724 233.19507 curveto
+242.20734 233.19507 241.76138 233.03882 241.42935 232.72632 curveto
+241.10057 232.41056 240.93619 231.98901 240.93619 231.46167 curveto
+240.93619 230.84644 241.14127 230.38257 241.55142 230.07007 curveto
+241.96483 229.75757 242.58007 229.60132 243.39713 229.60132 curveto
+244.65689 229.60132 lineto
+244.65689 229.51343 lineto
+244.65689 229.10002 244.52017 228.78101 244.24673 228.5564 curveto
+243.97655 228.32854 243.59569 228.2146 243.10416 228.2146 curveto
+242.79165 228.2146 242.48729 228.25204 242.19107 228.3269 curveto
+241.89485 228.40178 241.61001 228.51408 241.33658 228.66382 curveto
+241.33658 227.83374 lineto
+241.66535 227.70679 241.98436 227.61239 242.29361 227.55054 curveto
+242.60285 227.48544 242.90396 227.45289 243.19693 227.45288 curveto
+243.98794 227.45289 244.57876 227.65796 244.96939 228.06812 curveto
+245.36001 228.47828 245.55532 229.10002 245.55533 229.93335 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+246.76627 227.58472 moveto
+247.71841 227.58472 lineto
+249.4274 232.17456 lineto
+251.13638 227.58472 lineto
+252.08853 227.58472 lineto
+250.03775 233.05347 lineto
+248.81705 233.05347 lineto
+246.76627 227.58472 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+258.0065 230.09448 moveto
+258.0065 230.53394 lineto
+253.87564 230.53394 lineto
+253.9147 231.15243 254.10025 231.62443 254.43228 231.94995 curveto
+254.76757 232.27222 255.23306 232.43335 255.82877 232.43335 curveto
+256.17381 232.43335 256.50747 232.39103 256.82974 232.3064 curveto
+257.15526 232.22176 257.47752 232.09481 257.79654 231.92554 curveto
+257.79654 232.77515 lineto
+257.47427 232.91187 257.14387 233.01603 256.80533 233.08765 curveto
+256.46678 233.15926 256.12336 233.19507 255.77505 233.19507 curveto
+254.90266 233.19507 254.21093 232.94116 253.69986 232.43335 curveto
+253.19205 231.92554 252.93814 231.23869 252.93814 230.3728 curveto
+252.93814 229.47762 253.17903 228.76799 253.6608 228.2439 curveto
+254.14582 227.71656 254.79849 227.45289 255.6188 227.45288 curveto
+256.35448 227.45289 256.93553 227.69052 257.36197 228.16577 curveto
+257.79165 228.63778 258.00649 229.28068 258.0065 230.09448 curveto
+257.10806 229.83081 moveto
+257.10155 229.33928 256.9632 228.94703 256.69302 228.65405 curveto
+256.42609 228.36109 256.07128 228.2146 255.62857 228.2146 curveto
+255.12727 228.2146 254.72525 228.35621 254.42252 228.6394 curveto
+254.12303 228.92261 253.95051 229.32137 253.90494 229.83569 curveto
+257.10806 229.83081 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+238.41666 242.74585 moveto
+238.41666 243.59546 lineto
+238.16275 243.46526 237.89907 243.3676 237.62564 243.30249 curveto
+237.3522 243.23739 237.069 243.20484 236.77603 243.20483 curveto
+236.33007 243.20484 235.99478 243.2732 235.77017 243.40991 curveto
+235.54882 243.54664 235.43814 243.75171 235.43814 244.02515 curveto
+235.43814 244.23348 235.51789 244.39787 235.6774 244.51831 curveto
+235.8369 244.6355 236.15754 244.74781 236.63931 244.85522 curveto
+236.94693 244.92358 lineto
+237.58495 245.06031 238.03742 245.25399 238.30435 245.50464 curveto
+238.57453 245.75204 238.70962 246.09872 238.70963 246.54468 curveto
+238.70962 247.05249 238.5078 247.45451 238.10416 247.75073 curveto
+237.70376 248.04696 237.152 248.19507 236.44888 248.19507 curveto
+236.15591 248.19507 235.84992 248.16577 235.53091 248.10718 curveto
+235.21516 248.05184 234.8815 247.9672 234.52994 247.85327 curveto
+234.52994 246.92554 lineto
+234.86197 247.09806 235.18912 247.22827 235.51138 247.31616 curveto
+235.83365 247.4008 236.15266 247.44312 236.46841 247.44312 curveto
+236.89159 247.44312 237.21711 247.3715 237.44498 247.22827 curveto
+237.67284 247.08179 237.78677 246.87671 237.78677 246.61304 curveto
+237.78677 246.3689 237.70376 246.18172 237.53775 246.05151 curveto
+237.37499 245.92131 237.01529 245.79598 236.45865 245.67554 curveto
+236.14615 245.60229 lineto
+235.58951 245.48511 235.18749 245.30607 234.94009 245.06519 curveto
+234.6927 244.82105 234.569 244.48739 234.569 244.06421 curveto
+234.569 243.54989 234.75129 243.15276 235.11588 242.8728 curveto
+235.48046 242.59286 235.99803 242.45289 236.66861 242.45288 curveto
+237.00064 242.45289 237.31314 242.4773 237.60611 242.52612 curveto
+237.89907 242.57496 238.16926 242.6482 238.41666 242.74585 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+244.69107 244.75269 moveto
+244.69107 248.05347 lineto
+243.79263 248.05347 lineto
+243.79263 244.78198 lineto
+243.79263 244.26441 243.69172 243.87704 243.4899 243.61987 curveto
+243.28807 243.36272 242.98534 243.23414 242.5817 243.23413 curveto
+242.09667 243.23414 241.71418 243.38876 241.43423 243.698 curveto
+241.15428 244.00725 241.01431 244.4288 241.01431 244.96265 curveto
+241.01431 248.05347 lineto
+240.11099 248.05347 lineto
+240.11099 240.45581 lineto
+241.01431 240.45581 lineto
+241.01431 243.43433 lineto
+241.22915 243.10556 241.48143 242.85979 241.77115 242.69702 curveto
+242.06411 242.53427 242.40103 242.45289 242.78189 242.45288 curveto
+243.41014 242.45289 243.8854 242.6482 244.20767 243.03882 curveto
+244.52993 243.42619 244.69107 243.99748 244.69107 244.75269 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+248.61197 243.2146 moveto
+248.1302 243.2146 247.74934 243.40341 247.46939 243.78101 curveto
+247.18944 244.15536 247.04947 244.66968 247.04947 245.32397 curveto
+247.04947 245.97827 247.18781 246.49422 247.46451 246.87183 curveto
+247.74445 247.24618 248.12694 247.43335 248.61197 247.43335 curveto
+249.09048 247.43335 249.46971 247.24455 249.74966 246.86694 curveto
+250.02961 246.48934 250.16958 245.97502 250.16959 245.32397 curveto
+250.16958 244.67619 250.02961 244.1635 249.74966 243.78589 curveto
+249.46971 243.40503 249.09048 243.2146 248.61197 243.2146 curveto
+248.61197 242.45288 moveto
+249.39322 242.45289 250.00682 242.70679 250.45279 243.2146 curveto
+250.89875 243.72242 251.12173 244.42554 251.12173 245.32397 curveto
+251.12173 246.21916 250.89875 246.92228 250.45279 247.43335 curveto
+250.00682 247.94116 249.39322 248.19507 248.61197 248.19507 curveto
+247.82746 248.19507 247.21223 247.94116 246.76627 247.43335 curveto
+246.32356 246.92228 246.1022 246.21916 246.1022 245.32397 curveto
+246.1022 244.42554 246.32356 243.72242 246.76627 243.2146 curveto
+247.21223 242.70679 247.82746 242.45289 248.61197 242.45288 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+252.08365 242.58472 moveto
+252.98209 242.58472 lineto
+254.10513 246.85229 lineto
+255.2233 242.58472 lineto
+256.28287 242.58472 lineto
+257.40591 246.85229 lineto
+258.52408 242.58472 lineto
+259.42252 242.58472 lineto
+257.99185 248.05347 lineto
+256.93228 248.05347 lineto
+255.75552 243.57104 lineto
+254.57388 248.05347 lineto
+253.51431 248.05347 lineto
+252.08365 242.58472 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+311.38464 185.46135 moveto
+311.38464 188.76213 lineto
+310.48621 188.76213 lineto
+310.48621 185.49065 lineto
+310.4862 184.97307 310.38529 184.5857 310.18347 184.32854 curveto
+309.98164 184.07138 309.67891 183.9428 309.27527 183.94279 curveto
+308.79024 183.9428 308.40775 184.09742 308.12781 184.40666 curveto
+307.84786 184.71591 307.70788 185.13746 307.70789 185.67131 curveto
+307.70789 188.76213 lineto
+306.80457 188.76213 lineto
+306.80457 181.16447 lineto
+307.70789 181.16447 lineto
+307.70789 184.14299 lineto
+307.92273 183.81422 308.17501 183.56845 308.46472 183.40569 curveto
+308.75769 183.24293 309.0946 183.16155 309.47546 183.16154 curveto
+310.10371 183.16155 310.57897 183.35686 310.90125 183.74748 curveto
+311.22351 184.13486 311.38464 184.70615 311.38464 185.46135 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+315.67175 186.01311 moveto
+314.94584 186.01311 314.44291 186.09612 314.16296 186.26213 curveto
+313.88301 186.42815 313.74304 186.71135 313.74304 187.11174 curveto
+313.74304 187.43075 313.84721 187.68466 314.05554 187.87346 curveto
+314.26713 188.05901 314.55359 188.15178 314.91492 188.15178 curveto
+315.41296 188.15178 315.81172 187.976 316.11121 187.62444 curveto
+316.41394 187.26962 316.5653 186.79924 316.56531 186.2133 curveto
+316.56531 186.01311 lineto
+315.67175 186.01311 lineto
+317.46375 185.64201 moveto
+317.46375 188.76213 lineto
+316.56531 188.76213 lineto
+316.56531 187.93205 lineto
+316.36023 188.26408 316.10469 188.50985 315.79871 188.66936 curveto
+315.49271 188.82561 315.11836 188.90373 314.67566 188.90373 curveto
+314.11576 188.90373 313.6698 188.74748 313.33777 188.43498 curveto
+313.00899 188.11923 312.8446 187.69768 312.8446 187.17033 curveto
+312.8446 186.5551 313.04968 186.09123 313.45984 185.77873 curveto
+313.87325 185.46624 314.48848 185.30999 315.30554 185.30998 curveto
+316.56531 185.30998 lineto
+316.56531 185.22209 lineto
+316.5653 184.80868 316.42858 184.48967 316.15515 184.26506 curveto
+315.88497 184.0372 315.50411 183.92327 315.01257 183.92326 curveto
+314.70007 183.92327 314.39571 183.9607 314.09949 184.03557 curveto
+313.80326 184.11044 313.51843 184.22275 313.245 184.37248 curveto
+313.245 183.5424 lineto
+313.57377 183.41546 313.89278 183.32106 314.20203 183.2592 curveto
+314.51127 183.1941 314.81238 183.16155 315.10535 183.16154 curveto
+315.89636 183.16155 316.48718 183.36663 316.87781 183.77678 curveto
+317.26843 184.18694 317.46374 184.80868 317.46375 185.64201 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+318.67468 183.29338 moveto
+319.62683 183.29338 lineto
+321.33582 187.88322 lineto
+323.0448 183.29338 lineto
+323.99695 183.29338 lineto
+321.94617 188.76213 lineto
+320.72546 188.76213 lineto
+318.67468 183.29338 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+329.91492 185.80315 moveto
+329.91492 186.2426 lineto
+325.78406 186.2426 lineto
+325.82312 186.86109 326.00867 187.3331 326.3407 187.65862 curveto
+326.67598 187.98088 327.14148 188.14201 327.73718 188.14201 curveto
+328.08223 188.14201 328.41589 188.0997 328.73816 188.01506 curveto
+329.06368 187.93043 329.38594 187.80347 329.70496 187.6342 curveto
+329.70496 188.48381 lineto
+329.38269 188.62053 329.05228 188.7247 328.71375 188.79631 curveto
+328.3752 188.86792 328.03178 188.90373 327.68347 188.90373 curveto
+326.81107 188.90373 326.11934 188.64983 325.60828 188.14201 curveto
+325.10046 187.6342 324.84656 186.94735 324.84656 186.08147 curveto
+324.84656 185.18629 325.08744 184.47665 325.56921 183.95256 curveto
+326.05424 183.42522 326.70691 183.16155 327.52722 183.16154 curveto
+328.26289 183.16155 328.84395 183.39918 329.27039 183.87444 curveto
+329.70007 184.34645 329.91491 184.98935 329.91492 185.80315 curveto
+329.01648 185.53947 moveto
+329.00996 185.04794 328.87162 184.65569 328.60144 184.36272 curveto
+328.33451 184.06975 327.97969 183.92327 327.53699 183.92326 curveto
+327.03568 183.92327 326.63366 184.06487 326.33093 184.34807 curveto
+326.03145 184.63128 325.85893 185.03004 325.81335 185.54436 curveto
+329.01648 185.53947 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+310.32507 198.45451 moveto
+310.32507 199.30412 lineto
+310.07116 199.17392 309.80749 199.07626 309.53406 199.01115 curveto
+309.26062 198.94605 308.97741 198.9135 308.68445 198.9135 curveto
+308.23848 198.9135 307.9032 198.98186 307.67859 199.11858 curveto
+307.45723 199.2553 307.34656 199.46038 307.34656 199.73381 curveto
+307.34656 199.94215 307.42631 200.10654 307.58582 200.22697 curveto
+307.74532 200.34417 308.06596 200.45647 308.54773 200.56389 curveto
+308.85535 200.63225 lineto
+309.49336 200.76897 309.94584 200.96265 310.21277 201.2133 curveto
+310.48295 201.4607 310.61804 201.80738 310.61804 202.25334 curveto
+310.61804 202.76116 310.41621 203.16317 310.01257 203.4594 curveto
+309.61218 203.75562 309.06042 203.90373 308.3573 203.90373 curveto
+308.06433 203.90373 307.75834 203.87444 307.43933 203.81584 curveto
+307.12357 203.7605 306.78992 203.67587 306.43835 203.56194 curveto
+306.43835 202.6342 lineto
+306.77038 202.80673 307.09753 202.93694 307.4198 203.02483 curveto
+307.74206 203.10946 308.06107 203.15178 308.37683 203.15178 curveto
+308.80001 203.15178 309.12553 203.08017 309.35339 202.93694 curveto
+309.58125 202.79045 309.69519 202.58537 309.69519 202.3217 curveto
+309.69519 202.07756 309.61218 201.89039 309.44617 201.76018 curveto
+309.2834 201.62997 308.9237 201.50465 308.36707 201.3842 curveto
+308.05457 201.31096 lineto
+307.49792 201.19377 307.09591 201.01474 306.84851 200.77385 curveto
+306.60111 200.52971 306.47742 200.19605 306.47742 199.77287 curveto
+306.47742 199.25855 306.65971 198.86142 307.02429 198.58147 curveto
+307.38887 198.30152 307.90645 198.16155 308.57703 198.16154 curveto
+308.90905 198.16155 309.22155 198.18596 309.51453 198.23479 curveto
+309.80749 198.28362 310.07767 198.35686 310.32507 198.45451 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+316.59949 200.46135 moveto
+316.59949 203.76213 lineto
+315.70105 203.76213 lineto
+315.70105 200.49065 lineto
+315.70105 199.97307 315.60013 199.5857 315.39832 199.32854 curveto
+315.19649 199.07138 314.89375 198.9428 314.49011 198.94279 curveto
+314.00508 198.9428 313.6226 199.09742 313.34265 199.40666 curveto
+313.0627 199.71591 312.92273 200.13746 312.92273 200.67131 curveto
+312.92273 203.76213 lineto
+312.01941 203.76213 lineto
+312.01941 196.16447 lineto
+312.92273 196.16447 lineto
+312.92273 199.14299 lineto
+313.13757 198.81422 313.38985 198.56845 313.67957 198.40569 curveto
+313.97253 198.24293 314.30945 198.16155 314.69031 198.16154 curveto
+315.31856 198.16155 315.79382 198.35686 316.11609 198.74748 curveto
+316.43835 199.13486 316.59948 199.70615 316.59949 200.46135 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+320.52039 198.92326 moveto
+320.03861 198.92327 319.65775 199.11207 319.37781 199.48967 curveto
+319.09786 199.86402 318.95788 200.37835 318.95789 201.03264 curveto
+318.95788 201.68694 319.09623 202.20289 319.37292 202.58049 curveto
+319.65287 202.95484 320.03536 203.14201 320.52039 203.14201 curveto
+320.9989 203.14201 321.37813 202.95321 321.65808 202.57561 curveto
+321.93802 202.198 322.078 201.68368 322.078 201.03264 curveto
+322.078 200.38486 321.93802 199.87216 321.65808 199.49455 curveto
+321.37813 199.1137 320.9989 198.92327 320.52039 198.92326 curveto
+320.52039 198.16154 moveto
+321.30163 198.16155 321.91524 198.41546 322.36121 198.92326 curveto
+322.80716 199.43108 323.03015 200.1342 323.03015 201.03264 curveto
+323.03015 201.92782 322.80716 202.63095 322.36121 203.14201 curveto
+321.91524 203.64983 321.30163 203.90373 320.52039 203.90373 curveto
+319.73588 203.90373 319.12064 203.64983 318.67468 203.14201 curveto
+318.23197 202.63095 318.01062 201.92782 318.01062 201.03264 curveto
+318.01062 200.1342 318.23197 199.43108 318.67468 198.92326 curveto
+319.12064 198.41546 319.73588 198.16155 320.52039 198.16154 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+323.99207 198.29338 moveto
+324.8905 198.29338 lineto
+326.01355 202.56096 lineto
+327.13171 198.29338 lineto
+328.19128 198.29338 lineto
+329.31433 202.56096 lineto
+330.4325 198.29338 lineto
+331.33093 198.29338 lineto
+329.90027 203.76213 lineto
+328.8407 203.76213 lineto
+327.66394 199.27971 lineto
+326.4823 203.76213 lineto
+325.42273 203.76213 lineto
+323.99207 198.29338 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+305.63477 140.25864 moveto
+305.63477 143.15903 lineto
+304.73145 143.15903 lineto
+304.73145 135.6102 lineto
+305.63477 135.6102 lineto
+305.63477 136.44028 lineto
+305.82357 136.11476 306.0612 135.87388 306.34766 135.71762 curveto
+306.63737 135.55812 306.98242 135.47837 307.38281 135.47836 curveto
+308.04687 135.47837 308.58561 135.74204 308.99902 136.26938 curveto
+309.41568 136.79673 309.62402 137.49009 309.62402 138.34946 curveto
+309.62402 139.20883 309.41568 139.90219 308.99902 140.42953 curveto
+308.58561 140.95688 308.04687 141.22055 307.38281 141.22055 curveto
+306.98242 141.22055 306.63737 141.14243 306.34766 140.98618 curveto
+306.0612 140.82667 305.82357 140.58416 305.63477 140.25864 curveto
+308.69141 138.34946 moveto
+308.6914 137.68865 308.55468 137.17108 308.28125 136.79672 curveto
+308.01106 136.41912 307.63834 136.23032 307.16309 136.23032 curveto
+306.68782 136.23032 306.31347 136.41912 306.04004 136.79672 curveto
+305.76985 137.17108 305.63476 137.68865 305.63477 138.34946 curveto
+305.63476 139.01027 305.76985 139.52947 306.04004 139.90707 curveto
+306.31347 140.28142 306.68782 140.4686 307.16309 140.4686 curveto
+307.63834 140.4686 308.01106 140.28142 308.28125 139.90707 curveto
+308.55468 139.52947 308.6914 139.01027 308.69141 138.34946 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+314.28223 136.45004 moveto
+314.18131 136.39145 314.07063 136.34914 313.9502 136.32309 curveto
+313.833 136.2938 313.7028 136.27915 313.55957 136.27914 curveto
+313.05175 136.27915 312.66113 136.44516 312.3877 136.77719 curveto
+312.11751 137.10597 311.98242 137.5796 311.98242 138.19809 curveto
+311.98242 141.07895 lineto
+311.0791 141.07895 lineto
+311.0791 135.6102 lineto
+311.98242 135.6102 lineto
+311.98242 136.45981 lineto
+312.17122 136.12778 312.41699 135.88201 312.71973 135.7225 curveto
+313.02246 135.55975 313.3903 135.47837 313.82324 135.47836 curveto
+313.88509 135.47837 313.95345 135.48325 314.02832 135.49301 curveto
+314.10319 135.49953 314.18619 135.51092 314.27734 135.52719 curveto
+314.28223 136.45004 lineto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+317.13867 136.24008 moveto
+316.6569 136.24009 316.27604 136.42889 315.99609 136.80649 curveto
+315.71614 137.18084 315.57617 137.69516 315.57617 138.34946 curveto
+315.57617 139.00376 315.71452 139.51971 315.99121 139.89731 curveto
+316.27116 140.27166 316.65364 140.45883 317.13867 140.45883 curveto
+317.61718 140.45883 317.99642 140.27003 318.27637 139.89243 curveto
+318.55631 139.51482 318.69628 139.0005 318.69629 138.34946 curveto
+318.69628 137.70167 318.55631 137.18898 318.27637 136.81137 curveto
+317.99642 136.43052 317.61718 136.24009 317.13867 136.24008 curveto
+317.13867 135.47836 moveto
+317.91992 135.47837 318.53352 135.73227 318.97949 136.24008 curveto
+319.42545 136.7479 319.64843 137.45102 319.64844 138.34946 curveto
+319.64843 139.24464 319.42545 139.94777 318.97949 140.45883 curveto
+318.53352 140.96664 317.91992 141.22055 317.13867 141.22055 curveto
+316.35416 141.22055 315.73893 140.96664 315.29297 140.45883 curveto
+314.85026 139.94777 314.62891 139.24464 314.62891 138.34946 curveto
+314.62891 137.45102 314.85026 136.7479 315.29297 136.24008 curveto
+315.73893 135.73227 316.35416 135.47837 317.13867 135.47836 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+323.25195 136.24008 moveto
+322.77018 136.24009 322.38932 136.42889 322.10938 136.80649 curveto
+321.82943 137.18084 321.68945 137.69516 321.68945 138.34946 curveto
+321.68945 139.00376 321.8278 139.51971 322.10449 139.89731 curveto
+322.38444 140.27166 322.76692 140.45883 323.25195 140.45883 curveto
+323.73047 140.45883 324.1097 140.27003 324.38965 139.89243 curveto
+324.66959 139.51482 324.80957 139.0005 324.80957 138.34946 curveto
+324.80957 137.70167 324.66959 137.18898 324.38965 136.81137 curveto
+324.1097 136.43052 323.73047 136.24009 323.25195 136.24008 curveto
+323.25195 135.47836 moveto
+324.0332 135.47837 324.64681 135.73227 325.09277 136.24008 curveto
+325.53873 136.7479 325.76171 137.45102 325.76172 138.34946 curveto
+325.76171 139.24464 325.53873 139.94777 325.09277 140.45883 curveto
+324.64681 140.96664 324.0332 141.22055 323.25195 141.22055 curveto
+322.46745 141.22055 321.85221 140.96664 321.40625 140.45883 curveto
+320.96354 139.94777 320.74219 139.24464 320.74219 138.34946 curveto
+320.74219 137.45102 320.96354 136.7479 321.40625 136.24008 curveto
+321.85221 135.73227 322.46745 135.47837 323.25195 135.47836 curveto
+fill
+grestore
+gsave
+0 0 0 setrgbcolor
+newpath
+330.01465 133.48129 moveto
+330.01465 134.22836 lineto
+329.15527 134.22836 lineto
+328.83301 134.22837 328.6084 134.29347 328.48145 134.42368 curveto
+328.35775 134.55389 328.2959 134.78827 328.2959 135.1268 curveto
+328.2959 135.6102 lineto
+329.77539 135.6102 lineto
+329.77539 136.30844 lineto
+328.2959 136.30844 lineto
+328.2959 141.07895 lineto
+327.39258 141.07895 lineto
+327.39258 136.30844 lineto
+326.5332 136.30844 lineto
+326.5332 135.6102 lineto
+327.39258 135.6102 lineto
+327.39258 135.22934 lineto
+327.39258 134.62062 327.53418 134.17791 327.81738 133.90121 curveto
+328.10058 133.62127 328.5498 133.4813 329.16504 133.48129 curveto
+330.01465 133.48129 lineto
+fill
+grestore
+grestore
+grestore
+showpage
+%%EOF
Binary file doc-src/IsarRef/Thy/document/isar-vm.pdf has changed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/IsarRef/Thy/document/isar-vm.svg Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,460 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="543.02673"
+ height="215.66071"
+ id="svg2"
+ sodipodi:version="0.32"
+ inkscape:version="0.46"
+ version="1.0"
+ sodipodi:docname="isar-vm.svg"
+ inkscape:output_extension="org.inkscape.output.svg.inkscape">
+ <defs
+ id="defs4">
+ <marker
+ inkscape:stockid="TriangleOutM"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="TriangleOutM"
+ style="overflow:visible">
+ <path
+ id="path4130"
+ d="M 5.77,0 L -2.88,5 L -2.88,-5 L 5.77,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="scale(0.4,0.4)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mend"
+ style="overflow:visible">
+ <path
+ id="path3993"
+ d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible">
+ <path
+ id="path3207"
+ d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path3204"
+ d="M 0,0 L 5,-5 L -12.5,0 L 5,5 L 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt;marker-start:none"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <inkscape:perspective
+ sodipodi:type="inkscape:persp3d"
+ inkscape:vp_x="0 : 526.18109 : 1"
+ inkscape:vp_y="0 : 1000 : 0"
+ inkscape:vp_z="744.09448 : 526.18109 : 1"
+ inkscape:persp3d-origin="372.04724 : 350.78739 : 1"
+ id="perspective10" />
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ gridtolerance="10"
+ guidetolerance="10"
+ objecttolerance="10"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1.4142136"
+ inkscape:cx="305.44602"
+ inkscape:cy="38.897723"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ inkscape:snap-global="true"
+ units="mm"
+ inkscape:window-width="1226"
+ inkscape:window-height="951"
+ inkscape:window-x="28"
+ inkscape:window-y="47">
+ <inkscape:grid
+ type="xygrid"
+ id="grid2383"
+ visible="true"
+ enabled="true"
+ units="mm"
+ spacingx="2.5mm"
+ spacingy="2.5mm"
+ empspacing="2" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata7">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(-44.641342,-76.87234)">
+ <g
+ id="g3448"
+ transform="translate(70.838012,79.725562)">
+ <rect
+ ry="17.67767"
+ y="131.52507"
+ x="212.09882"
+ height="53.149605"
+ width="70.866142"
+ id="rect3407"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3409"
+ y="164.06471"
+ x="223.50845"
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ xml:space="preserve"><tspan
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+ y="164.06471"
+ x="223.50845"
+ id="tspan3411"
+ sodipodi:role="line">chain</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921262;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 424.72469,236.82544 L 356.83209,236.82544 L 356.83209,236.82544"
+ id="path3458" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99921268;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="M 282.35183,236.82544 L 215.11403,236.82544 L 215.11403,236.82544"
+ id="path4771" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:0.99999994px;stroke-linecap:butt;stroke-linejoin:miter;marker-start:none;marker-mid:none;marker-end:url(#TriangleOutM);stroke-opacity:1"
+ d="M 424.69726,192.5341 L 215.13005,192.5341"
+ id="path4773" />
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+ d="M 211.98429,148.24276 L 422.13162,148.24276"
+ id="path6883" />
+ <g
+ id="g3443"
+ transform="translate(70.866146,78.725567)">
+ <rect
+ ry="17.67767"
+ y="42.942394"
+ x="70.366531"
+ height="141.73228"
+ width="70.866142"
+ id="rect2586"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3370"
+ y="116.62494"
+ x="79.682419"
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ xml:space="preserve"><tspan
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+ y="116.62494"
+ x="79.682419"
+ id="tspan3372"
+ sodipodi:role="line">prove</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+ d="M 176.66575,92.035445 L 176.66575,118.61025"
+ id="path7412" />
+ <path
+ sodipodi:type="arc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path9011"
+ sodipodi:cx="119.58662"
+ sodipodi:cy="266.74686"
+ sodipodi:rx="93.01181"
+ sodipodi:ry="53.149605"
+ d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+ transform="matrix(0.2378166,0,0,-0.2269133,90.621413,253.06251)"
+ sodipodi:start="0.29223018"
+ sodipodi:end="5.9921036"
+ sodipodi:open="true" />
+ <g
+ id="g3453"
+ transform="translate(70.866151,78.725565)">
+ <rect
+ ry="17.67767"
+ y="42.942394"
+ x="353.83112"
+ height="141.73228"
+ width="70.866142"
+ id="rect3381"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:0.99921262;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ sodipodi:linespacing="100%"
+ id="text3383"
+ y="119.31244"
+ x="365.98294"
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ xml:space="preserve"><tspan
+ style="font-size:16px;font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans Bold"
+ y="119.31244"
+ x="365.98294"
+ sodipodi:role="line"
+ id="tspan3387">state</tspan></text>
+ </g>
+ <path
+ style="fill:none;fill-opacity:0.75;fill-rule:evenodd;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#TriangleOutM);stroke-opacity:1"
+ d="M 460.13031,263.40024 L 460.13031,289.97505"
+ id="path7941" />
+ <path
+ sodipodi:type="arc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path10594"
+ sodipodi:cx="119.58662"
+ sodipodi:cy="266.74686"
+ sodipodi:rx="93.01181"
+ sodipodi:ry="53.149605"
+ d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+ transform="matrix(-0.2378166,0,0,0.2269133,546.17466,132.00569)"
+ sodipodi:start="0.29223018"
+ sodipodi:end="5.9921036"
+ sodipodi:open="true" />
+ <path
+ sodipodi:type="arc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:bevel;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path12210"
+ sodipodi:cx="119.58662"
+ sodipodi:cy="266.74686"
+ sodipodi:rx="93.01181"
+ sodipodi:ry="53.149605"
+ d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+ transform="matrix(-0.2378166,0,0,0.2269133,546.17465,87.714359)"
+ sodipodi:start="0.29223018"
+ sodipodi:end="5.9921036"
+ sodipodi:open="true" />
+ <path
+ sodipodi:type="arc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-start:none;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path12212"
+ sodipodi:cx="119.58662"
+ sodipodi:cy="266.74686"
+ sodipodi:rx="93.01181"
+ sodipodi:ry="53.149605"
+ d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+ transform="matrix(-0.2378166,0,0,0.2269133,546.17465,176.29703)"
+ sodipodi:start="0.29223018"
+ sodipodi:end="5.9921036"
+ sodipodi:open="true" />
+ <path
+ sodipodi:type="arc"
+ style="fill:none;fill-opacity:1;stroke:#000000;stroke-width:4.30137062;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#TriangleOutM);stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="path12214"
+ sodipodi:cx="119.58662"
+ sodipodi:cy="266.74686"
+ sodipodi:rx="93.01181"
+ sodipodi:ry="53.149605"
+ d="M 208.65508,282.05865 A 93.01181,53.149605 0 1 1 208.68579,251.49353"
+ transform="matrix(0,0.2378166,0.2269133,0,399.60191,71.056696)"
+ sodipodi:start="0.29223018"
+ sodipodi:end="5.9921036"
+ sodipodi:open="true" />
+ <text
+ xml:space="preserve"
+ style="font-size:12px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="173.49998"
+ y="97.094513"
+ id="text19307"
+ sodipodi:linespacing="100%"
+ transform="translate(17.216929,6.5104864)"><tspan
+ sodipodi:role="line"
+ id="tspan19309"
+ x="173.49998"
+ y="97.094513" /></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="185.52402"
+ y="110.07987"
+ id="text19311"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19313"
+ x="185.52402"
+ y="110.07987">theorem</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="389.99997"
+ y="11.594519"
+ id="text19315"
+ sodipodi:linespacing="100%"
+ transform="translate(17.216929,6.5104864)"><tspan
+ sodipodi:role="line"
+ id="tspan19317"
+ x="389.99997"
+ y="11.594519" /></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="468.98859"
+ y="280.47543"
+ id="text19319"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19321"
+ x="468.98859"
+ y="280.47543">qed</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="549.06946"
+ y="239.58423"
+ id="text19323"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19325"
+ x="549.06946"
+ y="239.58423">qed</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="549.39172"
+ y="191.26213"
+ id="text19327"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19329"
+ x="549.39172"
+ y="191.26213">fix</tspan><tspan
+ sodipodi:role="line"
+ x="549.39172"
+ y="201.26213"
+ id="tspan19331">assume</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="548.71301"
+ y="146.97079"
+ id="text19333"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19335"
+ x="548.71301"
+ y="146.97079">{ }</tspan><tspan
+ sodipodi:role="line"
+ x="548.71301"
+ y="156.97079"
+ id="tspan19337">next</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="477.84686"
+ y="98.264297"
+ id="text19339"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ x="477.84686"
+ y="98.264297"
+ id="tspan19343">note</tspan><tspan
+ sodipodi:role="line"
+ x="477.84686"
+ y="108.2643"
+ id="tspan19358">let</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="43.791733"
+ y="190.29289"
+ id="text19345"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19347"
+ x="43.791733"
+ y="190.29289">using</tspan><tspan
+ sodipodi:role="line"
+ x="43.791733"
+ y="200.29289"
+ id="tspan19349">unfolding</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="378.65891"
+ y="230.52518"
+ id="text19360"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19362"
+ x="378.65891"
+ y="230.52518">then</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="233.98795"
+ y="233.05347"
+ id="text19364"
+ sodipodi:linespacing="150%"><tspan
+ sodipodi:role="line"
+ x="233.98795"
+ y="233.05347"
+ id="tspan19368">have</tspan><tspan
+ sodipodi:role="line"
+ x="233.98795"
+ y="248.05347"
+ id="tspan19370">show</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:150%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="305.89636"
+ y="188.76213"
+ id="text19374"
+ sodipodi:linespacing="150%"><tspan
+ sodipodi:role="line"
+ x="305.89636"
+ y="188.76213"
+ id="tspan19376">have</tspan><tspan
+ sodipodi:role="line"
+ x="305.89636"
+ y="203.76213"
+ id="tspan19378">show</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-size:10px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:100%;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans;-inkscape-font-specification:Bitstream Vera Sans"
+ x="303.82324"
+ y="141.07895"
+ id="text19380"
+ sodipodi:linespacing="100%"><tspan
+ sodipodi:role="line"
+ id="tspan19382"
+ x="303.82324"
+ y="141.07895">proof</tspan></text>
+ </g>
+</svg>
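
The isar-vm figure added above (in EPS, PDF and SVG variants) depicts the Isar virtual machine: the three modes "prove", "state" and "chain", with arrows labelled by the commands that move between them (theorem enters prove; proof moves on to state; fix/assume, note/let and { }/next stay within state; then switches to chain; have/show re-enter prove; qed closes the block). A minimal Isar sketch that walks these transitions, included here for illustration only (not part of the changeset):

    lemma "A --> A"   (* toplevel goal statement: enter prove mode *)
    proof             (* apply an initial rule: prove -> state *)
      assume a: A     (* remain in state mode *)
      then show A .   (* then: state -> chain; show: chain -> prove; "." concludes: back to state *)
    qed               (* finish the enclosing block *)
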
--- a/doc-src/IsarRef/isar-ref.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/isar-ref.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\documentclass[12pt,a4paper,fleqn]{report}
\usepackage{amssymb}
\usepackage[greek,english]{babel}
@@ -82,7 +79,11 @@
\pagenumbering{roman} \tableofcontents \clearfirst
+\part{Basic Concepts}
\input{Thy/document/Introduction.tex}
+\input{Thy/document/Framework.tex}
+\input{Thy/document/First_Order_Logic.tex}
+\part{General Language Elements}
\input{Thy/document/Outer_Syntax.tex}
\input{Thy/document/Document_Preparation.tex}
\input{Thy/document/Spec.tex}
@@ -90,10 +91,12 @@
\input{Thy/document/Inner_Syntax.tex}
\input{Thy/document/Misc.tex}
\input{Thy/document/Generic.tex}
+\part{Object-Logics}
\input{Thy/document/HOL_Specific.tex}
\input{Thy/document/HOLCF_Specific.tex}
\input{Thy/document/ZF_Specific.tex}
+\part{Appendix}
\appendix
\input{Thy/document/Quick_Reference.tex}
\let\int\intorig
@@ -101,7 +104,7 @@
\input{Thy/document/ML_Tactic.tex}
\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
+ \bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
--- a/doc-src/IsarRef/style.sty Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/IsarRef/style.sty Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
%% toc
\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
@@ -18,12 +15,17 @@
%% ML
\newenvironment{mldecls}{\par\noindent\begingroup\def\isanewline{\\}\begin{tabular}{ll}}{\end{tabular}\medskip\endgroup}
-\newcommand{\indexml}[1]{\index{#1 (ML value)|bold}}
+
+%% Isar
+\newcommand{\isasymBBAR}{{\,\newdimen{\tmpheight}\settoheight\tmpheight{\isacharbar}\rule{1pt}{\tmpheight}\,}}
+\isafoldtag{noproof}\def\isafoldnoproof{~\isafold{proof}}
%% math
+\newcommand{\isasymstrut}{\isamath{\mathstrut}}
+\newcommand{\isasymvartheta}{\isamath{\,\theta}}
\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
\renewcommand{\isadigit}[1]{\isamath{#1}}
-
+\newcommand{\text}[1]{\mbox{#1}}
%% global style options
\pagestyle{headings}
--- a/doc-src/Locales/.cvsignore Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-locales.out
-locales.pdf
--- a/doc-src/Ref/ref.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/Ref/ref.tex Fri Feb 27 18:50:35 2009 +0100
@@ -7,7 +7,7 @@
%%% to delete old ones: \\indexbold{\*[^}]*}
%% run sedindex ref to prepare index file
%%% needs chapter on Provers/typedsimp.ML?
-\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] The Isabelle Reference Manual}
+\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Isabelle Reference Manual}
\author{{\em Lawrence C. Paulson}\\
Computer Laboratory \\ University of Cambridge \\
--- a/doc-src/System/Thy/Presentation.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/System/Thy/Presentation.thy Fri Feb 27 18:50:35 2009 +0100
@@ -654,7 +654,7 @@
"-"}@{text foo}'' to drop, and ``@{verbatim "/"}@{text foo}'' to
fold text tagged as @{text foo}. The builtin default is equivalent
to the tag specification ``@{verbatim
- "/theory,/proof,/ML,+visible,-invisible"}''; see also the {\LaTeX}
+ "+theory,+proof,+ML,+visible,-invisible"}''; see also the {\LaTeX}
macros @{verbatim "\\isakeeptag"}, @{verbatim "\\isadroptag"}, and
@{verbatim "\\isafoldtag"}, in @{"file"
"~~/lib/texinputs/isabelle.sty"}.
--- a/doc-src/System/Thy/document/Presentation.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/System/Thy/document/Presentation.tex Fri Feb 27 18:50:35 2009 +0100
@@ -668,7 +668,7 @@
tagged Isabelle command regions. Tags are specified as a comma
separated list of modifier/name pairs: ``\verb|+|\isa{foo}'' (or just ``\isa{foo}'') means to keep, ``\verb|-|\isa{foo}'' to drop, and ``\verb|/|\isa{foo}'' to
fold text tagged as \isa{foo}. The builtin default is equivalent
- to the tag specification ``\verb|/theory,/proof,/ML,+visible,-invisible|''; see also the {\LaTeX}
+ to the tag specification ``\verb|+theory,+proof,+ML,+visible,-invisible|''; see also the {\LaTeX}
macros \verb|\isakeeptag|, \verb|\isadroptag|, and
\verb|\isafoldtag|, in \hyperlink{file.~~/lib/texinputs/isabelle.sty}{\mbox{\isa{\isatt{{\isachartilde}{\isachartilde}{\isacharslash}lib{\isacharslash}texinputs{\isacharslash}isabelle{\isachardot}sty}}}}.
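
Both the theory source and its generated LaTeX now state the default tag specification as "+theory,+proof,+ML,+visible,-invisible", i.e. theory, proof and ML text is kept by default rather than folded. For illustration (a sketch, not part of the changeset; the %tag syntax for tagging command regions is assumed from the Isar reference manual), individual commands can still override the default per region:

    lemma %invisible aux: "True" by simp   (* tagged "invisible": dropped per "-invisible" *)
    lemma %visible main: "True" by simp    (* tagged "visible": kept per "+visible" *)
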
--- a/doc-src/System/system.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/System/system.tex Fri Feb 27 18:50:35 2009 +0100
@@ -36,7 +36,7 @@
\input{Thy/document/Misc.tex}
\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
+ \bibliographystyle{abbrv} \small\raggedright\frenchspacing
\bibliography{../manual}
\endgroup
--- a/doc-src/ZF/FOL.tex Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/ZF/FOL.tex Fri Feb 27 18:50:35 2009 +0100
@@ -1,4 +1,4 @@
-%% $Id$
+%!TEX root = logics-ZF.tex
\chapter{First-Order Logic}
\index{first-order logic|(}
@@ -360,7 +360,8 @@
logic by designating \isa{IFOL} rather than \isa{FOL} as the parent
theory:
\begin{isabelle}
-\isacommand{theory}\ IFOL\_examples\ =\ IFOL:
+\isacommand{theory}\ IFOL\_examples\ \isacommand{imports}\ IFOL\isanewline
+\isacommand{begin}
\end{isabelle}
The proof begins by entering the goal, then applying the rule $({\imp}I)$.
\begin{isabelle}
@@ -441,7 +442,7 @@
\ 1.\ (\isasymexists y.\ \isasymforall x.\ Q(x,\ y))\
\isasymlongrightarrow \ (\isasymforall x.\ \isasymexists y.\ Q(x,\ y))
\isanewline
-\isacommand{by} (tactic {*IntPr.fast_tac 1*})\isanewline
+\isacommand{by} (tactic \ttlbrace*IntPr.fast_tac 1*\ttrbrace)\isanewline
No\ subgoals!
\end{isabelle}
@@ -529,7 +530,8 @@
$\all{x}P(x)$ is true. Either way the theorem holds. First, we must
work in a theory based on classical logic, the theory \isa{FOL}:
\begin{isabelle}
-\isacommand{theory}\ FOL\_examples\ =\ FOL:
+\isacommand{theory}\ FOL\_examples\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}
\end{isabelle}
The formal proof does not conform in any obvious way to the sketch given
@@ -631,7 +633,8 @@
$if::[o,o,o]\To o$. The axiom \tdx{if_def} asserts the
equation~$(if)$.
\begin{isabelle}
-\isacommand{theory}\ If\ =\ FOL:\isanewline
+\isacommand{theory}\ If\ \isacommand{imports}\ FOL\isanewline
+\isacommand{begin}\isanewline
\isacommand{constdefs}\isanewline
\ \ if\ ::\ "[o,o,o]=>o"\isanewline
\ \ \ "if(P,Q,R)\ ==\ P\&Q\ |\ \isachartilde P\&R"
--- a/doc-src/antiquote_setup.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/antiquote_setup.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,4 @@
(* Title: Doc/antiquote_setup.ML
- ID: $Id$
Author: Makarius
Auxiliary antiquotations for the Isabelle manuals.
@@ -13,13 +12,17 @@
(* misc utils *)
-val clean_string = translate_string
+fun translate f = Symbol.explode #> map f #> implode;
+
+val clean_string = translate
(fn "_" => "\\_"
+ | "#" => "\\#"
| "<" => "$<$"
| ">" => "$>$"
- | "#" => "\\#"
| "{" => "\\{"
+ | "|" => "$\\mid$"
| "}" => "\\}"
+ | "\\<dash>" => "-"
| c => c);
fun clean_name "\\<dots>" = "dots"
@@ -28,7 +31,7 @@
| clean_name "_" = "underscore"
| clean_name "{" = "braceleft"
| clean_name "}" = "braceright"
- | clean_name s = s |> translate_string (fn "_" => "-" | c => c);
+ | clean_name s = s |> translate (fn "_" => "-" | "\\<dash>" => "-" | c => c);
(* verbatim text *)
@@ -66,8 +69,9 @@
val txt' = if kind = "" then txt else kind ^ " " ^ txt;
val _ = writeln (ml (txt1, txt2));
val _ = ML_Context.eval_in (SOME ctxt) false Position.none (ml (txt1, txt2));
+ val kind' = if kind = "" then "ML" else "ML " ^ kind;
in
- "\\indexml" ^ kind ^ enclose "{" "}" (clean_string txt1) ^
+ "\\indexdef{}{" ^ kind' ^ "}{" ^ clean_string txt1 ^ "}" ^
(txt'
|> (if ! O.quotes then quote else I)
|> (if ! O.display then enclose "\\begin{verbatim}\n" "\n\\end{verbatim}"
@@ -193,6 +197,7 @@
entity_antiqs no_check "" "case" @
entity_antiqs (K ThyOutput.defined_command) "" "antiquotation" @
entity_antiqs (fn _ => fn name => is_some (OS.Process.getEnv name)) "isatt" "setting" @
+ entity_antiqs no_check "" "inference" @
entity_antiqs no_check "isatt" "executable" @
entity_antiqs (K check_tool) "isatt" "tool" @
entity_antiqs (K (File.exists o Path.explode)) "isatt" "file" @
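
The reworked clean_string/clean_name translate Isabelle symbols (via Symbol.explode) rather than raw characters, so a multi-character symbol such as "\<dash>" is handled as a single unit. A rough Isabelle/ML sketch of the effect, assuming the definitions above are in scope:

    val s = clean_string "foo_bar";           (* yields "foo\_bar": underscore escaped for LaTeX *)
    val n = clean_name "long\\<dash>name_x";  (* yields "long-name-x": plain dashes, safe for LaTeX labels *)
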
--- a/doc-src/isar.sty Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/isar.sty Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,3 @@
-
-%% $Id$
-
\usepackage{ifthen}
\newcommand{\indexdef}[3]%
@@ -20,3 +17,9 @@
\newcommand{\isasymIMPORTS}{\isakeyword{imports}}
\newcommand{\isasymIN}{\isakeyword{in}}
\newcommand{\isasymSTRUCTURE}{\isakeyword{structure}}
+\newcommand{\isasymFIXES}{\isakeyword{fixes}}
+\newcommand{\isasymASSUMES}{\isakeyword{assumes}}
+\newcommand{\isasymSHOWS}{\isakeyword{shows}}
+\newcommand{\isasymOBTAINS}{\isakeyword{obtains}}
+
+\newcommand{\isasymASSM}{\isacommand{assm}}
--- a/doc-src/manual.bib Thu Feb 26 10:13:43 2009 +0100
+++ b/doc-src/manual.bib Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,4 @@
% BibTeX database for the Isabelle documentation
-%
-% Lawrence C Paulson $Id$
%publishers
@string{AP="Academic Press"}
@@ -469,6 +467,17 @@
number = {364/07}
}
+@InProceedings{Haftmann-Wenzel:2009,
+ author = {Florian Haftmann and Makarius Wenzel},
+ title = {Local theory specifications in {Isabelle/Isar}},
+ editor = {Stefano Berardi and Ferruccio Damiani and de Liguoro, Ugo},
+ booktitle = {Types for Proofs and Programs, TYPES 2008},
+ publisher = {Springer},
+ series = {LNCS},
+ volume = {????},
+ year = {2009}
+}
+
@manual{isabelle-classes,
author = {Florian Haftmann},
title = {Haskell-style type classes with {Isabelle}/{Isar}},
@@ -669,6 +678,16 @@
pages = {341-386},
crossref = {birtwistle89}}
+@Article{Miller:1991,
+ author = {Dale Miller},
+ title = {A Logic Programming Language with Lambda-Abstraction, Function Variables,
+ and Simple Unification},
+ journal = {Journal of Logic and Computation},
+ year = 1991,
+ volume = 1,
+ number = 4
+}
+
@Article{miller-mixed,
Author = {Dale Miller},
Title = {Unification Under a Mixed Prefix},
@@ -1198,6 +1217,15 @@
pages = {578-596},
crossref = {fme93}}
+@Article{Schroeder-Heister:1984,
+ author = {Peter Schroeder-Heister},
+ title = {A Natural Extension of Natural Deduction},
+ journal = {Journal of Symbolic Logic},
+ year = 1984,
+ volume = 49,
+ number = 4
+}
+
@inproceedings{slind-tfl,
author = {Konrad Slind},
title = {Function Definition in Higher Order Logic},
@@ -1331,6 +1359,24 @@
year=2002,
note = {\url{http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2002/wenzel.html}}}
+@Article{Wenzel-Wiedijk:2002,
+ author = {Freek Wiedijk and Markus Wenzel},
+  title = {A comparison of the mathematical proof languages {Mizar} and {Isar}},
+ journal = {Journal of Automated Reasoning},
+ year = 2002,
+ volume = 29,
+ number = {3-4}
+}
+
+@InCollection{Wenzel-Paulson:2006,
+ author = {Markus Wenzel and Lawrence C. Paulson},
+ title = {{Isabelle/Isar}},
+ booktitle = {The Seventeen Provers of the World},
+ year = 2006,
+ editor = {F. Wiedijk},
+ series = {LNAI 3600}
+}
+
@InCollection{Wenzel:2006:Festschrift,
author = {Makarius Wenzel},
title = {{Isabelle/Isar} --- a generic framework for human-readable proof documents},
--- a/doc/Contents Thu Feb 26 10:13:43 2009 +0100
+++ b/doc/Contents Fri Feb 27 18:50:35 2009 +0100
@@ -6,13 +6,16 @@
functions Tutorial on Function Definitions
codegen Tutorial on Code Generation
sugar LaTeX sugar for proof documents
- ind-defs (Co)Inductive Definitions in ZF
Reference Manuals
isar-ref The Isabelle/Isar Reference Manual
implementation The Isabelle/Isar Implementation Manual
system The Isabelle System Manual
- ref The Isabelle Reference Manual
+
+Old Manuals (outdated!)
+ intro Old Introduction to Isabelle
+ ref Old Isabelle Reference Manual
logics Isabelle's Logics: overview and misc logics
logics-HOL Isabelle's Logics: HOL
logics-ZF Isabelle's Logics: FOL and ZF
+ ind-defs (Co)Inductive Definitions in ZF
--- a/lib/browser/.cvsignore Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-GraphBrowser.jar
--- a/lib/browser/GraphBrowser/.cvsignore Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-*.class
--- a/lib/browser/awtUtilities/.cvsignore Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-*.class
--- a/src/FOL/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ b/src/FOL/IsaMakefile Fri Feb 27 18:50:35 2009 +0100
@@ -46,12 +46,12 @@
FOL-ex: FOL $(LOG)/FOL-ex.gz
$(LOG)/FOL-ex.gz: $(OUT)/FOL ex/First_Order_Logic.thy ex/If.thy \
- ex/IffOracle.thy ex/Nat.thy ex/Natural_Numbers.thy \
- ex/LocaleTest.thy \
- ex/Miniscope.thy ex/Prolog.thy ex/ROOT.ML ex/Classical.thy \
- ex/document/root.tex ex/Foundation.thy ex/Intuitionistic.thy \
- ex/Intro.thy ex/Propositional_Int.thy ex/Propositional_Cla.thy \
- ex/Quantifiers_Int.thy ex/Quantifiers_Cla.thy
+ ex/Iff_Oracle.thy ex/Nat.thy ex/Nat_Class.thy ex/Natural_Numbers.thy \
+ ex/LocaleTest.thy ex/Miniscope.thy ex/Prolog.thy ex/ROOT.ML \
+ ex/Classical.thy ex/document/root.tex ex/Foundation.thy \
+ ex/Intuitionistic.thy ex/Intro.thy ex/Propositional_Int.thy \
+ ex/Propositional_Cla.thy ex/Quantifiers_Int.thy \
+ ex/Quantifiers_Cla.thy
@$(ISABELLE_TOOL) usedir $(OUT)/FOL ex
--- a/src/FOL/ex/IffOracle.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-(* Title: FOL/ex/IffOracle.thy
- ID: $Id$
- Author: Lawrence C Paulson, Cambridge University Computer Laboratory
- Copyright 1996 University of Cambridge
-*)
-
-header {* Example of Declaring an Oracle *}
-
-theory IffOracle
-imports FOL
-begin
-
-subsection {* Oracle declaration *}
-
-text {*
- This oracle makes tautologies of the form @{text "P <-> P <-> P <-> P"}.
- The length is specified by an integer, which is checked to be even
- and positive.
-*}
-
-oracle iff_oracle = {*
- let
- fun mk_iff 1 = Var (("P", 0), @{typ o})
- | mk_iff n = FOLogic.iff $ Var (("P", 0), @{typ o}) $ mk_iff (n - 1);
- in
- fn (thy, n) =>
- if n > 0 andalso n mod 2 = 0
- then Thm.cterm_of thy (FOLogic.mk_Trueprop (mk_iff n))
- else raise Fail ("iff_oracle: " ^ string_of_int n)
- end
-*}
-
-
-subsection {* Oracle as low-level rule *}
-
-ML {* iff_oracle (@{theory}, 2) *}
-ML {* iff_oracle (@{theory}, 10) *}
-ML {* Thm.proof_of (iff_oracle (@{theory}, 10)) *}
-
-text {* These oracle calls had better fail. *}
-
-ML {*
- (iff_oracle (@{theory}, 5); error "?")
- handle Fail _ => warning "Oracle failed, as expected"
-*}
-
-ML {*
- (iff_oracle (@{theory}, 1); error "?")
- handle Fail _ => warning "Oracle failed, as expected"
-*}
-
-
-subsection {* Oracle as proof method *}
-
-method_setup iff = {*
- Method.simple_args OuterParse.nat (fn n => fn ctxt =>
- Method.SIMPLE_METHOD
- (HEADGOAL (Tactic.rtac (iff_oracle (ProofContext.theory_of ctxt, n)))
- handle Fail _ => no_tac))
-*} "iff oracle"
-
-
-lemma "A <-> A"
- by (iff 2)
-
-lemma "A <-> A <-> A <-> A <-> A <-> A <-> A <-> A <-> A <-> A"
- by (iff 10)
-
-lemma "A <-> A <-> A <-> A <-> A"
- apply (iff 5)?
- oops
-
-lemma A
- apply (iff 1)?
- oops
-
-end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/FOL/ex/Iff_Oracle.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,76 @@
+(* Title: FOL/ex/Iff_Oracle.thy
+ Author: Lawrence C Paulson, Cambridge University Computer Laboratory
+ Copyright 1996 University of Cambridge
+*)
+
+header {* Example of Declaring an Oracle *}
+
+theory Iff_Oracle
+imports FOL
+begin
+
+subsection {* Oracle declaration *}
+
+text {*
+ This oracle makes tautologies of the form @{text "P <-> P <-> P <-> P"}.
+ The length is specified by an integer, which is checked to be even
+ and positive.
+*}
+
+oracle iff_oracle = {*
+ let
+ fun mk_iff 1 = Var (("P", 0), @{typ o})
+ | mk_iff n = FOLogic.iff $ Var (("P", 0), @{typ o}) $ mk_iff (n - 1);
+ in
+ fn (thy, n) =>
+ if n > 0 andalso n mod 2 = 0
+ then Thm.cterm_of thy (FOLogic.mk_Trueprop (mk_iff n))
+ else raise Fail ("iff_oracle: " ^ string_of_int n)
+ end
+*}
+
+
+subsection {* Oracle as low-level rule *}
+
+ML {* iff_oracle (@{theory}, 2) *}
+ML {* iff_oracle (@{theory}, 10) *}
+ML {* Thm.proof_of (iff_oracle (@{theory}, 10)) *}
+
+text {* These oracle calls had better fail. *}
+
+ML {*
+ (iff_oracle (@{theory}, 5); error "?")
+ handle Fail _ => warning "Oracle failed, as expected"
+*}
+
+ML {*
+ (iff_oracle (@{theory}, 1); error "?")
+ handle Fail _ => warning "Oracle failed, as expected"
+*}
+
+
+subsection {* Oracle as proof method *}
+
+method_setup iff = {*
+ Method.simple_args OuterParse.nat (fn n => fn ctxt =>
+ Method.SIMPLE_METHOD
+ (HEADGOAL (Tactic.rtac (iff_oracle (ProofContext.theory_of ctxt, n)))
+ handle Fail _ => no_tac))
+*} "iff oracle"
+
+
+lemma "A <-> A"
+ by (iff 2)
+
+lemma "A <-> A <-> A <-> A <-> A <-> A <-> A <-> A <-> A <-> A"
+ by (iff 10)
+
+lemma "A <-> A <-> A <-> A <-> A"
+ apply (iff 5)?
+ oops
+
+lemma A
+ apply (iff 1)?
+ oops
+
+end
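
Note that mk_iff nests to the right, so for n = 4 the oracle produces the term P <-> (P <-> (P <-> P)); since <-> is right-associative in FOL this prints as "P <-> P <-> P <-> P". A further legal call in the style of the ones above (illustration only, not part of the changeset):

    ML {* iff_oracle (@{theory}, 4) *}   (* "P <-> P <-> P <-> P" *)
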
--- a/src/FOL/ex/NatClass.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-(* Title: FOL/ex/NatClass.thy
- ID: $Id$
- Author: Markus Wenzel, TU Muenchen
-*)
-
-theory NatClass
-imports FOL
-begin
-
-text {*
- This is an abstract version of theory @{text "Nat"}. Instead of
- axiomatizing a single type @{text nat} we define the class of all
- these types (up to isomorphism).
-
- Note: The @{text rec} operator had to be made \emph{monomorphic},
- because class axioms may not contain more than one type variable.
-*}
-
-consts
- 0 :: 'a ("0")
- Suc :: "'a => 'a"
- rec :: "['a, 'a, ['a, 'a] => 'a] => 'a"
-
-axclass
- nat < "term"
- induct: "[| P(0); !!x. P(x) ==> P(Suc(x)) |] ==> P(n)"
- Suc_inject: "Suc(m) = Suc(n) ==> m = n"
- Suc_neq_0: "Suc(m) = 0 ==> R"
- rec_0: "rec(0, a, f) = a"
- rec_Suc: "rec(Suc(m), a, f) = f(m, rec(m, a, f))"
-
-definition
- add :: "['a::nat, 'a] => 'a" (infixl "+" 60) where
- "m + n = rec(m, n, %x y. Suc(y))"
-
-lemma Suc_n_not_n: "Suc(k) ~= (k::'a::nat)"
-apply (rule_tac n = k in induct)
-apply (rule notI)
-apply (erule Suc_neq_0)
-apply (rule notI)
-apply (erule notE)
-apply (erule Suc_inject)
-done
-
-lemma "(k+m)+n = k+(m+n)"
-apply (rule induct)
-back
-back
-back
-back
-back
-back
-oops
-
-lemma add_0 [simp]: "0+n = n"
-apply (unfold add_def)
-apply (rule rec_0)
-done
-
-lemma add_Suc [simp]: "Suc(m)+n = Suc(m+n)"
-apply (unfold add_def)
-apply (rule rec_Suc)
-done
-
-lemma add_assoc: "(k+m)+n = k+(m+n)"
-apply (rule_tac n = k in induct)
-apply simp
-apply simp
-done
-
-lemma add_0_right: "m+0 = m"
-apply (rule_tac n = m in induct)
-apply simp
-apply simp
-done
-
-lemma add_Suc_right: "m+Suc(n) = Suc(m+n)"
-apply (rule_tac n = m in induct)
-apply simp_all
-done
-
-lemma
- assumes prem: "!!n. f(Suc(n)) = Suc(f(n))"
- shows "f(i+j) = i+f(j)"
-apply (rule_tac n = i in induct)
-apply simp
-apply (simp add: prem)
-done
-
-end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/FOL/ex/Nat_Class.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,88 @@
+(* Title: FOL/ex/Nat_Class.thy
+ Author: Markus Wenzel, TU Muenchen
+*)
+
+theory Nat_Class
+imports FOL
+begin
+
+text {*
+ This is an abstract version of theory @{text Nat}. Instead of
+ axiomatizing a single type @{text nat} we define the class of all
+ these types (up to isomorphism).
+
+ Note: The @{text rec} operator had to be made \emph{monomorphic},
+ because class axioms may not contain more than one type variable.
+*}
+
+class nat =
+ fixes Zero :: 'a ("0")
+ and Suc :: "'a \<Rightarrow> 'a"
+ and rec :: "'a \<Rightarrow> 'a \<Rightarrow> ('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a"
+ assumes induct: "P(0) \<Longrightarrow> (\<And>x. P(x) \<Longrightarrow> P(Suc(x))) \<Longrightarrow> P(n)"
+ and Suc_inject: "Suc(m) = Suc(n) \<Longrightarrow> m = n"
+ and Suc_neq_Zero: "Suc(m) = 0 \<Longrightarrow> R"
+ and rec_Zero: "rec(0, a, f) = a"
+ and rec_Suc: "rec(Suc(m), a, f) = f(m, rec(m, a, f))"
+begin
+
+definition
+ add :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "+" 60) where
+ "m + n = rec(m, n, \<lambda>x y. Suc(y))"
+
+lemma Suc_n_not_n: "Suc(k) \<noteq> (k::'a)"
+ apply (rule_tac n = k in induct)
+ apply (rule notI)
+ apply (erule Suc_neq_Zero)
+ apply (rule notI)
+ apply (erule notE)
+ apply (erule Suc_inject)
+ done
+
+lemma "(k + m) + n = k + (m + n)"
+ apply (rule induct)
+ back
+ back
+ back
+ back
+ back
+ oops
+
+lemma add_Zero [simp]: "0 + n = n"
+ apply (unfold add_def)
+ apply (rule rec_Zero)
+ done
+
+lemma add_Suc [simp]: "Suc(m) + n = Suc(m + n)"
+ apply (unfold add_def)
+ apply (rule rec_Suc)
+ done
+
+lemma add_assoc: "(k + m) + n = k + (m + n)"
+ apply (rule_tac n = k in induct)
+ apply simp
+ apply simp
+ done
+
+lemma add_Zero_right: "m + 0 = m"
+ apply (rule_tac n = m in induct)
+ apply simp
+ apply simp
+ done
+
+lemma add_Suc_right: "m + Suc(n) = Suc(m + n)"
+ apply (rule_tac n = m in induct)
+ apply simp_all
+ done
+
+lemma
+ assumes prem: "\<And>n. f(Suc(n)) = Suc(f(n))"
+ shows "f(i + j) = i + f(j)"
+ apply (rule_tac n = i in induct)
+ apply simp
+ apply (simp add: prem)
+ done
+
+end
+
+end
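
The add operation computes from the class axioms alone; unfolding add_def and the rec equations gives, step by step:

    Suc(0) + n = rec(Suc(0), n, %x y. Suc(y))   (add_def)
               = Suc(rec(0, n, %x y. Suc(y)))   (rec_Suc)
               = Suc(n)                         (rec_Zero)

so the [simp] rules above prove such facts directly, e.g. (a sketch, not part of the changeset):

    lemma "Suc(0) + n = Suc(n)" by simp   (* via add_Suc, add_Zero *)
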
--- a/src/FOL/ex/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/FOL/ex/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,7 +1,4 @@
(* Title: FOL/ex/ROOT.ML
- ID: $Id$
- Author: Lawrence C Paulson, Cambridge University Computer Laboratory
- Copyright 1992 University of Cambridge
Examples for First-Order Logic.
*)
@@ -11,23 +8,19 @@
"Natural_Numbers",
"Intro",
"Nat",
+ "Nat_Class",
"Foundation",
"Prolog",
-
"Intuitionistic",
"Propositional_Int",
"Quantifiers_Int",
-
"Classical",
"Propositional_Cla",
"Quantifiers_Cla",
"Miniscope",
"If",
-
- "NatClass",
- "IffOracle"
+ "Iff_Oracle"
];
(*regression test for locales -- sets several global flags!*)
no_document use_thy "LocaleTest";
-
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/HOL/Archimedean_Field.thy Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,400 @@
+(* Title: Archimedean_Field.thy
+ Author: Brian Huffman
+*)
+
+header {* Archimedean Fields, Floor and Ceiling Functions *}
+
+theory Archimedean_Field
+imports Main
+begin
+
+subsection {* Class of Archimedean fields *}
+
+text {* Archimedean fields have no infinite elements. *}
+
+class archimedean_field = ordered_field + number_ring +
+ assumes ex_le_of_int: "\<exists>z. x \<le> of_int z"
+
+lemma ex_less_of_int:
+ fixes x :: "'a::archimedean_field" shows "\<exists>z. x < of_int z"
+proof -
+ from ex_le_of_int obtain z where "x \<le> of_int z" ..
+ then have "x < of_int (z + 1)" by simp
+ then show ?thesis ..
+qed
+
+lemma ex_of_int_less:
+ fixes x :: "'a::archimedean_field" shows "\<exists>z. of_int z < x"
+proof -
+ from ex_less_of_int obtain z where "- x < of_int z" ..
+ then have "of_int (- z) < x" by simp
+ then show ?thesis ..
+qed
+
+lemma ex_less_of_nat:
+ fixes x :: "'a::archimedean_field" shows "\<exists>n. x < of_nat n"
+proof -
+ obtain z where "x < of_int z" using ex_less_of_int ..
+ also have "\<dots> \<le> of_int (int (nat z))" by simp
+ also have "\<dots> = of_nat (nat z)" by (simp only: of_int_of_nat_eq)
+ finally show ?thesis ..
+qed
+
+lemma ex_le_of_nat:
+ fixes x :: "'a::archimedean_field" shows "\<exists>n. x \<le> of_nat n"
+proof -
+ obtain n where "x < of_nat n" using ex_less_of_nat ..
+ then have "x \<le> of_nat n" by simp
+ then show ?thesis ..
+qed
+
+text {* Archimedean fields have no infinitesimal elements. *}
+
+lemma ex_inverse_of_nat_Suc_less:
+ fixes x :: "'a::archimedean_field"
+ assumes "0 < x" shows "\<exists>n. inverse (of_nat (Suc n)) < x"
+proof -
+ from `0 < x` have "0 < inverse x"
+ by (rule positive_imp_inverse_positive)
+ obtain n where "inverse x < of_nat n"
+ using ex_less_of_nat ..
+ then obtain m where "inverse x < of_nat (Suc m)"
+ using `0 < inverse x` by (cases n) (simp_all del: of_nat_Suc)
+ then have "inverse (of_nat (Suc m)) < inverse (inverse x)"
+ using `0 < inverse x` by (rule less_imp_inverse_less)
+ then have "inverse (of_nat (Suc m)) < x"
+ using `0 < x` by (simp add: nonzero_inverse_inverse_eq)
+ then show ?thesis ..
+qed
+
+lemma ex_inverse_of_nat_less:
+ fixes x :: "'a::archimedean_field"
+ assumes "0 < x" shows "\<exists>n>0. inverse (of_nat n) < x"
+ using ex_inverse_of_nat_Suc_less [OF `0 < x`] by auto
+
+lemma ex_less_of_nat_mult:
+ fixes x :: "'a::archimedean_field"
+ assumes "0 < x" shows "\<exists>n. y < of_nat n * x"
+proof -
+ obtain n where "y / x < of_nat n" using ex_less_of_nat ..
+ with `0 < x` have "y < of_nat n * x" by (simp add: pos_divide_less_eq)
+ then show ?thesis ..
+qed
+
+
+subsection {* Existence and uniqueness of floor function *}
+
+lemma exists_least_lemma:
+ assumes "\<not> P 0" and "\<exists>n. P n"
+ shows "\<exists>n. \<not> P n \<and> P (Suc n)"
+proof -
+ from `\<exists>n. P n` have "P (Least P)" by (rule LeastI_ex)
+ with `\<not> P 0` obtain n where "Least P = Suc n"
+ by (cases "Least P") auto
+ then have "n < Least P" by simp
+ then have "\<not> P n" by (rule not_less_Least)
+ then have "\<not> P n \<and> P (Suc n)"
+ using `P (Least P)` `Least P = Suc n` by simp
+ then show ?thesis ..
+qed
+
+lemma floor_exists:
+ fixes x :: "'a::archimedean_field"
+ shows "\<exists>z. of_int z \<le> x \<and> x < of_int (z + 1)"
+proof (cases)
+ assume "0 \<le> x"
+ then have "\<not> x < of_nat 0" by simp
+ then have "\<exists>n. \<not> x < of_nat n \<and> x < of_nat (Suc n)"
+ using ex_less_of_nat by (rule exists_least_lemma)
+ then obtain n where "\<not> x < of_nat n \<and> x < of_nat (Suc n)" ..
+ then have "of_int (int n) \<le> x \<and> x < of_int (int n + 1)" by simp
+ then show ?thesis ..
+next
+ assume "\<not> 0 \<le> x"
+ then have "\<not> - x \<le> of_nat 0" by simp
+ then have "\<exists>n. \<not> - x \<le> of_nat n \<and> - x \<le> of_nat (Suc n)"
+ using ex_le_of_nat by (rule exists_least_lemma)
+ then obtain n where "\<not> - x \<le> of_nat n \<and> - x \<le> of_nat (Suc n)" ..
+ then have "of_int (- int n - 1) \<le> x \<and> x < of_int (- int n - 1 + 1)" by simp
+ then show ?thesis ..
+qed
+
+lemma floor_exists1:
+ fixes x :: "'a::archimedean_field"
+ shows "\<exists>!z. of_int z \<le> x \<and> x < of_int (z + 1)"
+proof (rule ex_ex1I)
+ show "\<exists>z. of_int z \<le> x \<and> x < of_int (z + 1)"
+ by (rule floor_exists)
+next
+ fix y z assume
+ "of_int y \<le> x \<and> x < of_int (y + 1)"
+ "of_int z \<le> x \<and> x < of_int (z + 1)"
+ then have
+ "of_int y \<le> x" "x < of_int (y + 1)"
+ "of_int z \<le> x" "x < of_int (z + 1)"
+ by simp_all
+ from le_less_trans [OF `of_int y \<le> x` `x < of_int (z + 1)`]
+ le_less_trans [OF `of_int z \<le> x` `x < of_int (y + 1)`]
+ show "y = z" by (simp del: of_int_add)
+qed
+
+
+subsection {* Floor function *}
+
+definition
+ floor :: "'a::archimedean_field \<Rightarrow> int" where
+ [code del]: "floor x = (THE z. of_int z \<le> x \<and> x < of_int (z + 1))"
+
+notation (xsymbols)
+ floor ("\<lfloor>_\<rfloor>")
+
+notation (HTML output)
+ floor ("\<lfloor>_\<rfloor>")
+
+lemma floor_correct: "of_int (floor x) \<le> x \<and> x < of_int (floor x + 1)"
+ unfolding floor_def using floor_exists1 by (rule theI')
+
+lemma floor_unique: "\<lbrakk>of_int z \<le> x; x < of_int z + 1\<rbrakk> \<Longrightarrow> floor x = z"
+ using floor_correct [of x] floor_exists1 [of x] by auto
+
+lemma of_int_floor_le: "of_int (floor x) \<le> x"
+ using floor_correct ..
+
+lemma le_floor_iff: "z \<le> floor x \<longleftrightarrow> of_int z \<le> x"
+proof
+ assume "z \<le> floor x"
+ then have "(of_int z :: 'a) \<le> of_int (floor x)" by simp
+ also have "of_int (floor x) \<le> x" by (rule of_int_floor_le)
+ finally show "of_int z \<le> x" .
+next
+ assume "of_int z \<le> x"
+ also have "x < of_int (floor x + 1)" using floor_correct ..
+ finally show "z \<le> floor x" by (simp del: of_int_add)
+qed
+
+lemma floor_less_iff: "floor x < z \<longleftrightarrow> x < of_int z"
+ by (simp add: not_le [symmetric] le_floor_iff)
+
+lemma less_floor_iff: "z < floor x \<longleftrightarrow> of_int z + 1 \<le> x"
+ using le_floor_iff [of "z + 1" x] by auto
+
+lemma floor_le_iff: "floor x \<le> z \<longleftrightarrow> x < of_int z + 1"
+ by (simp add: not_less [symmetric] less_floor_iff)
+
+lemma floor_mono: assumes "x \<le> y" shows "floor x \<le> floor y"
+proof -
+ have "of_int (floor x) \<le> x" by (rule of_int_floor_le)
+ also note `x \<le> y`
+ finally show ?thesis by (simp add: le_floor_iff)
+qed
+
+lemma floor_less_cancel: "floor x < floor y \<Longrightarrow> x < y"
+ by (auto simp add: not_le [symmetric] floor_mono)
+
+lemma floor_of_int [simp]: "floor (of_int z) = z"
+ by (rule floor_unique) simp_all
+
+lemma floor_of_nat [simp]: "floor (of_nat n) = int n"
+ using floor_of_int [of "of_nat n"] by simp
+
+text {* Floor with numerals *}
+
+lemma floor_zero [simp]: "floor 0 = 0"
+ using floor_of_int [of 0] by simp
+
+lemma floor_one [simp]: "floor 1 = 1"
+ using floor_of_int [of 1] by simp
+
+lemma floor_number_of [simp]: "floor (number_of v) = number_of v"
+ using floor_of_int [of "number_of v"] by simp
+
+lemma zero_le_floor [simp]: "0 \<le> floor x \<longleftrightarrow> 0 \<le> x"
+ by (simp add: le_floor_iff)
+
+lemma one_le_floor [simp]: "1 \<le> floor x \<longleftrightarrow> 1 \<le> x"
+ by (simp add: le_floor_iff)
+
+lemma number_of_le_floor [simp]: "number_of v \<le> floor x \<longleftrightarrow> number_of v \<le> x"
+ by (simp add: le_floor_iff)
+
+lemma zero_less_floor [simp]: "0 < floor x \<longleftrightarrow> 1 \<le> x"
+ by (simp add: less_floor_iff)
+
+lemma one_less_floor [simp]: "1 < floor x \<longleftrightarrow> 2 \<le> x"
+ by (simp add: less_floor_iff)
+
+lemma number_of_less_floor [simp]:
+ "number_of v < floor x \<longleftrightarrow> number_of v + 1 \<le> x"
+ by (simp add: less_floor_iff)
+
+lemma floor_le_zero [simp]: "floor x \<le> 0 \<longleftrightarrow> x < 1"
+ by (simp add: floor_le_iff)
+
+lemma floor_le_one [simp]: "floor x \<le> 1 \<longleftrightarrow> x < 2"
+ by (simp add: floor_le_iff)
+
+lemma floor_le_number_of [simp]:
+ "floor x \<le> number_of v \<longleftrightarrow> x < number_of v + 1"
+ by (simp add: floor_le_iff)
+
+lemma floor_less_zero [simp]: "floor x < 0 \<longleftrightarrow> x < 0"
+ by (simp add: floor_less_iff)
+
+lemma floor_less_one [simp]: "floor x < 1 \<longleftrightarrow> x < 1"
+ by (simp add: floor_less_iff)
+
+lemma floor_less_number_of [simp]:
+ "floor x < number_of v \<longleftrightarrow> x < number_of v"
+ by (simp add: floor_less_iff)
+
+text {* Addition and subtraction of integers *}
+
+lemma floor_add_of_int [simp]: "floor (x + of_int z) = floor x + z"
+ using floor_correct [of x] by (simp add: floor_unique)
+
+lemma floor_add_number_of [simp]:
+ "floor (x + number_of v) = floor x + number_of v"
+ using floor_add_of_int [of x "number_of v"] by simp
+
+lemma floor_add_one [simp]: "floor (x + 1) = floor x + 1"
+ using floor_add_of_int [of x 1] by simp
+
+lemma floor_diff_of_int [simp]: "floor (x - of_int z) = floor x - z"
+ using floor_add_of_int [of x "- z"] by (simp add: algebra_simps)
+
+lemma floor_diff_number_of [simp]:
+ "floor (x - number_of v) = floor x - number_of v"
+ using floor_diff_of_int [of x "number_of v"] by simp
+
+lemma floor_diff_one [simp]: "floor (x - 1) = floor x - 1"
+ using floor_diff_of_int [of x 1] by simp
+
+
+subsection {* Ceiling function *}
+
+definition
+ ceiling :: "'a::archimedean_field \<Rightarrow> int" where
+ [code del]: "ceiling x = - floor (- x)"
+
+notation (xsymbols)
+ ceiling ("\<lceil>_\<rceil>")
+
+notation (HTML output)
+ ceiling ("\<lceil>_\<rceil>")
+
+lemma ceiling_correct: "of_int (ceiling x) - 1 < x \<and> x \<le> of_int (ceiling x)"
+ unfolding ceiling_def using floor_correct [of "- x"] by simp
+
+lemma ceiling_unique: "\<lbrakk>of_int z - 1 < x; x \<le> of_int z\<rbrakk> \<Longrightarrow> ceiling x = z"
+ unfolding ceiling_def using floor_unique [of "- z" "- x"] by simp
+
+lemma le_of_int_ceiling: "x \<le> of_int (ceiling x)"
+ using ceiling_correct ..
+
+lemma ceiling_le_iff: "ceiling x \<le> z \<longleftrightarrow> x \<le> of_int z"
+ unfolding ceiling_def using le_floor_iff [of "- z" "- x"] by auto
+
+lemma less_ceiling_iff: "z < ceiling x \<longleftrightarrow> of_int z < x"
+ by (simp add: not_le [symmetric] ceiling_le_iff)
+
+lemma ceiling_less_iff: "ceiling x < z \<longleftrightarrow> x \<le> of_int z - 1"
+ using ceiling_le_iff [of x "z - 1"] by simp
+
+lemma le_ceiling_iff: "z \<le> ceiling x \<longleftrightarrow> of_int z - 1 < x"
+ by (simp add: not_less [symmetric] ceiling_less_iff)
+
+lemma ceiling_mono: "x \<ge> y \<Longrightarrow> ceiling x \<ge> ceiling y"
+ unfolding ceiling_def by (simp add: floor_mono)
+
+lemma ceiling_less_cancel: "ceiling x < ceiling y \<Longrightarrow> x < y"
+ by (auto simp add: not_le [symmetric] ceiling_mono)
+
+lemma ceiling_of_int [simp]: "ceiling (of_int z) = z"
+ by (rule ceiling_unique) simp_all
+
+lemma ceiling_of_nat [simp]: "ceiling (of_nat n) = int n"
+ using ceiling_of_int [of "of_nat n"] by simp
+
+text {* Ceiling with numerals *}
+
+lemma ceiling_zero [simp]: "ceiling 0 = 0"
+ using ceiling_of_int [of 0] by simp
+
+lemma ceiling_one [simp]: "ceiling 1 = 1"
+ using ceiling_of_int [of 1] by simp
+
+lemma ceiling_number_of [simp]: "ceiling (number_of v) = number_of v"
+ using ceiling_of_int [of "number_of v"] by simp
+
+lemma ceiling_le_zero [simp]: "ceiling x \<le> 0 \<longleftrightarrow> x \<le> 0"
+ by (simp add: ceiling_le_iff)
+
+lemma ceiling_le_one [simp]: "ceiling x \<le> 1 \<longleftrightarrow> x \<le> 1"
+ by (simp add: ceiling_le_iff)
+
+lemma ceiling_le_number_of [simp]:
+ "ceiling x \<le> number_of v \<longleftrightarrow> x \<le> number_of v"
+ by (simp add: ceiling_le_iff)
+
+lemma ceiling_less_zero [simp]: "ceiling x < 0 \<longleftrightarrow> x \<le> -1"
+ by (simp add: ceiling_less_iff)
+
+lemma ceiling_less_one [simp]: "ceiling x < 1 \<longleftrightarrow> x \<le> 0"
+ by (simp add: ceiling_less_iff)
+
+lemma ceiling_less_number_of [simp]:
+ "ceiling x < number_of v \<longleftrightarrow> x \<le> number_of v - 1"
+ by (simp add: ceiling_less_iff)
+
+lemma zero_le_ceiling [simp]: "0 \<le> ceiling x \<longleftrightarrow> -1 < x"
+ by (simp add: le_ceiling_iff)
+
+lemma one_le_ceiling [simp]: "1 \<le> ceiling x \<longleftrightarrow> 0 < x"
+ by (simp add: le_ceiling_iff)
+
+lemma number_of_le_ceiling [simp]:
+ "number_of v \<le> ceiling x\<longleftrightarrow> number_of v - 1 < x"
+ by (simp add: le_ceiling_iff)
+
+lemma zero_less_ceiling [simp]: "0 < ceiling x \<longleftrightarrow> 0 < x"
+ by (simp add: less_ceiling_iff)
+
+lemma one_less_ceiling [simp]: "1 < ceiling x \<longleftrightarrow> 1 < x"
+ by (simp add: less_ceiling_iff)
+
+lemma number_of_less_ceiling [simp]:
+ "number_of v < ceiling x \<longleftrightarrow> number_of v < x"
+ by (simp add: less_ceiling_iff)
+
+text {* Addition and subtraction of integers *}
+
+lemma ceiling_add_of_int [simp]: "ceiling (x + of_int z) = ceiling x + z"
+ using ceiling_correct [of x] by (simp add: ceiling_unique)
+
+lemma ceiling_add_number_of [simp]:
+ "ceiling (x + number_of v) = ceiling x + number_of v"
+ using ceiling_add_of_int [of x "number_of v"] by simp
+
+lemma ceiling_add_one [simp]: "ceiling (x + 1) = ceiling x + 1"
+ using ceiling_add_of_int [of x 1] by simp
+
+lemma ceiling_diff_of_int [simp]: "ceiling (x - of_int z) = ceiling x - z"
+ using ceiling_add_of_int [of x "- z"] by (simp add: algebra_simps)
+
+lemma ceiling_diff_number_of [simp]:
+ "ceiling (x - number_of v) = ceiling x - number_of v"
+ using ceiling_diff_of_int [of x "number_of v"] by simp
+
+lemma ceiling_diff_one [simp]: "ceiling (x - 1) = ceiling x - 1"
+ using ceiling_diff_of_int [of x 1] by simp
+
+
+subsection {* Negation *}
+
+lemma floor_minus: "floor (- x) = - ceiling x"
+ unfolding ceiling_def by simp
+
+lemma ceiling_minus: "ceiling (- x) = - floor x"
+ unfolding ceiling_def by simp
+
+end
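
The [simp] setup for numerals above makes concrete floor/ceiling reasoning automatic. A few sketched consequences of the rules just proved (illustration only, not part of the changeset):

    lemma "floor (x + 2) = floor (x::'a::archimedean_field) + 2" by simp       (* floor_add_number_of *)
    lemma "ceiling (x - 1) = ceiling (x::'a::archimedean_field) - 1" by simp   (* ceiling_diff_one *)
    lemma "(0 \<le> floor x) = (0 \<le> (x::'a::archimedean_field))" by simp   (* zero_le_floor *)
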
--- a/src/HOL/AxClasses/Group.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,124 +0,0 @@
-(* Title: HOL/AxClasses/Group.thy
- ID: $Id$
- Author: Markus Wenzel, TU Muenchen
-*)
-
-theory Group imports Main begin
-
-subsection {* Monoids and Groups *}
-
-consts
- times :: "'a => 'a => 'a" (infixl "[*]" 70)
- invers :: "'a => 'a"
- one :: 'a
-
-
-axclass monoid < type
- assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
- left_unit: "one [*] x = x"
- right_unit: "x [*] one = x"
-
-axclass semigroup < type
- assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
-
-axclass group < semigroup
- left_unit: "one [*] x = x"
- left_inverse: "invers x [*] x = one"
-
-axclass agroup < group
- commute: "x [*] y = y [*] x"
-
-
-subsection {* Abstract reasoning *}
-
-theorem group_right_inverse: "x [*] invers x = (one::'a::group)"
-proof -
- have "x [*] invers x = one [*] (x [*] invers x)"
- by (simp only: group_class.left_unit)
- also have "... = one [*] x [*] invers x"
- by (simp only: semigroup_class.assoc)
- also have "... = invers (invers x) [*] invers x [*] x [*] invers x"
- by (simp only: group_class.left_inverse)
- also have "... = invers (invers x) [*] (invers x [*] x) [*] invers x"
- by (simp only: semigroup_class.assoc)
- also have "... = invers (invers x) [*] one [*] invers x"
- by (simp only: group_class.left_inverse)
- also have "... = invers (invers x) [*] (one [*] invers x)"
- by (simp only: semigroup_class.assoc)
- also have "... = invers (invers x) [*] invers x"
- by (simp only: group_class.left_unit)
- also have "... = one"
- by (simp only: group_class.left_inverse)
- finally show ?thesis .
-qed
-
-theorem group_right_unit: "x [*] one = (x::'a::group)"
-proof -
- have "x [*] one = x [*] (invers x [*] x)"
- by (simp only: group_class.left_inverse)
- also have "... = x [*] invers x [*] x"
- by (simp only: semigroup_class.assoc)
- also have "... = one [*] x"
- by (simp only: group_right_inverse)
- also have "... = x"
- by (simp only: group_class.left_unit)
- finally show ?thesis .
-qed
-
-
-subsection {* Abstract instantiation *}
-
-instance monoid < semigroup
-proof intro_classes
- fix x y z :: "'a::monoid"
- show "x [*] y [*] z = x [*] (y [*] z)"
- by (rule monoid_class.assoc)
-qed
-
-instance group < monoid
-proof intro_classes
- fix x y z :: "'a::group"
- show "x [*] y [*] z = x [*] (y [*] z)"
- by (rule semigroup_class.assoc)
- show "one [*] x = x"
- by (rule group_class.left_unit)
- show "x [*] one = x"
- by (rule group_right_unit)
-qed
-
-
-subsection {* Concrete instantiation *}
-
-defs (overloaded)
- times_bool_def: "x [*] y == x ~= (y::bool)"
- inverse_bool_def: "invers x == x::bool"
- unit_bool_def: "one == False"
-
-instance bool :: agroup
-proof (intro_classes,
- unfold times_bool_def inverse_bool_def unit_bool_def)
- fix x y z
- show "((x ~= y) ~= z) = (x ~= (y ~= z))" by blast
- show "(False ~= x) = x" by blast
- show "(x ~= x) = False" by blast
- show "(x ~= y) = (y ~= x)" by blast
-qed
-
-
-subsection {* Lifting and Functors *}
-
-defs (overloaded)
- times_prod_def: "p [*] q == (fst p [*] fst q, snd p [*] snd q)"
-
-instance * :: (semigroup, semigroup) semigroup
-proof (intro_classes, unfold times_prod_def)
- fix p q r :: "'a::semigroup * 'b::semigroup"
- show
- "(fst (fst p [*] fst q, snd p [*] snd q) [*] fst r,
- snd (fst p [*] fst q, snd p [*] snd q) [*] snd r) =
- (fst p [*] fst (fst q [*] fst r, snd q [*] snd r),
- snd p [*] snd (fst q [*] fst r, snd q [*] snd r))"
- by (simp add: semigroup_class.assoc)
-qed
-
-end
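
For orientation (not part of the changeset): the axclass/instance
material deleted above is subsumed by the locale-based "class" command;
a rough modern counterpart of the semigroup/group fragment, with
illustrative constant names, might read:

  class semigroup =
    fixes mult :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infixl "[*]" 70)
    assumes assoc: "(x [*] y) [*] z = x [*] (y [*] z)"

  class group = semigroup +
    fixes one :: "'a" and inverse :: "'a \<Rightarrow> 'a"
    assumes left_unit: "one [*] x = x"
      and left_inverse: "inverse x [*] x = one"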
--- a/src/HOL/AxClasses/Lattice/OrdInsts.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-(* Title: OrdInsts.thy
- ID: $Id$
- Author: Markus Wenzel, TU Muenchen
-
-Some order instantiations.
-*)
-
-OrdInsts = OrdDefs +
-
-
-(* binary / general products of quasi_orders / orders *)
-
-instance
- "*" :: (quasi_order, quasi_order) quasi_order (le_prod_refl, le_prod_trans)
-
-instance
- "*" :: (partial_order, partial_order) partial_order (le_prod_antisym)
-
-
-instance
- fun :: (term, quasi_order) quasi_order (le_fun_refl, le_fun_trans)
-
-instance
- fun :: (term, partial_order) partial_order (le_fun_antisym)
-
-
-(* duals of quasi orders / partial orders / linear orders *)
-
-instance
- dual :: (quasi_order) quasi_order (le_dual_refl, le_dual_trans)
-
-instance
- dual :: (partial_order) partial_order (le_dual_antisym)
-
-
-(*FIXME: had to be moved to LatInsts.thy due to some unpleasant
- 'feature' in Pure/type.ML
-
-instance
- dual :: (linear_order) linear_order (le_dual_lin)
-*)
-
-end
--- a/src/HOL/AxClasses/Product.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-(* Title: HOL/AxClasses/Product.thy
- ID: $Id$
- Author: Markus Wenzel, TU Muenchen
-*)
-
-theory Product imports Main begin
-
-axclass product < type
-
-consts
- product :: "'a::product => 'a => 'a" (infixl "[*]" 70)
-
-
-instance bool :: product
- by intro_classes
-
-defs (overloaded)
- product_bool_def: "x [*] y == x & y"
-
-end
--- a/src/HOL/AxClasses/README.html Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,20 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-
-<!-- $Id$ -->
-
-<html>
-
-<head>
- <meta http-equiv="content-type" content="text/html;charset=iso-8859-1">
- <title>HOL/AxClasses</title>
-</head>
-
-<body>
-<h1>HOL/AxClasses</h1>
-
-These are the HOL examples of the tutorial <a
-href="http://isabelle.in.tum.de/dist/Isabelle/doc/axclass.pdf">Using Axiomatic Type
-Classes in Isabelle</a>. See also FOL/ex/NatClass for the natural
-number example.
-</body>
-</html>
--- a/src/HOL/AxClasses/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-(* $Id$ *)
-
-use_thys ["Semigroups", "Group", "Product"];
--- a/src/HOL/AxClasses/Semigroups.thy Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-(* Title: HOL/AxClasses/Semigroups.thy
- ID: $Id$
- Author: Markus Wenzel, TU Muenchen
-*)
-
-theory Semigroups imports Main begin
-
-consts
- times :: "'a => 'a => 'a" (infixl "[*]" 70)
-
-axclass semigroup < type
- assoc: "(x [*] y) [*] z = x [*] (y [*] z)"
-
-
-consts
- plus :: "'a => 'a => 'a" (infixl "[+]" 70)
-
-axclass plus_semigroup < type
- assoc: "(x [+] y) [+] z = x [+] (y [+] z)"
-
-end
--- a/src/HOL/Decision_Procs/Approximation.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Decision_Procs/Approximation.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,7 +1,9 @@
-(* Title: HOL/Reflection/Approximation.thy
- * Author: Johannes Hölzl <hoelzl@in.tum.de> 2008 / 2009
- *)
+(*  Title:      HOL/Decision_Procs/Approximation.thy
+ Author: Johannes Hoelzl <hoelzl@in.tum.de> 2008 / 2009
+*)
+
header {* Prove inequalities about real numbers by computation *}
+
theory Approximation
imports Complex_Main Float Reflection Dense_Linear_Order Efficient_Nat
begin
--- a/src/HOL/Decision_Procs/MIR.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Decision_Procs/MIR.thy Fri Feb 27 18:50:35 2009 +0100
@@ -83,7 +83,7 @@
have "real (floor x) \<le> x" by simp
hence "real (floor x) < real (n + 1) " using ub by arith
hence "floor x < n+1" by simp
- moreover from lb have "n \<le> floor x" using floor_mono2[where x="real n" and y="x"]
+ moreover from lb have "n \<le> floor x" using floor_mono[where x="real n" and y="x"]
by simp
ultimately show "floor x = n" by simp
qed
@@ -1775,11 +1775,11 @@
"(real (a::int) \<le> b) = (a \<le> floor b \<or> (a = floor b \<and> real (floor b) < b))"
proof( auto)
assume alb: "real a \<le> b" and agb: "\<not> a \<le> floor b"
- from alb have "floor (real a) \<le> floor b " by (simp only: floor_mono2)
+ from alb have "floor (real a) \<le> floor b " by (simp only: floor_mono)
hence "a \<le> floor b" by simp with agb show "False" by simp
next
assume alb: "a \<le> floor b"
- hence "real a \<le> real (floor b)" by (simp only: floor_mono2)
+ hence "real a \<le> real (floor b)" by (simp only: floor_mono)
also have "\<dots>\<le> b" by simp
finally show "real a \<le> b" .
qed
@@ -3697,7 +3697,7 @@
assumes xb: "real m \<le> x \<and> x < real ((n::int) + 1)"
shows "\<exists> j\<in> {m.. n}. real j \<le> x \<and> x < real (j+1)" (is "\<exists> j\<in> ?N. ?P j")
by (rule bexI[where P="?P" and x="floor x" and A="?N"])
-(auto simp add: floor_less_eq[where x="x" and a="n+1", simplified] xb[simplified] floor_mono2[where x="real m" and y="x", OF conjunct1[OF xb], simplified floor_real_of_int[where n="m"]])
+(auto simp add: floor_less_eq[where x="x" and a="n+1", simplified] xb[simplified] floor_mono[where x="real m" and y="x", OF conjunct1[OF xb], simplified floor_real_of_int[where n="m"]])
lemma rsplit0_complete:
assumes xp:"0 \<le> x" and x1:"x < 1"
@@ -5926,7 +5926,7 @@
apply mir
done
-lemma "ALL x y. \<lfloor>x\<rfloor> = \<lfloor>y\<rfloor> \<longrightarrow> 0 \<le> abs (y - x) \<and> abs (y - x) \<le> 1"
+lemma "ALL (x::real) (y::real). \<lfloor>x\<rfloor> = \<lfloor>y\<rfloor> \<longrightarrow> 0 \<le> abs (y - x) \<and> abs (y - x) \<le> 1"
apply mir
done
--- a/src/HOL/Fact.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Fact.thy Fri Feb 27 18:50:35 2009 +0100
@@ -58,7 +58,7 @@
"n < Suc m ==> fact (Suc m - n) = (Suc m - n) * fact (m - n)"
apply (induct n arbitrary: m)
apply auto
-apply (drule_tac x = "m - 1" in meta_spec, auto)
+apply (drule_tac x = "m - Suc 0" in meta_spec, auto)
done
lemma fact_num0: "fact 0 = 1"
--- a/src/HOL/GCD.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/GCD.thy Fri Feb 27 18:50:35 2009 +0100
@@ -60,9 +60,12 @@
lemma gcd_non_0: "n > 0 \<Longrightarrow> gcd m n = gcd n (m mod n)"
by simp
-lemma gcd_1 [simp, algebra]: "gcd m (Suc 0) = 1"
+lemma gcd_1 [simp, algebra]: "gcd m (Suc 0) = Suc 0"
by simp
+lemma nat_gcd_1_right [simp, algebra]: "gcd m 1 = 1"
+ unfolding One_nat_def by (rule gcd_1)
+
declare gcd.simps [simp del]
text {*
@@ -116,9 +119,12 @@
apply (blast intro: dvd_trans)
done
-lemma gcd_1_left [simp, algebra]: "gcd (Suc 0) m = 1"
+lemma gcd_1_left [simp, algebra]: "gcd (Suc 0) m = Suc 0"
by (simp add: gcd_commute)
+lemma nat_gcd_1_left [simp, algebra]: "gcd 1 m = 1"
+ unfolding One_nat_def by (rule gcd_1_left)
+
text {*
\medskip Multiplication laws
*}
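
A small sketch (illustrative only, not part of the changeset) of what
the two bridge lemmas buy: gcd goals stated with the literal 1 instead
of Suc 0 are again solved by simp:

  lemma "gcd m 1 * gcd 1 n = (1::nat)"
    by simp  (* nat_gcd_1_right, nat_gcd_1_left *)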
--- a/src/HOL/Integration.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Integration.thy Fri Feb 27 18:50:35 2009 +0100
@@ -134,7 +134,7 @@
apply (frule partition [THEN iffD1], safe)
apply (drule_tac x = "psize D" and P="%n. psize D \<le> n --> ?P n" in spec, safe)
apply (case_tac "psize D = 0")
-apply (drule_tac [2] n = "psize D - 1" in partition_lt, auto)
+apply (drule_tac [2] n = "psize D - Suc 0" in partition_lt, auto)
done
lemma partition_gt: "[|partition(a,b) D; n < (psize D)|] ==> D(n) < D(psize D)"
@@ -145,7 +145,7 @@
apply (rotate_tac 2)
apply (drule_tac x = "psize D" in spec)
apply (rule ccontr)
-apply (drule_tac n = "psize D - 1" in partition_lt)
+apply (drule_tac n = "psize D - Suc 0" in partition_lt)
apply auto
done
--- a/src/HOL/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/IsaMakefile Fri Feb 27 18:50:35 2009 +0100
@@ -13,7 +13,6 @@
HOL-Library \
HOL-ex \
HOL-Auth \
- HOL-AxClasses \
HOL-Bali \
HOL-Decision_Procs \
HOL-Extraction \
@@ -267,6 +266,7 @@
@$(ISABELLE_TOOL) usedir -b -f main.ML -g true $(OUT)/Pure HOL-Main
$(OUT)/HOL: ROOT.ML $(MAIN_DEPENDENCIES) \
+ Archimedean_Field.thy \
Complex_Main.thy \
Complex.thy \
Deriv.thy \
@@ -795,15 +795,6 @@
@$(ISABELLE_TOOL) usedir $(OUT)/HOL IOA
-## HOL-AxClasses
-
-HOL-AxClasses: HOL $(LOG)/HOL-AxClasses.gz
-
-$(LOG)/HOL-AxClasses.gz: $(OUT)/HOL AxClasses/Group.thy \
- AxClasses/Product.thy AxClasses/ROOT.ML AxClasses/Semigroups.thy
- @$(ISABELLE_TOOL) usedir $(OUT)/HOL AxClasses
-
-
## HOL-Lattice
HOL-Lattice: HOL $(LOG)/HOL-Lattice.gz
@@ -1067,22 +1058,22 @@
## clean
clean:
- @rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL $(OUT)/HOL-Nominal $(OUT)/TLA \
- $(LOG)/HOL.gz $(LOG)/TLA.gz \
- $(LOG)/HOL-Isar_examples.gz $(LOG)/HOL-Induct.gz \
- $(LOG)/HOL-ex.gz $(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz \
- $(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz \
- $(LOG)/HOL-HoareParallel.gz \
- $(LOG)/HOL-Lex.gz $(LOG)/HOL-Algebra.gz \
- $(LOG)/HOL-Auth.gz $(LOG)/HOL-UNITY.gz \
- $(LOG)/HOL-Modelcheck.gz $(LOG)/HOL-Lambda.gz \
- $(LOG)/HOL-Bali.gz \
- $(LOG)/HOL-MicroJava.gz $(LOG)/HOL-NanoJava.gz \
- $(LOG)/HOL-Nominal-Examples.gz \
- $(LOG)/HOL-IOA.gz $(LOG)/HOL-AxClasses \
- $(LOG)/HOL-Lattice $(LOG)/HOL-Matrix \
- $(LOG)/HOL-HahnBanach.gz $(LOG)/HOL-SET-Protocol.gz \
- $(LOG)/TLA-Inc.gz $(LOG)/TLA-Buffer.gz $(LOG)/TLA-Memory.gz \
- $(LOG)/HOL-Library.gz $(LOG)/HOL-Unix.gz \
- $(OUT)/HOL-Word $(LOG)/HOL-Word.gz $(LOG)/HOL-Word-Examples.gz \
- $(OUT)/HOL-NSA $(LOG)/HOL-NSA.gz $(LOG)/HOL-NSA-Examples.gz
+ @rm -f $(OUT)/HOL-Plain $(OUT)/HOL-Main $(OUT)/HOL \
+ $(OUT)/HOL-Nominal $(OUT)/TLA $(LOG)/HOL.gz \
+ $(LOG)/TLA.gz $(LOG)/HOL-Isar_examples.gz \
+ $(LOG)/HOL-Induct.gz $(LOG)/HOL-ex.gz \
+ $(LOG)/HOL-Subst.gz $(LOG)/HOL-IMP.gz \
+ $(LOG)/HOL-IMPP.gz $(LOG)/HOL-Hoare.gz \
+ $(LOG)/HOL-HoareParallel.gz $(LOG)/HOL-Lex.gz \
+ $(LOG)/HOL-Algebra.gz $(LOG)/HOL-Auth.gz \
+ $(LOG)/HOL-UNITY.gz $(LOG)/HOL-Modelcheck.gz \
+ $(LOG)/HOL-Lambda.gz $(LOG)/HOL-Bali.gz \
+ $(LOG)/HOL-MicroJava.gz $(LOG)/HOL-NanoJava.gz \
+ $(LOG)/HOL-Nominal-Examples.gz $(LOG)/HOL-IOA.gz \
+ $(LOG)/HOL-Lattice $(LOG)/HOL-Matrix \
+ $(LOG)/HOL-HahnBanach.gz $(LOG)/HOL-SET-Protocol.gz \
+ $(LOG)/TLA-Inc.gz $(LOG)/TLA-Buffer.gz \
+ $(LOG)/TLA-Memory.gz $(LOG)/HOL-Library.gz \
+ $(LOG)/HOL-Unix.gz $(OUT)/HOL-Word $(LOG)/HOL-Word.gz \
+ $(LOG)/HOL-Word-Examples.gz $(OUT)/HOL-NSA \
+ $(LOG)/HOL-NSA.gz $(LOG)/HOL-NSA-Examples.gz
--- a/src/HOL/Library/Bit.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Library/Bit.thy Fri Feb 27 18:50:35 2009 +0100
@@ -79,14 +79,8 @@
end
-lemma bit_1_plus_1 [simp]: "1 + 1 = (0 :: bit)"
- unfolding plus_bit_def by simp
-
-lemma bit_add_self [simp]: "x + x = (0 :: bit)"
- by (cases x) simp_all
-
-lemma bit_add_self_left [simp]: "x + (x + y) = (y :: bit)"
- by simp
+lemma bit_add_self: "x + x = (0 :: bit)"
+ unfolding plus_bit_def by (simp split: bit.split)
lemma bit_mult_eq_1_iff [simp]: "x * y = (1 :: bit) \<longleftrightarrow> x = 1 \<and> y = 1"
unfolding times_bit_def by (simp split: bit.split)
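
Since bit_add_self loses its [simp] status here, the cancellation must
now be requested explicitly; a minimal sketch (not part of the
changeset):

  lemma "x + x + y = (y :: bit)"
    by (simp add: bit_add_self)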
--- a/src/HOL/Library/Float.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Library/Float.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,7 +1,7 @@
-(* Title: HOL/Library/Float.thy
- * Author: Steven Obua 2008
- *              Johannes Hölzl, TU Muenchen <hoelzl@in.tum.de> 2008 / 2009
- *)
+(* Title: HOL/Library/Float.thy
+ Author: Steven Obua 2008
+ Author: Johannes Hoelzl, TU Muenchen <hoelzl@in.tum.de> 2008 / 2009
+*)
header {* Floating-Point Numbers *}
--- a/src/HOL/Library/reflection.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Library/reflection.ML Fri Feb 27 18:50:35 2009 +0100
@@ -88,17 +88,12 @@
fun dest_listT (Type ("List.list", [T])) = T;
-fun partition P [] = ([],[])
- | partition P (x::xs) =
- let val (yes,no) = partition P xs
- in if P x then (x::yes,no) else (yes, x::no) end
-
fun rearrange congs =
let
fun P (_, th) =
let val @{term "Trueprop"}$(Const ("op =",_) $l$_) = concl_of th
in can dest_Var l end
- val (yes,no) = partition P congs
+ val (yes,no) = List.partition P congs
in no @ yes end
fun genreif ctxt raw_eqs t =
--- a/src/HOL/List.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/List.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1438,10 +1438,10 @@
apply (auto split:nat.split)
done
-lemma last_conv_nth: "xs\<noteq>[] \<Longrightarrow> last xs = xs!(length xs - Suc 0)"
+lemma last_conv_nth: "xs\<noteq>[] \<Longrightarrow> last xs = xs!(length xs - 1)"
by(induct xs)(auto simp:neq_Nil_conv)
-lemma butlast_conv_take: "butlast xs = take (length xs - Suc 0) xs"
+lemma butlast_conv_take: "butlast xs = take (length xs - 1) xs"
by (induct xs, simp, case_tac xs, simp_all)
@@ -1461,6 +1461,12 @@
declare take_Cons [simp del] and drop_Cons [simp del]
+lemma take_1_Cons [simp]: "take 1 (x # xs) = [x]"
+ unfolding One_nat_def by simp
+
+lemma drop_1_Cons [simp]: "drop 1 (x # xs) = xs"
+ unfolding One_nat_def by simp
+
lemma take_Suc: "xs ~= [] ==> take (Suc n) xs = hd xs # take n (tl xs)"
by(clarsimp simp add:neq_Nil_conv)
@@ -1588,17 +1594,17 @@
done
lemma butlast_take:
- "n <= length xs ==> butlast (take n xs) = take (n - Suc 0) xs"
+ "n <= length xs ==> butlast (take n xs) = take (n - 1) xs"
by (simp add: butlast_conv_take min_max.inf_absorb1 min_max.inf_absorb2)
lemma butlast_drop: "butlast (drop n xs) = drop n (butlast xs)"
-by (simp add: butlast_conv_take drop_take)
+by (simp add: butlast_conv_take drop_take add_ac)
lemma take_butlast: "n < length xs ==> take n (butlast xs) = take n xs"
by (simp add: butlast_conv_take min_max.inf_absorb1)
lemma drop_butlast: "drop n (butlast xs) = butlast (drop n xs)"
-by (simp add: butlast_conv_take drop_take)
+by (simp add: butlast_conv_take drop_take add_ac)
lemma hd_drop_conv_nth: "\<lbrakk> xs \<noteq> []; n < length xs \<rbrakk> \<Longrightarrow> hd(drop n xs) = xs!n"
by(simp add: hd_conv_nth)
@@ -2458,7 +2464,7 @@
done
lemma length_remove1:
- "length(remove1 x xs) = (if x : set xs then length xs - Suc 0 else length xs)"
+ "length(remove1 x xs) = (if x : set xs then length xs - 1 else length xs)"
apply (induct xs)
apply (auto dest!:length_pos_if_in_set)
done
--- a/src/HOL/MacLaurin.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/MacLaurin.thy Fri Feb 27 18:50:35 2009 +0100
@@ -81,7 +81,7 @@
prefer 2 apply simp
apply (frule less_iff_Suc_add [THEN iffD1], clarify)
apply (simp del: setsum_op_ivl_Suc)
- apply (insert sumr_offset4 [of 1])
+ apply (insert sumr_offset4 [of "Suc 0"])
apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc)
apply (rule lemma_DERIV_subst)
apply (rule DERIV_add)
@@ -124,7 +124,7 @@
have g2: "g 0 = 0 & g h = 0"
apply (simp add: m f_h g_def del: setsum_op_ivl_Suc)
- apply (cut_tac n = m and k = 1 in sumr_offset2)
+ apply (cut_tac n = m and k = "Suc 0" in sumr_offset2)
apply (simp add: eq_diff_eq' diff_0 del: setsum_op_ivl_Suc)
done
@@ -144,7 +144,7 @@
apply (simp add: m difg_def)
apply (frule less_iff_Suc_add [THEN iffD1], clarify)
apply (simp del: setsum_op_ivl_Suc)
- apply (insert sumr_offset4 [of 1])
+ apply (insert sumr_offset4 [of "Suc 0"])
apply (simp del: setsum_op_ivl_Suc fact_Suc realpow_Suc)
done
@@ -552,6 +552,10 @@
"[|x = y; abs u \<le> (v::real) |] ==> \<bar>(x + u) - y\<bar> \<le> v"
by auto
+text {* TODO: move to Parity.thy *}
+lemma nat_odd_1 [simp]: "odd (1::nat)"
+ unfolding even_nat_def by simp
+
lemma Maclaurin_sin_bound:
"abs(sin x - (\<Sum>m=0..<n. (if even m then 0 else (-1 ^ ((m - Suc 0) div 2)) / real (fact m)) *
x ^ m)) \<le> inverse(real (fact n)) * \<bar>x\<bar> ^ n"
--- a/src/HOL/Nat.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nat.thy Fri Feb 27 18:50:35 2009 +0100
@@ -280,6 +280,9 @@
lemma diff_add_0: "n - (n + m) = (0::nat)"
by (induct n) simp_all
+lemma diff_Suc_1 [simp]: "Suc n - 1 = n"
+ unfolding One_nat_def by simp
+
text {* Difference distributes over multiplication *}
lemma diff_mult_distrib: "((m::nat) - n) * k = (m * k) - (n * k)"
@@ -698,6 +701,9 @@
lemma Suc_pred [simp]: "n>0 ==> Suc (n - Suc 0) = n"
by (simp add: diff_Suc split: nat.split)
+lemma Suc_diff_1 [simp]: "0 < n ==> Suc (n - 1) = n"
+unfolding One_nat_def by (rule Suc_pred)
+
lemma nat_add_left_cancel_le [simp]: "(k + m \<le> k + n) = (m\<le>(n::nat))"
by (induct k) simp_all
@@ -1132,7 +1138,7 @@
by (cases m) (auto intro: le_add1)
text {* Lemma for @{text gcd} *}
-lemma mult_eq_self_implies_10: "(m::nat) = m * n ==> n = Suc 0 | m = 0"
+lemma mult_eq_self_implies_10: "(m::nat) = m * n ==> n = 1 | m = 0"
apply (drule sym)
apply (rule disjCI)
apply (rule nat_less_cases, erule_tac [2] _)
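
A minimal sketch (not part of the changeset): with diff_Suc_1 and
Suc_diff_1 as [simp], the common "minus one" patterns on nat no longer
need One_nat_def unfolded by hand:

  lemma "Suc n - 1 = n"
    by simp

  lemma "0 < n \<Longrightarrow> Suc (n - 1) = n"
    by simp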
--- a/src/HOL/NatBin.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/NatBin.thy Fri Feb 27 18:50:35 2009 +0100
@@ -159,6 +159,21 @@
unfolding nat_number_of_def number_of_is_id numeral_simps
by (simp add: nat_add_distrib)
+lemma nat_number_of_add_1 [simp]:
+ "number_of v + (1::nat) =
+ (if v < Int.Pls then 1 else number_of (Int.succ v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by (simp add: nat_add_distrib)
+
+lemma nat_1_add_number_of [simp]:
+ "(1::nat) + number_of v =
+ (if v < Int.Pls then 1 else number_of (Int.succ v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by (simp add: nat_add_distrib)
+
+lemma nat_1_add_1 [simp]: "1 + 1 = (2::nat)"
+ by (rule int_int_eq [THEN iffD1]) simp
+
subsubsection{*Subtraction *}
@@ -178,6 +193,12 @@
unfolding nat_number_of_def number_of_is_id numeral_simps neg_def
by auto
+lemma nat_number_of_diff_1 [simp]:
+ "number_of v - (1::nat) =
+ (if v \<le> Int.Pls then 0 else number_of (Int.pred v))"
+ unfolding nat_number_of_def number_of_is_id numeral_simps
+ by auto
+
subsubsection{*Multiplication *}
--- a/src/HOL/Nominal/Examples/Fsub.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/Examples/Fsub.thy Fri Feb 27 18:50:35 2009 +0100
@@ -7,13 +7,18 @@
text{* Authors: Christian Urban,
Benjamin Pierce,
Dimitrios Vytiniotis
- Stephanie Weirich and
+ Stephanie Weirich
Steve Zdancewic
+ Julien Narboux
+ Stefan Berghofer
- with great help from Stefan Berghofer and Markus Wenzel. *}
+ with great help from Markus Wenzel. *}
section {* Types for Names, Nominal Datatype Declaration for Types and Terms *}
+no_syntax
+ "_Map" :: "maplets => 'a ~=> 'b" ("(1[_])")
+
text {* The main point of this solution is to use names everywhere (be they bound,
binding or free). In System \FSUB{} there are two kinds of names corresponding to
type-variables and to term-variables. These two kinds of names are represented in
@@ -31,30 +36,35 @@
nominal_datatype ty =
Tvar "tyvrs"
| Top
- | Arrow "ty" "ty" ("_ \<rightarrow> _" [100,100] 100)
+ | Arrow "ty" "ty" (infixr "\<rightarrow>" 200)
| Forall "\<guillemotleft>tyvrs\<guillemotright>ty" "ty"
nominal_datatype trm =
Var "vrs"
- | Lam "\<guillemotleft>vrs\<guillemotright>trm" "ty"
- | Tabs "\<guillemotleft>tyvrs\<guillemotright>trm" "ty"
- | App "trm" "trm"
- | Tapp "trm" "ty"
+ | Abs "\<guillemotleft>vrs\<guillemotright>trm" "ty"
+ | TAbs "\<guillemotleft>tyvrs\<guillemotright>trm" "ty"
+ | App "trm" "trm" (infixl "\<cdot>" 200)
+ | TApp "trm" "ty" (infixl "\<cdot>\<^sub>\<tau>" 200)
text {* To be polite to the eye, some more familiar notation is introduced.
Because of the change in the order of arguments, one needs to use
translation rules, instead of syntax annotations at the term-constructors
as given above for @{term "Arrow"}. *}
-syntax
- Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty" ("\<forall>[_<:_]._" [100,100,100] 100)
- Lam_syn :: "vrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("Lam [_:_]._" [100,100,100] 100)
- Tabs_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("Tabs [_<:_]._" [100,100,100] 100)
+abbreviation
+ Forall_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> ty" ("(3\<forall>_<:_./ _)" [0, 0, 10] 10)
+where
+ "\<forall>X<:T\<^isub>1. T\<^isub>2 \<equiv> ty.Forall X T\<^isub>2 T\<^isub>1"
-translations
- "\<forall>[X<:T\<^isub>1].T\<^isub>2" \<rightleftharpoons> "ty.Forall X T\<^isub>2 T\<^isub>1"
- "Lam [x:T].t" \<rightleftharpoons> "trm.Lam x t T"
- "Tabs [X<:T].t" \<rightleftharpoons> "trm.Tabs X t T"
+abbreviation
+ Abs_syn :: "vrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("(3\<lambda>_:_./ _)" [0, 0, 10] 10)
+where
+ "\<lambda>x:T. t \<equiv> trm.Abs x t T"
+
+abbreviation
+ TAbs_syn :: "tyvrs \<Rightarrow> ty \<Rightarrow> trm \<Rightarrow> trm" ("(3\<lambda>_<:_./ _)" [0, 0, 10] 10)
+where
+ "\<lambda>X<:T. t \<equiv> trm.TAbs X t T"
text {* Again there are numerous facts that are proved automatically for @{typ "ty"}
and @{typ "trm"}: for example that the set of free variables, i.e.~the @{text "support"},
@@ -64,13 +74,17 @@
and @{typ "trm"}s are equal: *}
lemma alpha_illustration:
- shows "\<forall>[X<:T].(Tvar X) = \<forall>[Y<:T].(Tvar Y)"
- and "Lam [x:T].(Var x) = Lam [y:T].(Var y)"
+ shows "(\<forall>X<:T. Tvar X) = (\<forall>Y<:T. Tvar Y)"
+ and "(\<lambda>x:T. Var x) = (\<lambda>y:T. Var y)"
by (simp_all add: ty.inject trm.inject alpha calc_atm fresh_atm)
section {* SubTyping Contexts *}
-types ty_context = "(tyvrs\<times>ty) list"
+nominal_datatype binding =
+ VarB vrs ty
+ | TVarB tyvrs ty
+
+types env = "binding list"
text {* Typing contexts are represented as lists that ``grow" on the left; we
thereby deviate from the convention in the POPLmark-paper. The lists contain
@@ -79,66 +93,139 @@
text {* In order to state validity-conditions for typing-contexts, the notion of
a @{text "domain"} of a typing-context is handy. *}
+nominal_primrec
+ "tyvrs_of" :: "binding \<Rightarrow> tyvrs set"
+where
+ "tyvrs_of (VarB x y) = {}"
+| "tyvrs_of (TVarB x y) = {x}"
+by auto
+
+nominal_primrec
+ "vrs_of" :: "binding \<Rightarrow> vrs set"
+where
+ "vrs_of (VarB x y) = {x}"
+| "vrs_of (TVarB x y) = {}"
+by auto
+
consts
- "domain" :: "ty_context \<Rightarrow> tyvrs set"
+ "ty_domain" :: "env \<Rightarrow> tyvrs set"
primrec
- "domain [] = {}"
- "domain (X#\<Gamma>) = {fst X}\<union>(domain \<Gamma>)"
+ "ty_domain [] = {}"
+ "ty_domain (X#\<Gamma>) = (tyvrs_of X)\<union>(ty_domain \<Gamma>)"
+
+consts
+ "trm_domain" :: "env \<Rightarrow> vrs set"
+primrec
+ "trm_domain [] = {}"
+ "trm_domain (X#\<Gamma>) = (vrs_of X)\<union>(trm_domain \<Gamma>)"
-lemma domain_eqvt[eqvt]:
+lemma vrs_of_eqvt[eqvt]:
+ fixes pi ::"tyvrs prm"
+ and pi'::"vrs prm"
+ shows "pi \<bullet>(tyvrs_of x) = tyvrs_of (pi\<bullet>x)"
+ and "pi'\<bullet>(tyvrs_of x) = tyvrs_of (pi'\<bullet>x)"
+ and "pi \<bullet>(vrs_of x) = vrs_of (pi\<bullet>x)"
+ and "pi'\<bullet>(vrs_of x) = vrs_of (pi'\<bullet>x)"
+by (nominal_induct x rule: binding.strong_induct) (simp_all add: tyvrs_of.simps eqvts)
+
+lemma domains_eqvt[eqvt]:
fixes pi::"tyvrs prm"
and pi'::"vrs prm"
- shows "pi\<bullet>(domain \<Gamma>) = domain (pi\<bullet>\<Gamma>)"
- and "pi'\<bullet>(domain \<Gamma>) = domain (pi'\<bullet>\<Gamma>)"
- by (induct \<Gamma>) (simp_all add: eqvts)
+ shows "pi \<bullet>(ty_domain \<Gamma>) = ty_domain (pi\<bullet>\<Gamma>)"
+ and "pi'\<bullet>(ty_domain \<Gamma>) = ty_domain (pi'\<bullet>\<Gamma>)"
+ and "pi \<bullet>(trm_domain \<Gamma>) = trm_domain (pi\<bullet>\<Gamma>)"
+ and "pi'\<bullet>(trm_domain \<Gamma>) = trm_domain (pi'\<bullet>\<Gamma>)"
+by (induct \<Gamma>) (simp_all add: eqvts)
+
+lemma finite_vrs:
+ shows "finite (tyvrs_of x)"
+ and "finite (vrs_of x)"
+by (nominal_induct rule:binding.strong_induct, auto)
+
+lemma finite_domains:
+ shows "finite (ty_domain \<Gamma>)"
+ and "finite (trm_domain \<Gamma>)"
+by (induct \<Gamma>, auto simp add: finite_vrs)
+
+lemma ty_domain_supp:
+ shows "(supp (ty_domain \<Gamma>)) = (ty_domain \<Gamma>)"
+ and "(supp (trm_domain \<Gamma>)) = (trm_domain \<Gamma>)"
+by (simp only: at_fin_set_supp at_tyvrs_inst at_vrs_inst finite_domains)+
-lemma finite_domain:
- shows "finite (domain \<Gamma>)"
+lemma ty_domain_inclusion:
+ assumes a: "(TVarB X T)\<in>set \<Gamma>"
+ shows "X\<in>(ty_domain \<Gamma>)"
+using a by (induct \<Gamma>, auto)
+
+lemma ty_binding_existence:
+ assumes "X \<in> (tyvrs_of a)"
+ shows "\<exists>T.(TVarB X T=a)"
+ using assms
+by (nominal_induct a rule: binding.strong_induct, auto)
+
+lemma ty_domain_existence:
+ assumes a: "X\<in>(ty_domain \<Gamma>)"
+ shows "\<exists>T.(TVarB X T)\<in>set \<Gamma>"
+ using a
+ apply (induct \<Gamma>, auto)
+ apply (subgoal_tac "\<exists>T.(TVarB X T=a)")
+ apply (auto)
+ apply (auto simp add: ty_binding_existence)
+done
+
+lemma domains_append:
+ shows "ty_domain (\<Gamma>@\<Delta>) = ((ty_domain \<Gamma>) \<union> (ty_domain \<Delta>))"
+ and "trm_domain (\<Gamma>@\<Delta>) = ((trm_domain \<Gamma>) \<union> (trm_domain \<Delta>))"
by (induct \<Gamma>, auto)
-lemma domain_supp:
- shows "(supp (domain \<Gamma>)) = (domain \<Gamma>)"
- by (simp only: at_fin_set_supp at_tyvrs_inst finite_domain)
-
-lemma domain_inclusion:
- assumes a: "(X,T)\<in>set \<Gamma>"
- shows "X\<in>(domain \<Gamma>)"
- using a by (induct \<Gamma>, auto)
+lemma ty_vrs_prm_simp:
+ fixes pi::"vrs prm"
+ and S::"ty"
+ shows "pi\<bullet>S = S"
+by (induct S rule: ty.induct) (auto simp add: calc_atm)
-lemma domain_existence:
- assumes a: "X\<in>(domain \<Gamma>)"
- shows "\<exists>T.(X,T)\<in>set \<Gamma>"
- using a by (induct \<Gamma>, auto)
+lemma fresh_ty_domain_cons:
+ fixes X::"tyvrs"
+ shows "X\<sharp>(ty_domain (Y#\<Gamma>)) = (X\<sharp>(tyvrs_of Y) \<and> X\<sharp>(ty_domain \<Gamma>))"
+ apply (nominal_induct rule:binding.strong_induct)
+ apply (auto)
+ apply (simp add: fresh_def supp_def eqvts)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)
+ apply (simp add: fresh_def supp_def eqvts)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)+
+ done
-lemma domain_append:
- shows "domain (\<Gamma>@\<Delta>) = ((domain \<Gamma>) \<union> (domain \<Delta>))"
- by (induct \<Gamma>, auto)
-
-lemma fresh_domain_cons:
- fixes X::"tyvrs"
- shows "X\<sharp>(domain (Y#\<Gamma>)) = (X\<sharp>(fst Y) \<and> X\<sharp>(domain \<Gamma>))"
- by (simp add: fresh_fin_insert pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst finite_domain)
+lemma tyvrs_fresh:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> a"
+ shows "X \<sharp> tyvrs_of a"
+ and "X \<sharp> vrs_of a"
+ using assms
+ apply (nominal_induct a rule:binding.strong_induct)
+ apply (auto)
+ apply (fresh_guess)+
+done
lemma fresh_domain:
fixes X::"tyvrs"
assumes a: "X\<sharp>\<Gamma>"
- shows "X\<sharp>(domain \<Gamma>)"
+ shows "X\<sharp>(ty_domain \<Gamma>)"
using a
apply(induct \<Gamma>)
apply(simp add: fresh_set_empty)
-apply(simp only: fresh_domain_cons)
-apply(auto simp add: fresh_prod fresh_list_cons)
+apply(simp only: fresh_ty_domain_cons)
+apply(auto simp add: fresh_prod fresh_list_cons tyvrs_fresh)
done
-text {* Not all lists of type @{typ "ty_context"} are well-formed. One condition
- requires that in @{term "(X,S)#\<Gamma>"} all free variables of @{term "S"} must be
- in the @{term "domain"} of @{term "\<Gamma>"}, that is @{term "S"} must be @{text "closed"}
+text {* Not all lists of type @{typ "env"} are well-formed. One condition
+ requires that in @{term "TVarB X S#\<Gamma>"} all free variables of @{term "S"} must be
+ in the @{term "ty_domain"} of @{term "\<Gamma>"}, that is @{term "S"} must be @{text "closed"}
in @{term "\<Gamma>"}. The set of free variables of @{term "S"} is the
@{text "support"} of @{term "S"}. *}
constdefs
- "closed_in" :: "ty \<Rightarrow> ty_context \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100)
- "S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(domain \<Gamma>)"
+ "closed_in" :: "ty \<Rightarrow> env \<Rightarrow> bool" ("_ closed'_in _" [100,100] 100)
+ "S closed_in \<Gamma> \<equiv> (supp S)\<subseteq>(ty_domain \<Gamma>)"
lemma closed_in_eqvt[eqvt]:
fixes pi::"tyvrs prm"
@@ -150,80 +237,148 @@
then show "(pi\<bullet>S) closed_in (pi\<bullet>\<Gamma>)" by (simp add: closed_in_def eqvts)
qed
-lemma ty_vrs_prm_simp:
+lemma tyvrs_vrs_prm_simp:
fixes pi::"vrs prm"
- and S::"ty"
- shows "pi\<bullet>S = S"
-by (induct S rule: ty.induct) (auto simp add: calc_atm)
+ shows "tyvrs_of (pi\<bullet>a) = tyvrs_of a"
+ apply (nominal_induct rule:binding.strong_induct)
+ apply (simp_all add: eqvts)
+ apply (simp add: dj_perm_forget[OF dj_tyvrs_vrs])
+ done
-lemma ty_context_vrs_prm_simp:
+lemma ty_vrs_fresh[fresh]:
+ fixes x::"vrs"
+ and T::"ty"
+ shows "x \<sharp> T"
+by (simp add: fresh_def supp_def ty_vrs_prm_simp)
+
+lemma ty_domain_vrs_prm_simp:
fixes pi::"vrs prm"
- and \<Gamma>::"ty_context"
- shows "pi\<bullet>\<Gamma> = \<Gamma>"
-by (induct \<Gamma>)
- (auto simp add: calc_atm ty_vrs_prm_simp)
+ and \<Gamma>::"env"
+ shows "(ty_domain (pi\<bullet>\<Gamma>)) = (ty_domain \<Gamma>)"
+ apply(induct \<Gamma>)
+ apply (simp add: eqvts)
+ apply(simp add: tyvrs_vrs_prm_simp)
+done
lemma closed_in_eqvt'[eqvt]:
fixes pi::"vrs prm"
assumes a: "S closed_in \<Gamma>"
shows "(pi\<bullet>S) closed_in (pi\<bullet>\<Gamma>)"
using a
-by (simp add: ty_vrs_prm_simp ty_context_vrs_prm_simp)
+by (simp add: closed_in_def ty_domain_vrs_prm_simp ty_vrs_prm_simp)
+
+lemma fresh_vrs_of:
+ fixes x::"vrs"
+ shows "x\<sharp>vrs_of b = x\<sharp>b"
+ by (nominal_induct b rule: binding.strong_induct)
+ (simp_all add: fresh_singleton [OF pt_vrs_inst at_vrs_inst] fresh_set_empty ty_vrs_fresh fresh_atm)
+
+lemma fresh_trm_domain:
+ fixes x::"vrs"
+ shows "x\<sharp> trm_domain \<Gamma> = x\<sharp>\<Gamma>"
+ by (induct \<Gamma>)
+ (simp_all add: fresh_set_empty fresh_list_cons
+ fresh_fin_union [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_domains finite_vrs fresh_vrs_of fresh_list_nil)
+
+lemma closed_in_fresh: "(X::tyvrs) \<sharp> ty_domain \<Gamma> \<Longrightarrow> T closed_in \<Gamma> \<Longrightarrow> X \<sharp> T"
+ by (auto simp add: closed_in_def fresh_def ty_domain_supp)
text {* Now validity of a context is a straightforward inductive definition. *}
-inductive
- valid_rel :: "ty_context \<Rightarrow> bool" ("\<turnstile> _ ok" [100] 100)
+inductive
+ valid_rel :: "env \<Rightarrow> bool" ("\<turnstile> _ ok" [100] 100)
where
- valid_nil[simp]: "\<turnstile> [] ok"
-| valid_cons[simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; X\<sharp>(domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> ((X,T)#\<Gamma>) ok"
+ valid_nil[simp]: "\<turnstile> [] ok"
+| valid_consT[simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; X\<sharp>(ty_domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> (TVarB X T#\<Gamma>) ok"
+| valid_cons [simp]: "\<lbrakk>\<turnstile> \<Gamma> ok; x\<sharp>(trm_domain \<Gamma>); T closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<turnstile> (VarB x T#\<Gamma>) ok"
equivariance valid_rel
-lemma validE:
- assumes a: "\<turnstile> ((X,T)#\<Gamma>) ok"
- shows "\<turnstile> \<Gamma> ok \<and> X\<sharp>(domain \<Gamma>) \<and> T closed_in \<Gamma>"
-using a by (cases, auto)
+declare binding.inject [simp add]
+declare trm.inject [simp add]
+
+inductive_cases validE[elim]: "\<turnstile> (TVarB X T#\<Gamma>) ok" "\<turnstile> (VarB x T#\<Gamma>) ok" "\<turnstile> (b#\<Gamma>) ok"
+
+declare binding.inject [simp del]
+declare trm.inject [simp del]
lemma validE_append:
assumes a: "\<turnstile> (\<Delta>@\<Gamma>) ok"
shows "\<turnstile> \<Gamma> ok"
- using a by (induct \<Delta>, auto dest: validE)
+ using a
+proof (induct \<Delta>)
+ case (Cons a \<Gamma>')
+ then show ?case
+ by (nominal_induct a rule:binding.strong_induct)
+ (auto elim: validE)
+qed (auto)
lemma replace_type:
- assumes a: "\<turnstile> (\<Delta>@(X,T)#\<Gamma>) ok"
+ assumes a: "\<turnstile> (\<Delta>@(TVarB X T)#\<Gamma>) ok"
and b: "S closed_in \<Gamma>"
- shows "\<turnstile> (\<Delta>@(X,S)#\<Gamma>) ok"
+ shows "\<turnstile> (\<Delta>@(TVarB X S)#\<Gamma>) ok"
using a b
-apply(induct \<Delta>)
-apply(auto dest!: validE intro!: valid_cons simp add: domain_append closed_in_def)
-done
+proof(induct \<Delta>)
+ case Nil
+ then show ?case by (auto elim: validE intro: valid_cons simp add: domains_append closed_in_def)
+next
+ case (Cons a \<Gamma>')
+ then show ?case
+ by (nominal_induct a rule:binding.strong_induct)
+ (auto elim: validE intro!: valid_cons simp add: domains_append closed_in_def)
+qed
text {* Well-formed contexts have a unique type-binding for a type-variable. *}
lemma uniqueness_of_ctxt:
- fixes \<Gamma>::"ty_context"
+ fixes \<Gamma>::"env"
assumes a: "\<turnstile> \<Gamma> ok"
- and b: "(X,T)\<in>set \<Gamma>"
- and c: "(X,S)\<in>set \<Gamma>"
+ and b: "(TVarB X T)\<in>set \<Gamma>"
+ and c: "(TVarB X S)\<in>set \<Gamma>"
shows "T=S"
using a b c
proof (induct)
- case valid_nil thus "T=S" by simp
-next
- case valid_cons
+ case (valid_consT \<Gamma> X' T')
moreover
- { fix \<Gamma>::"ty_context"
- assume a: "X\<sharp>(domain \<Gamma>)"
- have "\<not>(\<exists>T.(X,T)\<in>(set \<Gamma>))" using a
- proof (induct \<Gamma>)
- case (Cons Y \<Gamma>)
- thus "\<not> (\<exists>T.(X,T)\<in>set(Y#\<Gamma>))"
- by (simp only: fresh_domain_cons, auto simp add: fresh_atm)
+ { fix \<Gamma>'::"env"
+ assume a: "X'\<sharp>(ty_domain \<Gamma>')"
+ have "\<not>(\<exists>T.(TVarB X' T)\<in>(set \<Gamma>'))" using a
+ proof (induct \<Gamma>')
+ case (Cons Y \<Gamma>')
+ thus "\<not> (\<exists>T.(TVarB X' T)\<in>set(Y#\<Gamma>'))"
+ by (simp add: fresh_ty_domain_cons
+ fresh_fin_union[OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ finite_vrs finite_domains,
+ auto simp add: fresh_atm fresh_singleton [OF pt_tyvrs_inst at_tyvrs_inst])
qed (simp)
}
- ultimately show "T=S" by auto
-qed
+ ultimately show "T=S" by (auto simp add: binding.inject)
+qed (auto)
+
+lemma uniqueness_of_ctxt':
+ fixes \<Gamma>::"env"
+ assumes a: "\<turnstile> \<Gamma> ok"
+ and b: "(VarB x T)\<in>set \<Gamma>"
+ and c: "(VarB x S)\<in>set \<Gamma>"
+ shows "T=S"
+using a b c
+proof (induct)
+ case (valid_cons \<Gamma> x' T')
+ moreover
+ { fix \<Gamma>'::"env"
+ assume a: "x'\<sharp>(trm_domain \<Gamma>')"
+ have "\<not>(\<exists>T.(VarB x' T)\<in>(set \<Gamma>'))" using a
+ proof (induct \<Gamma>')
+ case (Cons y \<Gamma>')
+ thus "\<not> (\<exists>T.(VarB x' T)\<in>set(y#\<Gamma>'))"
+ by (simp add: fresh_fin_union[OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_vrs finite_domains,
+ auto simp add: fresh_atm fresh_singleton [OF pt_vrs_inst at_vrs_inst])
+ qed (simp)
+ }
+ ultimately show "T=S" by (auto simp add: binding.inject)
+qed (auto)
section {* Size and Capture-Avoiding Substitution for Types *}
@@ -233,7 +388,7 @@
"size_ty (Tvar X) = 1"
| "size_ty (Top) = 1"
| "size_ty (T1 \<rightarrow> T2) = (size_ty T1) + (size_ty T2) + 1"
-| "X\<sharp>T1 \<Longrightarrow> size_ty (\<forall>[X<:T1].T2) = (size_ty T1) + (size_ty T2) + 1"
+| "X \<sharp> T1 \<Longrightarrow> size_ty (\<forall>X<:T1. T2) = (size_ty T1) + (size_ty T2) + 1"
apply (finite_guess)+
apply (rule TrueI)+
apply (simp add: fresh_nat)
@@ -241,24 +396,195 @@
done
nominal_primrec
- subst_ty :: "ty \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty" ("_[_:=_]\<^isub>t\<^isub>y" [100,100,100] 100)
+ subst_ty :: "ty \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty" ("_[_ \<mapsto> _]\<^sub>\<tau>" [300, 0, 0] 300)
where
- "(Tvar X)[Y:=T]\<^isub>t\<^isub>y= (if X=Y then T else (Tvar X))"
-| "(Top)[Y:=T]\<^isub>t\<^isub>y = Top"
-| "(T\<^isub>1 \<rightarrow> T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (T\<^isub>1[Y:=T]\<^isub>t\<^isub>y) \<rightarrow> (T\<^isub>2[Y:=T]\<^isub>t\<^isub>y)"
-| "\<lbrakk>X\<sharp>(Y,T); X\<sharp>T\<^isub>1\<rbrakk> \<Longrightarrow> (\<forall>[X<:T\<^isub>1].T\<^isub>2)[Y:=T]\<^isub>t\<^isub>y = (\<forall>[X<:(T\<^isub>1[Y:=T]\<^isub>t\<^isub>y)].(T\<^isub>2[Y:=T]\<^isub>t\<^isub>y))"
+ "(Tvar X)[Y \<mapsto> T]\<^sub>\<tau> = (if X=Y then T else Tvar X)"
+| "(Top)[Y \<mapsto> T]\<^sub>\<tau> = Top"
+| "(T\<^isub>1 \<rightarrow> T\<^isub>2)[Y \<mapsto> T]\<^sub>\<tau> = T\<^isub>1[Y \<mapsto> T]\<^sub>\<tau> \<rightarrow> T\<^isub>2[Y \<mapsto> T]\<^sub>\<tau>"
+| "\<lbrakk>X\<sharp>(Y,T); X\<sharp>T\<^isub>1\<rbrakk> \<Longrightarrow> (\<forall>X<:T\<^isub>1. T\<^isub>2)[Y \<mapsto> T]\<^sub>\<tau> = (\<forall>X<:T\<^isub>1[Y \<mapsto> T]\<^sub>\<tau>. T\<^isub>2[Y \<mapsto> T]\<^sub>\<tau>)"
apply (finite_guess)+
apply (rule TrueI)+
apply (simp add: abs_fresh)
apply (fresh_guess)+
done
+lemma subst_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and T::"ty"
+ shows "pi\<bullet>(T[X \<mapsto> T']\<^sub>\<tau>) = (pi\<bullet>T)[(pi\<bullet>X) \<mapsto> (pi\<bullet>T')]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (perm_simp add: fresh_bij)+
+
+lemma subst_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and T::"ty"
+ shows "pi\<bullet>(T[X \<mapsto> T']\<^sub>\<tau>) = (pi\<bullet>T)[(pi\<bullet>X) \<mapsto> (pi\<bullet>T')]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma type_subst_fresh[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> T" and "X \<sharp> P"
+ shows "X \<sharp> T[Y \<mapsto> P]\<^sub>\<tau>"
+using assms
+by (nominal_induct T avoiding: X Y P rule:ty.strong_induct)
+ (auto simp add: abs_fresh)
+
+lemma fresh_type_subst_fresh[fresh]:
+ assumes "X\<sharp>T'"
+ shows "X\<sharp>T[X \<mapsto> T']\<^sub>\<tau>"
+using assms
+by (nominal_induct T avoiding: X T' rule: ty.strong_induct)
+ (auto simp add: fresh_atm abs_fresh fresh_nat)
+
+lemma type_subst_identity: "X \<sharp> T \<Longrightarrow> T[X \<mapsto> U]\<^sub>\<tau> = T"
+ by (nominal_induct T avoiding: X U rule: ty.strong_induct)
+ (simp_all add: fresh_atm abs_fresh)
+
+lemma type_substitution_lemma:
+ "X \<noteq> Y \<Longrightarrow> X \<sharp> L \<Longrightarrow> M[X \<mapsto> N]\<^sub>\<tau>[Y \<mapsto> L]\<^sub>\<tau> = M[Y \<mapsto> L]\<^sub>\<tau>[X \<mapsto> N[Y \<mapsto> L]\<^sub>\<tau>]\<^sub>\<tau>"
+ by (nominal_induct M avoiding: X Y N L rule: ty.strong_induct)
+ (auto simp add: type_subst_fresh type_subst_identity)
+
+lemma type_subst_rename:
+ "Y \<sharp> T \<Longrightarrow> ([(Y, X)] \<bullet> T)[Y \<mapsto> U]\<^sub>\<tau> = T[X \<mapsto> U]\<^sub>\<tau>"
+ by (nominal_induct T avoiding: X Y U rule: ty.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux)
+
+nominal_primrec
+ subst_tyb :: "binding \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> binding" ("_[_ \<mapsto> _]\<^sub>b" [100,100,100] 100)
+where
+ "(TVarB X U)[Y \<mapsto> T]\<^sub>b = TVarB X (U[Y \<mapsto> T]\<^sub>\<tau>)"
+| "(VarB X U)[Y \<mapsto> T]\<^sub>b = VarB X (U[Y \<mapsto> T]\<^sub>\<tau>)"
+by auto
+
+lemma binding_subst_fresh[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> a"
+ and "X \<sharp> P"
+ shows "X \<sharp> a[Y \<mapsto> P]\<^sub>b"
+using assms
+by (nominal_induct a rule:binding.strong_induct)
+ (auto simp add: freshs)
+
+lemma binding_subst_identity: "X \<sharp> B \<Longrightarrow> B[X \<mapsto> U]\<^sub>b = B"
+ by (induct B rule: binding.induct)
+ (simp_all add: fresh_atm type_subst_identity)
+
consts
- subst_tyc :: "ty_context \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> ty_context" ("_[_:=_]\<^isub>t\<^isub>y\<^isub>c" [100,100,100] 100)
+ subst_tyc :: "env \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> env" ("_[_ \<mapsto> _]\<^sub>e" [100,100,100] 100)
+
primrec
-"([])[Y:=T]\<^isub>t\<^isub>y\<^isub>c= []"
-"(XT#\<Gamma>)[Y:=T]\<^isub>t\<^isub>y\<^isub>c = (fst XT,(snd XT)[Y:=T]\<^isub>t\<^isub>y)#(\<Gamma>[Y:=T]\<^isub>t\<^isub>y\<^isub>c)"
-
+"([])[Y \<mapsto> T]\<^sub>e= []"
+"(B#\<Gamma>)[Y \<mapsto> T]\<^sub>e = (B[Y \<mapsto> T]\<^sub>b)#(\<Gamma>[Y \<mapsto> T]\<^sub>e)"
+
+lemma ctxt_subst_fresh'[fresh]:
+ fixes X::"tyvrs"
+ assumes "X \<sharp> \<Gamma>"
+ and "X \<sharp> P"
+ shows "X \<sharp> \<Gamma>[Y \<mapsto> P]\<^sub>e"
+using assms
+by (induct \<Gamma>)
+ (auto simp add: fresh_list_cons freshs)
+
+lemma ctxt_subst_mem_TVarB: "TVarB X T \<in> set \<Gamma> \<Longrightarrow> TVarB X (T[Y \<mapsto> U]\<^sub>\<tau>) \<in> set (\<Gamma>[Y \<mapsto> U]\<^sub>e)"
+ by (induct \<Gamma>) auto
+
+lemma ctxt_subst_mem_VarB: "VarB x T \<in> set \<Gamma> \<Longrightarrow> VarB x (T[Y \<mapsto> U]\<^sub>\<tau>) \<in> set (\<Gamma>[Y \<mapsto> U]\<^sub>e)"
+ by (induct \<Gamma>) auto
+
+lemma ctxt_subst_identity: "X \<sharp> \<Gamma> \<Longrightarrow> \<Gamma>[X \<mapsto> U]\<^sub>e = \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: fresh_list_cons binding_subst_identity)
+
+lemma ctxt_subst_append: "(\<Delta> @ \<Gamma>)[X \<mapsto> T]\<^sub>e = \<Delta>[X \<mapsto> T]\<^sub>e @ \<Gamma>[X \<mapsto> T]\<^sub>e"
+ by (induct \<Delta>) simp_all
+
+nominal_primrec
+ subst_trm :: "trm \<Rightarrow> vrs \<Rightarrow> trm \<Rightarrow> trm" ("_[_ \<mapsto> _]" [300, 0, 0] 300)
+where
+ "(Var x)[y \<mapsto> t'] = (if x=y then t' else (Var x))"
+| "(t1 \<cdot> t2)[y \<mapsto> t'] = t1[y \<mapsto> t'] \<cdot> t2[y \<mapsto> t']"
+| "(t \<cdot>\<^sub>\<tau> T)[y \<mapsto> t'] = t[y \<mapsto> t'] \<cdot>\<^sub>\<tau> T"
+| "X\<sharp>(T,t') \<Longrightarrow> (\<lambda>X<:T. t)[y \<mapsto> t'] = (\<lambda>X<:T. t[y \<mapsto> t'])"
+| "x\<sharp>(y,t') \<Longrightarrow> (\<lambda>x:T. t)[y \<mapsto> t'] = (\<lambda>x:T. t[y \<mapsto> t'])"
+apply(finite_guess)+
+apply(rule TrueI)+
+apply(simp add: abs_fresh)+
+apply(fresh_guess add: ty_vrs_fresh abs_fresh)+
+done
+
+lemma subst_trm_fresh_tyvar:
+ "(X::tyvrs) \<sharp> t \<Longrightarrow> X \<sharp> u \<Longrightarrow> X \<sharp> t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (auto simp add: trm.fresh abs_fresh)
+
+lemma subst_trm_fresh_var: "x \<sharp> u \<Longrightarrow> x \<sharp> t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (simp_all add: abs_fresh fresh_atm ty_vrs_fresh)
+
+lemma subst_trm_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[x \<mapsto> u]) = (pi\<bullet>t)[(pi\<bullet>x) \<mapsto> (pi\<bullet>u)]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma subst_trm_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[x \<mapsto> u]) = (pi\<bullet>t)[(pi\<bullet>x) \<mapsto> (pi\<bullet>u)]"
+ by (nominal_induct t avoiding: x u rule: trm.strong_induct)
+ (perm_simp add: fresh_left)+
+
+lemma subst_trm_rename:
+ "y \<sharp> t \<Longrightarrow> ([(y, x)] \<bullet> t)[y \<mapsto> u] = t[x \<mapsto> u]"
+ by (nominal_induct t avoiding: x y u rule: trm.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux ty_vrs_fresh perm_fresh_fresh)
+
+nominal_primrec (freshness_context: "T2::ty")
+ subst_trm_ty :: "trm \<Rightarrow> tyvrs \<Rightarrow> ty \<Rightarrow> trm" ("_[_ \<mapsto>\<^sub>\<tau> _]" [300, 0, 0] 300)
+where
+ "(Var x)[Y \<mapsto>\<^sub>\<tau> T2] = Var x"
+| "(t1 \<cdot> t2)[Y \<mapsto>\<^sub>\<tau> T2] = t1[Y \<mapsto>\<^sub>\<tau> T2] \<cdot> t2[Y \<mapsto>\<^sub>\<tau> T2]"
+| "(t1 \<cdot>\<^sub>\<tau> T)[Y \<mapsto>\<^sub>\<tau> T2] = t1[Y \<mapsto>\<^sub>\<tau> T2] \<cdot>\<^sub>\<tau> T[Y \<mapsto> T2]\<^sub>\<tau>"
+| "X\<sharp>(Y,T,T2) \<Longrightarrow> (\<lambda>X<:T. t)[Y \<mapsto>\<^sub>\<tau> T2] = (\<lambda>X<:T[Y \<mapsto> T2]\<^sub>\<tau>. t[Y \<mapsto>\<^sub>\<tau> T2])"
+| "(\<lambda>x:T. t)[Y \<mapsto>\<^sub>\<tau> T2] = (\<lambda>x:T[Y \<mapsto> T2]\<^sub>\<tau>. t[Y \<mapsto>\<^sub>\<tau> T2])"
+apply(finite_guess)+
+apply(rule TrueI)+
+apply(simp add: abs_fresh ty_vrs_fresh)+
+apply(simp add: type_subst_fresh)
+apply(fresh_guess add: ty_vrs_fresh abs_fresh)+
+done
+
+lemma subst_trm_ty_fresh:
+ "(X::tyvrs) \<sharp> t \<Longrightarrow> X \<sharp> T \<Longrightarrow> X \<sharp> t[Y \<mapsto>\<^sub>\<tau> T]"
+ by (nominal_induct t avoiding: Y T rule: trm.strong_induct)
+ (auto simp add: abs_fresh type_subst_fresh)
+
+lemma subst_trm_ty_fresh':
+ "X \<sharp> T \<Longrightarrow> X \<sharp> t[X \<mapsto>\<^sub>\<tau> T]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (simp_all add: abs_fresh fresh_type_subst_fresh fresh_atm)
+
+lemma subst_trm_ty_eqvt[eqvt]:
+ fixes pi::"tyvrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[X \<mapsto>\<^sub>\<tau> T]) = (pi\<bullet>t)[(pi\<bullet>X) \<mapsto>\<^sub>\<tau> (pi\<bullet>T)]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (perm_simp add: fresh_bij subst_eqvt)+
+
+lemma subst_trm_ty_eqvt'[eqvt]:
+ fixes pi::"vrs prm"
+ and t::"trm"
+ shows "pi\<bullet>(t[X \<mapsto>\<^sub>\<tau> T]) = (pi\<bullet>t)[(pi\<bullet>X) \<mapsto>\<^sub>\<tau> (pi\<bullet>T)]"
+ by (nominal_induct t avoiding: X T rule: trm.strong_induct)
+ (perm_simp add: fresh_left subst_eqvt')+
+
+lemma subst_trm_ty_rename:
+ "Y \<sharp> t \<Longrightarrow> ([(Y, X)] \<bullet> t)[Y \<mapsto>\<^sub>\<tau> U] = t[X \<mapsto>\<^sub>\<tau> U]"
+ by (nominal_induct t avoiding: X Y U rule: trm.strong_induct)
+ (simp_all add: fresh_atm calc_atm abs_fresh fresh_aux type_subst_rename)
+
section {* Subtyping-Relation *}
text {* The definition for the subtyping-relation follows quite closely what is written
@@ -269,13 +595,13 @@
$\alpha$-equivalence classes.) *}
inductive
- subtype_of :: "ty_context \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool" ("_\<turnstile>_<:_" [100,100,100] 100)
+ subtype_of :: "env \<Rightarrow> ty \<Rightarrow> ty \<Rightarrow> bool" ("_\<turnstile>_<:_" [100,100,100] 100)
where
- S_Top[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; S closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> S <: Top"
-| S_Var[intro]: "\<lbrakk>(X,S) \<in> set \<Gamma>; \<Gamma> \<turnstile> S <: T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (Tvar X) <: T"
-| S_Refl[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; X \<in> domain \<Gamma>\<rbrakk>\<Longrightarrow> \<Gamma> \<turnstile> Tvar X <: Tvar X"
-| S_Arrow[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (S\<^isub>1 \<rightarrow> S\<^isub>2) <: (T\<^isub>1 \<rightarrow> T\<^isub>2)"
-| S_Forall[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; X\<sharp>\<Gamma>; ((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"
+ SA_Top[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; S closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> S <: Top"
+| SA_refl_TVar[intro]: "\<lbrakk>\<turnstile> \<Gamma> ok; X \<in> ty_domain \<Gamma>\<rbrakk>\<Longrightarrow> \<Gamma> \<turnstile> Tvar X <: Tvar X"
+| SA_trans_TVar[intro]: "\<lbrakk>(TVarB X S) \<in> set \<Gamma>; \<Gamma> \<turnstile> S <: T\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (Tvar X) <: T"
+| SA_arrow[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (S\<^isub>1 \<rightarrow> S\<^isub>2) <: (T\<^isub>1 \<rightarrow> T\<^isub>2)"
+| SA_all[intro]: "\<lbrakk>\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1; ((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"
lemma subtype_implies_ok:
fixes X::"tyvrs"
@@ -288,15 +614,15 @@
shows "S closed_in \<Gamma> \<and> T closed_in \<Gamma>"
using a
proof (induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
have "Top closed_in \<Gamma>" by (simp add: closed_in_def ty.supp)
moreover
have "S closed_in \<Gamma>" by fact
ultimately show "S closed_in \<Gamma> \<and> Top closed_in \<Gamma>" by simp
next
- case (S_Var X S \<Gamma> T)
- have "(X,S)\<in>set \<Gamma>" by fact
- hence "X \<in> domain \<Gamma>" by (rule domain_inclusion)
+ case (SA_trans_TVar X S \<Gamma> T)
+ have "(TVarB X S)\<in>set \<Gamma>" by fact
+ hence "X \<in> ty_domain \<Gamma>" by (rule ty_domain_inclusion)
hence "(Tvar X) closed_in \<Gamma>" by (simp add: closed_in_def ty.supp supp_atm)
moreover
have "S closed_in \<Gamma> \<and> T closed_in \<Gamma>" by fact
@@ -311,20 +637,33 @@
shows "X\<sharp>S \<and> X\<sharp>T"
proof -
from a1 have "\<turnstile> \<Gamma> ok" by (rule subtype_implies_ok)
- with a2 have "X\<sharp>domain(\<Gamma>)" by (simp add: fresh_domain)
+ with a2 have "X\<sharp>ty_domain(\<Gamma>)" by (simp add: fresh_domain)
moreover
from a1 have "S closed_in \<Gamma> \<and> T closed_in \<Gamma>" by (rule subtype_implies_closed)
- hence "supp S \<subseteq> ((supp (domain \<Gamma>))::tyvrs set)"
- and "supp T \<subseteq> ((supp (domain \<Gamma>))::tyvrs set)" by (simp_all add: domain_supp closed_in_def)
+ hence "supp S \<subseteq> ((supp (ty_domain \<Gamma>))::tyvrs set)"
+ and "supp T \<subseteq> ((supp (ty_domain \<Gamma>))::tyvrs set)" by (simp_all add: ty_domain_supp closed_in_def)
ultimately show "X\<sharp>S \<and> X\<sharp>T" by (force simp add: supp_prod fresh_def)
qed
+lemma valid_ty_domain_fresh:
+ fixes X::"tyvrs"
+ assumes valid: "\<turnstile> \<Gamma> ok"
+ shows "X\<sharp>(ty_domain \<Gamma>) = X\<sharp>\<Gamma>"
+ using valid
+ apply induct
+ apply (simp add: fresh_list_nil fresh_set_empty)
+ apply (simp_all add: binding.fresh fresh_list_cons
+ fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains fresh_atm)
+ apply (auto simp add: closed_in_fresh)
+ done
+
equivariance subtype_of
-nominal_inductive subtype_of
- by (simp_all add: abs_fresh subtype_implies_fresh)
-
-thm subtype_of.strong_induct
+nominal_inductive subtype_of
+ apply (simp_all add: abs_fresh)
+ apply (fastsimp simp add: valid_ty_domain_fresh dest: subtype_implies_ok)
+ apply (force simp add: closed_in_fresh dest: subtype_implies_closed subtype_implies_ok)+
+ done
section {* Reflexivity of Subtyping *}
@@ -338,17 +677,17 @@
have ih_T\<^isub>1: "\<And>\<Gamma>. \<lbrakk>\<turnstile> \<Gamma> ok; T\<^isub>1 closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> T\<^isub>1 <: T\<^isub>1" by fact
have ih_T\<^isub>2: "\<And>\<Gamma>. \<lbrakk>\<turnstile> \<Gamma> ok; T\<^isub>2 closed_in \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>2" by fact
have fresh_cond: "X\<sharp>\<Gamma>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Gamma>)" by (simp add: fresh_domain)
- have "(\<forall>[X<:T\<^isub>2].T\<^isub>1) closed_in \<Gamma>" by fact
- hence closed\<^isub>T\<^isub>2: "T\<^isub>2 closed_in \<Gamma>" and closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in ((X,T\<^isub>2)#\<Gamma>)"
+ hence fresh_ty_domain: "X\<sharp>(ty_domain \<Gamma>)" by (simp add: fresh_domain)
+ have "(\<forall>X<:T\<^isub>2. T\<^isub>1) closed_in \<Gamma>" by fact
+ hence closed\<^isub>T\<^isub>2: "T\<^isub>2 closed_in \<Gamma>" and closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in ((TVarB X T\<^isub>2)#\<Gamma>)"
by (auto simp add: closed_in_def ty.supp abs_supp)
have ok: "\<turnstile> \<Gamma> ok" by fact
- hence ok': "\<turnstile> ((X,T\<^isub>2)#\<Gamma>) ok" using closed\<^isub>T\<^isub>2 fresh_domain by simp
+ hence ok': "\<turnstile> ((TVarB X T\<^isub>2)#\<Gamma>) ok" using closed\<^isub>T\<^isub>2 fresh_ty_domain by simp
have "\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>2" using ih_T\<^isub>2 closed\<^isub>T\<^isub>2 ok by simp
moreover
- have "((X,T\<^isub>2)#\<Gamma>) \<turnstile> T\<^isub>1 <: T\<^isub>1" using ih_T\<^isub>1 closed\<^isub>T\<^isub>1 ok' by simp
- ultimately show "\<Gamma> \<turnstile> \<forall>[X<:T\<^isub>2].T\<^isub>1 <: \<forall>[X<:T\<^isub>2].T\<^isub>1" using fresh_cond
- by (simp add: subtype_of.S_Forall)
+ have "((TVarB X T\<^isub>2)#\<Gamma>) \<turnstile> T\<^isub>1 <: T\<^isub>1" using ih_T\<^isub>1 closed\<^isub>T\<^isub>1 ok' by simp
+ ultimately show "\<Gamma> \<turnstile> (\<forall>X<:T\<^isub>2. T\<^isub>1) <: (\<forall>X<:T\<^isub>2. T\<^isub>1)" using fresh_cond
+ by (simp add: subtype_of.SA_all)
qed (auto simp add: closed_in_def ty.supp supp_atm)
lemma subtype_reflexivity_semiautomated:
@@ -361,11 +700,10 @@
--{* Too bad that this instantiation cannot be found automatically by
\isakeyword{auto}; \isakeyword{blast} would find it if we had not used
an explicit definition for @{text "closed_in_def"}. *}
-apply(drule_tac x="(tyvrs, ty2)#\<Gamma>" in meta_spec)
+apply(drule_tac x="(TVarB tyvrs ty2)#\<Gamma>" in meta_spec)
apply(force dest: fresh_domain simp add: closed_in_def)
done
-
section {* Weakening *}
text {* In order to prove weakening we introduce the notion of a type-context extending
@@ -373,16 +711,16 @@
smoother than if we had strictly adhered to the version in the POPLmark-paper. *}
constdefs
- extends :: "ty_context \<Rightarrow> ty_context \<Rightarrow> bool" ("_ extends _" [100,100] 100)
- "\<Delta> extends \<Gamma> \<equiv> \<forall>X Q. (X,Q)\<in>set \<Gamma> \<longrightarrow> (X,Q)\<in>set \<Delta>"
+ extends :: "env \<Rightarrow> env \<Rightarrow> bool" ("_ extends _" [100,100] 100)
+ "\<Delta> extends \<Gamma> \<equiv> \<forall>X Q. (TVarB X Q)\<in>set \<Gamma> \<longrightarrow> (TVarB X Q)\<in>set \<Delta>"
-lemma extends_domain:
+lemma extends_ty_domain:
assumes a: "\<Delta> extends \<Gamma>"
- shows "domain \<Gamma> \<subseteq> domain \<Delta>"
+ shows "ty_domain \<Gamma> \<subseteq> ty_domain \<Delta>"
using a
apply (auto simp add: extends_def)
- apply (drule domain_existence)
- apply (force simp add: domain_inclusion)
+ apply (drule ty_domain_existence)
+ apply (force simp add: ty_domain_inclusion)
done
lemma extends_closed:
@@ -390,12 +728,12 @@
and a2: "\<Delta> extends \<Gamma>"
shows "T closed_in \<Delta>"
using a1 a2
- by (auto dest: extends_domain simp add: closed_in_def)
+ by (auto dest: extends_ty_domain simp add: closed_in_def)
lemma extends_memb:
assumes a: "\<Delta> extends \<Gamma>"
- and b: "(X,T) \<in> set \<Gamma>"
- shows "(X,T) \<in> set \<Delta>"
+ and b: "(TVarB X T) \<in> set \<Gamma>"
+ shows "(TVarB X T) \<in> set \<Delta>"
using a b by (simp add: extends_def)
lemma weakening:
@@ -405,7 +743,7 @@
shows "\<Delta> \<turnstile> S <: T"
using a b c
proof (nominal_induct \<Gamma> S T avoiding: \<Delta> rule: subtype_of.strong_induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
have lh_drv_prem: "S closed_in \<Gamma>" by fact
have "\<turnstile> \<Delta> ok" by fact
moreover
@@ -413,43 +751,43 @@
hence "S closed_in \<Delta>" using lh_drv_prem by (simp only: extends_closed)
ultimately show "\<Delta> \<turnstile> S <: Top" by force
next
- case (S_Var X S \<Gamma> T)
- have lh_drv_prem: "(X,S) \<in> set \<Gamma>" by fact
+ case (SA_trans_TVar X S \<Gamma> T)
+ have lh_drv_prem: "(TVarB X S) \<in> set \<Gamma>" by fact
have ih: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> S <: T" by fact
have ok: "\<turnstile> \<Delta> ok" by fact
have extends: "\<Delta> extends \<Gamma>" by fact
- have "(X,S) \<in> set \<Delta>" using lh_drv_prem extends by (simp only: extends_memb)
+ have "(TVarB X S) \<in> set \<Delta>" using lh_drv_prem extends by (simp only: extends_memb)
moreover
have "\<Delta> \<turnstile> S <: T" using ok extends ih by simp
ultimately show "\<Delta> \<turnstile> Tvar X <: T" using ok by force
next
- case (S_Refl \<Gamma> X)
- have lh_drv_prem: "X \<in> domain \<Gamma>" by fact
+ case (SA_refl_TVar \<Gamma> X)
+ have lh_drv_prem: "X \<in> ty_domain \<Gamma>" by fact
have "\<turnstile> \<Delta> ok" by fact
moreover
have "\<Delta> extends \<Gamma>" by fact
- hence "X \<in> domain \<Delta>" using lh_drv_prem by (force dest: extends_domain)
+ hence "X \<in> ty_domain \<Delta>" using lh_drv_prem by (force dest: extends_ty_domain)
ultimately show "\<Delta> \<turnstile> Tvar X <: Tvar X" by force
next
- case (S_Arrow \<Gamma> T\<^isub>1 S\<^isub>1 S\<^isub>2 T\<^isub>2) thus "\<Delta> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by blast
+ case (SA_arrow \<Gamma> T\<^isub>1 S\<^isub>1 S\<^isub>2 T\<^isub>2) thus "\<Delta> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by blast
next
- case (S_Forall \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
+ case (SA_all \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
have fresh_cond: "X\<sharp>\<Delta>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Delta>)" by (simp add: fresh_domain)
+ hence fresh_domain: "X\<sharp>(ty_domain \<Delta>)" by (simp add: fresh_domain)
have ih\<^isub>1: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
- have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((X,T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
+ have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((TVarB X T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
have lh_drv_prem: "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
hence closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in \<Gamma>" by (simp add: subtype_implies_closed)
have ok: "\<turnstile> \<Delta> ok" by fact
have ext: "\<Delta> extends \<Gamma>" by fact
have "T\<^isub>1 closed_in \<Delta>" using ext closed\<^isub>T\<^isub>1 by (simp only: extends_closed)
- hence "\<turnstile> ((X,T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
+ hence "\<turnstile> ((TVarB X T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
moreover
- have "((X,T\<^isub>1)#\<Delta>) extends ((X,T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
- ultimately have "((X,T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
+ have "((TVarB X T\<^isub>1)#\<Delta>) extends ((TVarB X T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
+ ultimately have "((TVarB X T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
moreover
have "\<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" using ok ext ih\<^isub>1 by simp
- ultimately show "\<Delta> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2" using ok by (force intro: S_Forall)
+ ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" using ok by (force intro: SA_all)
qed
text {* In fact all ``non-binding" cases can be solved automatically: *}
@@ -461,44 +799,41 @@
shows "\<Delta> \<turnstile> S <: T"
using a b c
proof (nominal_induct \<Gamma> S T avoiding: \<Delta> rule: subtype_of.strong_induct)
- case (S_Forall \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
+ case (SA_all \<Gamma> T\<^isub>1 S\<^isub>1 X S\<^isub>2 T\<^isub>2)
have fresh_cond: "X\<sharp>\<Delta>" by fact
- hence fresh_domain: "X\<sharp>(domain \<Delta>)" by (simp add: fresh_domain)
+ hence fresh_domain: "X\<sharp>(ty_domain \<Delta>)" by (simp add: fresh_domain)
have ih\<^isub>1: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends \<Gamma> \<Longrightarrow> \<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
- have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((X,T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
+ have ih\<^isub>2: "\<And>\<Delta>. \<turnstile> \<Delta> ok \<Longrightarrow> \<Delta> extends ((TVarB X T\<^isub>1)#\<Gamma>) \<Longrightarrow> \<Delta> \<turnstile> S\<^isub>2 <: T\<^isub>2" by fact
have lh_drv_prem: "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" by fact
hence closed\<^isub>T\<^isub>1: "T\<^isub>1 closed_in \<Gamma>" by (simp add: subtype_implies_closed)
have ok: "\<turnstile> \<Delta> ok" by fact
have ext: "\<Delta> extends \<Gamma>" by fact
have "T\<^isub>1 closed_in \<Delta>" using ext closed\<^isub>T\<^isub>1 by (simp only: extends_closed)
- hence "\<turnstile> ((X,T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
+ hence "\<turnstile> ((TVarB X T\<^isub>1)#\<Delta>) ok" using fresh_domain ok by force
moreover
- have "((X,T\<^isub>1)#\<Delta>) extends ((X,T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
- ultimately have "((X,T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
+ have "((TVarB X T\<^isub>1)#\<Delta>) extends ((TVarB X T\<^isub>1)#\<Gamma>)" using ext by (force simp add: extends_def)
+ ultimately have "((TVarB X T\<^isub>1)#\<Delta>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using ih\<^isub>2 by simp
moreover
have "\<Delta> \<turnstile> T\<^isub>1 <: S\<^isub>1" using ok ext ih\<^isub>1 by simp
- ultimately show "\<Delta> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2" using ok by (force intro: S_Forall)
-qed (blast intro: extends_closed extends_memb dest: extends_domain)+
+ ultimately show "\<Delta> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" using ok by (force intro: SA_all)
+qed (blast intro: extends_closed extends_memb dest: extends_ty_domain)+
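+
+text {* Editor's sketch (not part of this changeset): the instance of
+weakening used most often extends a context by a single binding; the
+proof below assumes the premise order of @{text "weakening"} as stated
+above. *}
+
+lemma weakening_cons:
+  assumes "\<Gamma> \<turnstile> S <: T" and "\<turnstile> (B#\<Gamma>) ok"
+  shows "(B#\<Gamma>) \<turnstile> S <: T"
+  using assms by (rule weakening) (auto simp add: extends_def)
+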
section {* Transitivity and Narrowing *}
text {* Some inversion lemmas that are needed in the transitivity and narrowing proof. *}
-lemma S_TopE:
- assumes a: "\<Gamma> \<turnstile> Top <: T"
- shows "T = Top"
-using a by (cases, auto)
+declare ty.inject [simp add]
-lemma S_ArrowE_left:
- assumes a: "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T"
- shows "T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = T\<^isub>1 \<rightarrow> T\<^isub>2 \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> \<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2)"
-using a by (cases, auto simp add: ty.inject)
+inductive_cases S_TopE: "\<Gamma> \<turnstile> Top <: T"
+inductive_cases S_ArrowE_left: "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T"
+
+declare ty.inject [simp del]
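+
+text {* Editor's sketch (not part of this changeset): the generated
+inversion rules are elimination rules rather than equations, so they
+are applied with @{text "elim"} instead of @{text "simp"}; for example,
+the old equational form of @{text "S_TopE"} is recovered as follows. *}
+
+lemma "\<Gamma> \<turnstile> Top <: T \<Longrightarrow> T = Top"
+  by (auto elim: S_TopE)
+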
lemma S_ForallE_left:
- shows "\<lbrakk>\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T; X\<sharp>\<Gamma>; X\<sharp>S\<^isub>1\<rbrakk>
- \<Longrightarrow> T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = \<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2)"
+ shows "\<lbrakk>\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T; X\<sharp>\<Gamma>; X\<sharp>S\<^isub>1\<rbrakk>
+ \<Longrightarrow> T = Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T = (\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2)"
apply(frule subtype_implies_ok)
- apply(ind_cases "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T")
+ apply(ind_cases "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T")
apply(auto simp add: ty.inject alpha)
apply(rule_tac x="[(X,Xa)]\<bullet>T\<^isub>2" in exI)
apply(rule conjI)
@@ -509,18 +844,20 @@
apply(rule at_ds5[OF at_tyvrs_inst])
apply(rule conjI)
apply(simp add: pt_fresh_left[OF pt_tyvrs_inst, OF at_tyvrs_inst] calc_atm)
- apply(drule_tac \<Gamma>="((Xa,T\<^isub>1)#\<Gamma>)" in subtype_implies_closed)+
+ apply(drule_tac \<Gamma>="((TVarB Xa T\<^isub>1)#\<Gamma>)" in subtype_implies_closed)+
apply(simp add: closed_in_def)
apply(drule fresh_domain)+
apply(simp add: fresh_def)
- apply(subgoal_tac "X \<notin> (insert Xa (domain \<Gamma>))")(*A*)
+ apply(subgoal_tac "X \<notin> (insert Xa (ty_domain \<Gamma>))")(*A*)
apply(force)
- (*A*)apply(simp add: at_fin_set_supp[OF at_tyvrs_inst, OF finite_domain])
+ (*A*)apply(simp add: at_fin_set_supp[OF at_tyvrs_inst, OF finite_domains(1)])
(* 2nd conjunct *)apply(frule_tac X="X" in subtype_implies_fresh)
apply(assumption)
+ apply (frule_tac \<Gamma>="TVarB Xa T\<^isub>1 # \<Gamma>" in subtype_implies_ok)
+ apply (erule validE)
+ apply (simp add: valid_ty_domain_fresh)
apply(drule_tac X="Xa" in subtype_implies_fresh)
apply(assumption)
- apply(simp add: fresh_prod)
apply(drule_tac pi="[(X,Xa)]" in subtype_of.eqvt(2))
apply(simp add: calc_atm)
apply(simp add: pt_fresh_fresh[OF pt_tyvrs_inst, OF at_tyvrs_inst])
@@ -556,8 +893,8 @@
that of @{term x} the property @{term "P y"} holds. *}
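+
+text {* Editor's sketch (not part of this changeset): spelt out as a
+rule, the instance of @{text "measure_induct_rule"} used below reads as
+follows. *}
+
+lemma size_ty_induct:
+  assumes "\<And>Q. (\<And>Q'. size_ty Q' < size_ty Q \<Longrightarrow> P Q') \<Longrightarrow> P Q"
+  shows "P Q"
+  using assms by (rule measure_induct_rule)
+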
lemma
- shows trans: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T"
- and narrow: "(\<Delta>@[(X,Q)]@\<Gamma>)\<turnstile>M<:N \<Longrightarrow> \<Gamma>\<turnstile>P<:Q \<Longrightarrow> (\<Delta>@[(X,P)]@\<Gamma>)\<turnstile>M<:N"
+ shows subtype_transitivity: "\<Gamma>\<turnstile>S<:Q \<Longrightarrow> \<Gamma>\<turnstile>Q<:T \<Longrightarrow> \<Gamma>\<turnstile>S<:T"
+ and subtype_narrow: "(\<Delta>@[(TVarB X Q)]@\<Gamma>)\<turnstile>M<:N \<Longrightarrow> \<Gamma>\<turnstile>P<:Q \<Longrightarrow> (\<Delta>@[(TVarB X P)]@\<Gamma>)\<turnstile>M<:N"
proof (induct Q arbitrary: \<Gamma> S T \<Delta> X P M N taking: "size_ty" rule: measure_induct_rule)
case (less Q)
--{* \begin{minipage}[t]{0.9\textwidth}
@@ -566,8 +903,8 @@
have IH_trans:
"\<And>Q' \<Gamma> S T. \<lbrakk>size_ty Q' < size_ty Q; \<Gamma>\<turnstile>S<:Q'; \<Gamma>\<turnstile>Q'<:T\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>S<:T" by fact
have IH_narrow:
- "\<And>Q' \<Delta> \<Gamma> X M N P. \<lbrakk>size_ty Q' < size_ty Q; (\<Delta>@[(X,Q')]@\<Gamma>)\<turnstile>M<:N; \<Gamma>\<turnstile>P<:Q'\<rbrakk>
- \<Longrightarrow> (\<Delta>@[(X,P)]@\<Gamma>)\<turnstile>M<:N" by fact
+ "\<And>Q' \<Delta> \<Gamma> X M N P. \<lbrakk>size_ty Q' < size_ty Q; (\<Delta>@[(TVarB X Q')]@\<Gamma>)\<turnstile>M<:N; \<Gamma>\<turnstile>P<:Q'\<rbrakk>
+ \<Longrightarrow> (\<Delta>@[(TVarB X P)]@\<Gamma>)\<turnstile>M<:N" by fact
--{* \begin{minipage}[t]{0.9\textwidth}
We proceed with the transitivity proof as an auxiliary lemma, because it needs
to be referenced in the narrowing proof.\end{minipage}*}
@@ -579,37 +916,36 @@
and "\<Gamma>' \<turnstile> Q <: T" --{* right-hand derivation *}
thus "\<Gamma>' \<turnstile> S' <: T"
proof (nominal_induct \<Gamma>' S' Q\<equiv>Q rule: subtype_of.strong_induct)
- case (S_Top \<Gamma> S)
+ case (SA_Top \<Gamma> S)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> S <: Top"}, giving
us @{term "\<turnstile> \<Gamma> ok"} and @{term "S closed_in \<Gamma>"}. This case is straightforward,
because the right-hand derivation must be of the form @{term "\<Gamma> \<turnstile> Top <: Top"}
giving us the equation @{term "T = Top"}.\end{minipage}*}
hence rh_drv: "\<Gamma> \<turnstile> Top <: T" by simp
- hence T_inst: "T = Top" by (simp add: S_TopE)
- have "\<turnstile> \<Gamma> ok"
- and "S closed_in \<Gamma>" by fact+
- hence "\<Gamma> \<turnstile> S <: Top" by (simp add: subtype_of.S_Top)
+ hence T_inst: "T = Top" by (auto elim: S_TopE)
+ from `\<turnstile> \<Gamma> ok` and `S closed_in \<Gamma>`
+ have "\<Gamma> \<turnstile> S <: Top" by (simp add: subtype_of.SA_Top)
thus "\<Gamma> \<turnstile> S <: T" using T_inst by simp
next
- case (S_Var Y U \<Gamma>)
+ case (SA_trans_TVar Y U \<Gamma>)
-- {* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> Tvar Y <: Q"}
with @{term "S = Tvar Y"}. We have therefore @{term "(Y,U)"}
is in @{term "\<Gamma>"} and by inner induction hypothesis that @{term "\<Gamma> \<turnstile> U <: T"}.
By @{text "S_Var"} follows @{term "\<Gamma> \<turnstile> Tvar Y <: T"}.\end{minipage}*}
hence IH_inner: "\<Gamma> \<turnstile> U <: T" by simp
- have "(Y,U) \<in> set \<Gamma>" by fact
- with IH_inner show "\<Gamma> \<turnstile> Tvar Y <: T" by (simp add: subtype_of.S_Var)
+ have "(TVarB Y U) \<in> set \<Gamma>" by fact
+ with IH_inner show "\<Gamma> \<turnstile> Tvar Y <: T" by (simp add: subtype_of.SA_trans_TVar)
next
- case (S_Refl \<Gamma> X)
+ case (SA_refl_TVar \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma>\<turnstile>(Tvar X) <: (Tvar X)"} with
@{term "Q=Tvar X"}. The goal then follows immediately from the right-hand
derivation.\end{minipage}*}
thus "\<Gamma> \<turnstile> Tvar X <: T" by simp
next
- case (S_Arrow \<Gamma> Q\<^isub>1 S\<^isub>1 S\<^isub>2 Q\<^isub>2)
+ case (SA_arrow \<Gamma> Q\<^isub>1 S\<^isub>1 S\<^isub>2 Q\<^isub>2)
--{* \begin{minipage}[t]{0.9\textwidth}
In this case the left-hand derivation is @{term "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: Q\<^isub>1 \<rightarrow> Q\<^isub>2"} with
@{term "S\<^isub>1\<rightarrow>S\<^isub>2=S"} and @{term "Q\<^isub>1\<rightarrow>Q\<^isub>2=Q"}. We know that the @{text "size_ty"} of
@@ -629,7 +965,7 @@
have lh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> Q\<^isub>1 <: S\<^isub>1" by fact
have lh_drv_prm\<^isub>2: "\<Gamma> \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
from rh_drv have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=T\<^isub>1\<rightarrow>T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> \<Gamma>\<turnstile>Q\<^isub>2<:T\<^isub>2)"
- by (simp add: S_ArrowE_left)
+ by (auto elim: S_ArrowE_left)
moreover
have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in \<Gamma>"
using lh_drv_prm\<^isub>1 lh_drv_prm\<^isub>2 by (simp_all add: subtype_implies_closed)
@@ -647,176 +983,1020 @@
moreover
from IH_trans[of "Q\<^isub>2"]
have "\<Gamma> \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 lh_drv_prm\<^isub>2 by simp
- ultimately have "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by (simp add: subtype_of.S_Arrow)
+ ultimately have "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T\<^isub>1 \<rightarrow> T\<^isub>2" by (simp add: subtype_of.SA_arrow)
hence "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T" using T_inst by simp
}
ultimately show "\<Gamma> \<turnstile> S\<^isub>1 \<rightarrow> S\<^isub>2 <: T" by blast
next
- case (S_Forall \<Gamma> Q\<^isub>1 S\<^isub>1 X S\<^isub>2 Q\<^isub>2)
+ case (SA_all \<Gamma> Q\<^isub>1 S\<^isub>1 X S\<^isub>2 Q\<^isub>2)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{text "\<Gamma>\<turnstile>\<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:Q\<^isub>1].Q\<^isub>2"} with
- @{text "\<forall>[X<:S\<^isub>1].S\<^isub>2=S"} and @{text "\<forall>[X<:Q\<^isub>1].Q\<^isub>2=Q"}. We therefore have the sub-derivations
- @{term "\<Gamma>\<turnstile>Q\<^isub>1<:S\<^isub>1"} and @{term "((X,Q\<^isub>1)#\<Gamma>)\<turnstile>S\<^isub>2<:Q\<^isub>2"}. Since @{term "X"} is a binder, we
+ In this case the left-hand derivation is @{term "\<Gamma>\<turnstile>(\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:Q\<^isub>1. Q\<^isub>2)"} with
+ @{term "(\<forall>X<:S\<^isub>1. S\<^isub>2)=S"} and @{term "(\<forall>X<:Q\<^isub>1. Q\<^isub>2)=Q"}. We therefore have the sub-derivations
+ @{term "\<Gamma>\<turnstile>Q\<^isub>1<:S\<^isub>1"} and @{term "((TVarB X Q\<^isub>1)#\<Gamma>)\<turnstile>S\<^isub>2<:Q\<^isub>2"}. Since @{term "X"} is a binder, we
assume that it is sufficiently fresh; in particular we have the freshness conditions
@{term "X\<sharp>\<Gamma>"} and @{term "X\<sharp>Q\<^isub>1"} (these assumptions are provided by the strong
induction rule @{text "subtype_of_induct"}). We know that the @{text "size_ty"} of
@{term Q\<^isub>1} and @{term Q\<^isub>2} is smaller than that of @{term Q};
so we can apply the outer induction hypotheses for @{term Q\<^isub>1} and @{term Q\<^isub>2}.
- The right-hand derivation is @{text "\<Gamma> \<turnstile> \<forall>[X<:Q\<^isub>1].Q\<^isub>2 <: T"}. Since @{term "X\<sharp>\<Gamma>"}
+ The right-hand derivation is @{term "\<Gamma> \<turnstile> (\<forall>X<:Q\<^isub>1. Q\<^isub>2) <: T"}. Since @{term "X\<sharp>\<Gamma>"}
and @{term "X\<sharp>Q\<^isub>1"} there exists types @{text "T\<^isub>1,T\<^isub>2"} such that
- @{text "T=Top \<or> T=\<forall>[X<:T\<^isub>1].T\<^isub>2"}. The @{term "Top"}-case is straightforward once we know
- @{text "(\<forall>[X<:S\<^isub>1].S\<^isub>2) closed_in \<Gamma>"} and @{term "\<turnstile> \<Gamma> ok"}. In the other case we have
- the sub-derivations @{term "\<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1"} and @{term "((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"}. Using the outer
+ @{term "T=Top \<or> T=(\<forall>X<:T\<^isub>1. T\<^isub>2)"}. The @{term "Top"}-case is straightforward once we know
+ @{term "(\<forall>X<:S\<^isub>1. S\<^isub>2) closed_in \<Gamma>"} and @{term "\<turnstile> \<Gamma> ok"}. In the other case we have
+ the sub-derivations @{term "\<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1"} and @{term "((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"}. Using the outer
induction hypothesis for transitivity we can derive @{term "\<Gamma>\<turnstile>T\<^isub>1<:S\<^isub>1"}. From the outer
- induction for narrowing we get @{term "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2"} and then using again
- induction for transitivity we obtain @{term "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. By rule
+ induction for narrowing we get @{term "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2"} and then using again
+ induction for transitivity we obtain @{term "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. By rule
@{text "S_Forall"} and the freshness condition @{term "X\<sharp>\<Gamma>"} follows
- @{text "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"}, which is @{text "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T\<^isub>"}.
+ @{term "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"}, which is @{term "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T\<^isub>"}.
\end{minipage}*}
- hence rh_drv: "\<Gamma> \<turnstile> \<forall>[X<:Q\<^isub>1].Q\<^isub>2 <: T" by simp
+ hence rh_drv: "\<Gamma> \<turnstile> (\<forall>X<:Q\<^isub>1. Q\<^isub>2) <: T" by simp
have lh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> Q\<^isub>1 <: S\<^isub>1" by fact
- have lh_drv_prm\<^isub>2: "((X,Q\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
- have "X\<sharp>\<Gamma>" by fact
+ have lh_drv_prm\<^isub>2: "((TVarB X Q\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" by fact
+ then have "X\<sharp>\<Gamma>" by (force dest: subtype_implies_ok simp add: valid_ty_domain_fresh)
then have fresh_cond: "X\<sharp>\<Gamma>" "X\<sharp>Q\<^isub>1" using lh_drv_prm\<^isub>1 by (simp_all add: subtype_implies_fresh)
- from `\<forall>[X<:Q\<^isub>1].Q\<^isub>2 = Q`
+ from `(\<forall>X<:Q\<^isub>1. Q\<^isub>2) = Q`
have Q\<^isub>1\<^isub>2_less: "size_ty Q\<^isub>1 < size_ty Q" "size_ty Q\<^isub>2 < size_ty Q " using fresh_cond by auto
from rh_drv
- have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=\<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2)"
+ have "T=Top \<or> (\<exists>T\<^isub>1 T\<^isub>2. T=(\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2)"
using fresh_cond by (simp add: S_ForallE_left)
moreover
- have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in ((X,Q\<^isub>1)#\<Gamma>)"
+ have "S\<^isub>1 closed_in \<Gamma>" and "S\<^isub>2 closed_in ((TVarB X Q\<^isub>1)#\<Gamma>)"
using lh_drv_prm\<^isub>1 lh_drv_prm\<^isub>2 by (simp_all add: subtype_implies_closed)
- hence "(\<forall>[X<:S\<^isub>1].S\<^isub>2) closed_in \<Gamma>" by (force simp add: closed_in_def ty.supp abs_supp)
+ hence "(\<forall>X<:S\<^isub>1. S\<^isub>2) closed_in \<Gamma>" by (force simp add: closed_in_def ty.supp abs_supp)
moreover
have "\<turnstile> \<Gamma> ok" using rh_drv by (rule subtype_implies_ok)
moreover
- { assume "\<exists>T\<^isub>1 T\<^isub>2. T=\<forall>[X<:T\<^isub>1].T\<^isub>2 \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((X,T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"
+ { assume "\<exists>T\<^isub>1 T\<^isub>2. T=(\<forall>X<:T\<^isub>1. T\<^isub>2) \<and> \<Gamma>\<turnstile>T\<^isub>1<:Q\<^isub>1 \<and> ((TVarB X T\<^isub>1)#\<Gamma>)\<turnstile>Q\<^isub>2<:T\<^isub>2"
then obtain T\<^isub>1 T\<^isub>2
- where T_inst: "T = \<forall>[X<:T\<^isub>1].T\<^isub>2"
+ where T_inst: "T = (\<forall>X<:T\<^isub>1. T\<^isub>2)"
and rh_drv_prm\<^isub>1: "\<Gamma> \<turnstile> T\<^isub>1 <: Q\<^isub>1"
- and rh_drv_prm\<^isub>2:"((X,T\<^isub>1)#\<Gamma>) \<turnstile> Q\<^isub>2 <: T\<^isub>2" by force
+ and rh_drv_prm\<^isub>2:"((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> Q\<^isub>2 <: T\<^isub>2" by force
from IH_trans[of "Q\<^isub>1"]
have "\<Gamma> \<turnstile> T\<^isub>1 <: S\<^isub>1" using lh_drv_prm\<^isub>1 rh_drv_prm\<^isub>1 Q\<^isub>1\<^isub>2_less by blast
moreover
from IH_narrow[of "Q\<^isub>1" "[]"]
- have "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" using Q\<^isub>1\<^isub>2_less lh_drv_prm\<^isub>2 rh_drv_prm\<^isub>1 by simp
+ have "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: Q\<^isub>2" using Q\<^isub>1\<^isub>2_less lh_drv_prm\<^isub>2 rh_drv_prm\<^isub>1 by simp
with IH_trans[of "Q\<^isub>2"]
- have "((X,T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 by simp
- ultimately have "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: \<forall>[X<:T\<^isub>1].T\<^isub>2"
- using fresh_cond by (simp add: subtype_of.S_Forall)
- hence "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T" using T_inst by simp
+ have "((TVarB X T\<^isub>1)#\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2" using Q\<^isub>1\<^isub>2_less rh_drv_prm\<^isub>2 by simp
+ ultimately have "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+ using fresh_cond by (simp add: subtype_of.SA_all)
+ hence "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T" using T_inst by simp
}
- ultimately show "\<Gamma> \<turnstile> \<forall>[X<:S\<^isub>1].S\<^isub>2 <: T" by blast
+ ultimately show "\<Gamma> \<turnstile> (\<forall>X<:S\<^isub>1. S\<^isub>2) <: T" by blast
qed
qed
{ --{* The transitivity proof is now by the auxiliary lemma. *}
case 1
- have "\<Gamma> \<turnstile> S <: Q"
- and "\<Gamma> \<turnstile> Q <: T" by fact+
- thus "\<Gamma> \<turnstile> S <: T" by (rule transitivity_aux)
+ from `\<Gamma> \<turnstile> S <: Q` and `\<Gamma> \<turnstile> Q <: T`
+ show "\<Gamma> \<turnstile> S <: T" by (rule transitivity_aux)
next
- --{* The narrowing proof proceeds by an induction over @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> M <: N"}. *}
+ --{* The narrowing proof proceeds by an induction over @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N"}. *}
case 2
- have "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> M <: N" --{* left-hand derivation *}
- and "\<Gamma> \<turnstile> P<:Q" by fact+ --{* right-hand derivation *}
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> M <: N"
- proof (nominal_induct \<Gamma>\<equiv>"\<Delta>@[(X,Q)]@\<Gamma>" M N avoiding: \<Delta> \<Gamma> X rule: subtype_of.strong_induct)
- case (S_Top _ S \<Delta> \<Gamma> X)
+ from `(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> M <: N` --{* left-hand derivation *}
+ and `\<Gamma> \<turnstile> P<:Q` --{* right-hand derivation *}
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> M <: N"
+ proof (nominal_induct \<Gamma>\<equiv>"\<Delta>@[(TVarB X Q)]@\<Gamma>" M N avoiding: \<Delta> \<Gamma> X rule: subtype_of.strong_induct)
+ case (SA_Top _ S \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> S <: Top"}. We show
- that the context @{term "\<Delta>@[(X,P)]@\<Gamma>"} is ok and that @{term S} is closed in
- @{term "\<Delta>@[(X,P)]@\<Gamma>"}. Then we can apply the @{text "S_Top"}-rule.\end{minipage}*}
- hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok"
- and lh_drv_prm\<^isub>2: "S closed_in (\<Delta>@[(X,Q)]@\<Gamma>)" by simp_all
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> S <: Top"}. We show
+ that the context @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} is ok and that @{term S} is closed in
+ @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. Then we can apply the @{text "S_Top"}-rule.\end{minipage}*}
+ hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok"
+ and lh_drv_prm\<^isub>2: "S closed_in (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp_all
have rh_drv: "\<Gamma> \<turnstile> P <: Q" by fact
hence "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
- with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: replace_type)
+ with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: replace_type)
moreover
- from lh_drv_prm\<^isub>2 have "S closed_in (\<Delta>@[(X,P)]@\<Gamma>)"
- by (simp add: closed_in_def domain_append)
- ultimately show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: Top" by (simp add: subtype_of.S_Top)
+ from lh_drv_prm\<^isub>2 have "S closed_in (\<Delta>@[(TVarB X P)]@\<Gamma>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: Top" by (simp add: subtype_of.SA_Top)
next
- case (S_Var Y S _ N \<Delta> \<Gamma> X)
+ case (SA_trans_TVar Y S _ N \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Tvar Y <: N"} and
- by inner induction hypothesis we have @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: N"}. We therefore
- know that the contexts @{term "\<Delta>@[(X,Q)]@\<Gamma>"} and @{term "\<Delta>@[(X,P)]@\<Gamma>"} are ok, and that
- @{term "(Y,S)"} is in @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. We need to show that
- @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N"} holds. In case @{term "X\<noteq>Y"} we know that
- @{term "(Y,S)"} is in @{term "\<Delta>@[(X,P)]@\<Gamma>"} and can use the inner induction hypothesis
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Tvar Y <: N"} and
+ by the inner induction hypothesis we have @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: N"}. We therefore
+ know that the contexts @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"} and @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} are ok, and that
+ @{term "(TVarB Y S)"} is in @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. We need to show that
+ @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N"} holds. In case @{term "X\<noteq>Y"} we know that
+ @{term "(TVarB Y S)"} is in @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} and can use the inner induction hypothesis
and rule @{text "S_Var"} to conclude. In case @{term "X=Y"} we can infer that
- @{term "S=Q"}; moreover we have that @{term "(\<Delta>@[(X,P)]@\<Gamma>) extends \<Gamma>"} and therefore
- by @{text "weakening"} that @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: Q"} holds. By transitivity we
- obtain then @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: N"} and can conclude by applying rule
+ @{term "S=Q"}; moreover we have that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) extends \<Gamma>"} and therefore
+ by @{text "weakening"} that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: Q"} holds. By transitivity we
+ then obtain @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: N"} and can conclude by applying rule
@{text "S_Var"}.\end{minipage}*}
- hence IH_inner: "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S <: N"
- and lh_drv_prm: "(Y,S) \<in> set (\<Delta>@[(X,Q)]@\<Gamma>)"
+ hence IH_inner: "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S <: N"
+ and lh_drv_prm: "(TVarB Y S) \<in> set (\<Delta>@[(TVarB X Q)]@\<Gamma>)"
and rh_drv: "\<Gamma> \<turnstile> P<:Q"
- and ok\<^isub>Q: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok" by (simp_all add: subtype_implies_ok)
- hence ok\<^isub>P: "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: subtype_implies_ok)
- show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N"
+ and ok\<^isub>Q: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok" by (simp_all add: subtype_implies_ok)
+ hence ok\<^isub>P: "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: subtype_implies_ok)
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N"
proof (cases "X=Y")
case False
have "X\<noteq>Y" by fact
- hence "(Y,S)\<in>set (\<Delta>@[(X,P)]@\<Gamma>)" using lh_drv_prm by simp
- with IH_inner show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N" by (simp add: subtype_of.S_Var)
+ hence "(TVarB Y S)\<in>set (\<Delta>@[(TVarB X P)]@\<Gamma>)" using lh_drv_prm by (simp add:binding.inject)
+ with IH_inner show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N" by (simp add: subtype_of.SA_trans_TVar)
next
case True
- have memb\<^isub>X\<^isub>Q: "(X,Q)\<in>set (\<Delta>@[(X,Q)]@\<Gamma>)" by simp
- have memb\<^isub>X\<^isub>P: "(X,P)\<in>set (\<Delta>@[(X,P)]@\<Gamma>)" by simp
+ have memb\<^isub>X\<^isub>Q: "(TVarB X Q)\<in>set (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp
+ have memb\<^isub>X\<^isub>P: "(TVarB X P)\<in>set (\<Delta>@[(TVarB X P)]@\<Gamma>)" by simp
have eq: "X=Y" by fact
hence "S=Q" using ok\<^isub>Q lh_drv_prm memb\<^isub>X\<^isub>Q by (simp only: uniqueness_of_ctxt)
- hence "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Q <: N" using IH_inner by simp
+ hence "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Q <: N" using IH_inner by simp
moreover
- have "(\<Delta>@[(X,P)]@\<Gamma>) extends \<Gamma>" by (simp add: extends_def)
- hence "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: Q" using rh_drv ok\<^isub>P by (simp only: weakening)
- ultimately have "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> P <: N" by (simp add: transitivity_aux)
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: N" using memb\<^isub>X\<^isub>P eq by (simp only: subtype_of.S_Var)
+ have "(\<Delta>@[(TVarB X P)]@\<Gamma>) extends \<Gamma>" by (simp add: extends_def)
+ hence "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: Q" using rh_drv ok\<^isub>P by (simp only: weakening)
+ ultimately have "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> P <: N" by (simp add: transitivity_aux)
+ thus "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: N" using memb\<^isub>X\<^isub>P eq by (simp only: subtype_of.SA_trans_TVar)
qed
next
- case (S_Refl _ Y \<Delta> \<Gamma> X)
+ case (SA_refl_TVar _ Y \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y"} and we
- therefore know that @{term "\<Delta>@[(X,Q)]@\<Gamma>"} is ok and that @{term "Y"} is in
- the domain of @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. We therefore know that @{term "\<Delta>@[(X,P)]@\<Gamma>"} is ok
- and that @{term Y} is in the domain of @{term "\<Delta>@[(X,P)]@\<Gamma>"}. We can conclude by applying
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y"} and we
+ therefore know that @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"} is ok and that @{term "Y"} is in
+ the domain of @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. It follows that @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"} is ok
+ and that @{term Y} is in the domain of @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. We can conclude by applying
rule @{text "S_Refl"}.\end{minipage}*}
- hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(X,Q)]@\<Gamma>) ok"
- and lh_drv_prm\<^isub>2: "Y \<in> domain (\<Delta>@[(X,Q)]@\<Gamma>)" by simp_all
+ hence lh_drv_prm\<^isub>1: "\<turnstile> (\<Delta>@[(TVarB X Q)]@\<Gamma>) ok"
+ and lh_drv_prm\<^isub>2: "Y \<in> ty_domain (\<Delta>@[(TVarB X Q)]@\<Gamma>)" by simp_all
have "\<Gamma> \<turnstile> P <: Q" by fact
hence "P closed_in \<Gamma>" by (simp add: subtype_implies_closed)
- with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(X,P)]@\<Gamma>) ok" by (simp add: replace_type)
+ with lh_drv_prm\<^isub>1 have "\<turnstile> (\<Delta>@[(TVarB X P)]@\<Gamma>) ok" by (simp add: replace_type)
moreover
- from lh_drv_prm\<^isub>2 have "Y \<in> domain (\<Delta>@[(X,P)]@\<Gamma>)" by (simp add: domain_append)
- ultimately show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y" by (simp add: subtype_of.S_Refl)
+ from lh_drv_prm\<^isub>2 have "Y \<in> ty_domain (\<Delta>@[(TVarB X P)]@\<Gamma>)" by (simp add: domains_append)
+ ultimately show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Tvar Y <: Tvar Y" by (simp add: subtype_of.SA_refl_TVar)
next
- case (S_Arrow _ S\<^isub>1 Q\<^isub>1 Q\<^isub>2 S\<^isub>2 \<Delta> \<Gamma> X)
+ case (SA_arrow _ S\<^isub>1 Q\<^isub>1 Q\<^isub>2 S\<^isub>2 \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{term "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2"}
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2"}
and the proof is trivial.\end{minipage}*}
- thus "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2" by blast
+ thus "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> Q\<^isub>1 \<rightarrow> Q\<^isub>2 <: S\<^isub>1 \<rightarrow> S\<^isub>2" by blast
next
- case (S_Forall _ T\<^isub>1 S\<^isub>1 Y S\<^isub>2 T\<^isub>2 \<Delta> \<Gamma> X)
+ case (SA_all \<Gamma>' T\<^isub>1 S\<^isub>1 Y S\<^isub>2 T\<^isub>2 \<Delta> \<Gamma> X)
--{* \begin{minipage}[t]{0.9\textwidth}
- In this case the left-hand derivation is @{text "(\<Delta>@[(X,Q)]@\<Gamma>) \<turnstile> \<forall>[Y<:S\<^isub>1].S\<^isub>2 <: \<forall>[Y<:T\<^isub>1].T\<^isub>2"}
- and therfore we know that the binder @{term Y} is fresh for @{term "\<Delta>@[(X,Q)]@\<Gamma>"}. By
- the inner induction hypothesis we have that @{term "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"} and
- @{term "((Y,T\<^isub>1)#\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. Since @{term P} is a subtype of @{term Q}
+ In this case the left-hand derivation is @{term "(\<Delta>@[(TVarB X Q)]@\<Gamma>) \<turnstile> (\<forall>Y<:S\<^isub>1. S\<^isub>2) <: (\<forall>Y<:T\<^isub>1. T\<^isub>2)"}
+ and therefore we know that the binder @{term Y} is fresh for @{term "\<Delta>@[(TVarB X Q)]@\<Gamma>"}. By
+ the inner induction hypothesis we have that @{term "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"} and
+ @{term "((TVarB Y T\<^isub>1)#\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"}. Since @{term P} is a subtype of @{term Q}
we can infer that @{term Y} is fresh for @{term P} and thus also fresh for
- @{term "\<Delta>@[(X,P)]@\<Gamma>"}. We can then conclude by applying rule @{text "S_Forall"}.
+ @{term "\<Delta>@[(TVarB X P)]@\<Gamma>"}. We can then conclude by applying rule @{text "S_Forall"}.
\end{minipage}*}
- hence IH_inner\<^isub>1: "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"
- and IH_inner\<^isub>2: "((Y,T\<^isub>1)#\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> S\<^isub>2 <: T\<^isub>2"
- and lh_drv_prm: "Y\<sharp>(\<Delta>@[(X,Q)]@\<Gamma>)" by force+
- have rh_drv: "\<Gamma> \<turnstile> P <: Q" by fact
- hence "Y\<sharp>P" using lh_drv_prm by (simp only: fresh_list_append subtype_implies_fresh)
- hence "Y\<sharp>(\<Delta>@[(X,P)]@\<Gamma>)" using lh_drv_prm
- by (simp add: fresh_list_append fresh_list_cons fresh_prod)
+ hence rh_drv: "\<Gamma> \<turnstile> P <: Q"
+ and IH_inner\<^isub>1: "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> T\<^isub>1 <: S\<^isub>1"
+ and "TVarB Y T\<^isub>1 # \<Gamma>' = ((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X Q] @ \<Gamma>" by auto
+ moreover have "\<lbrakk>\<Gamma>\<turnstile>P<:Q; TVarB Y T\<^isub>1 # \<Gamma>' = ((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X Q] @ \<Gamma>\<rbrakk> \<Longrightarrow> (((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X P] @ \<Gamma>)\<turnstile>S\<^isub>2<:T\<^isub>2" by fact
+ ultimately have IH_inner\<^isub>2: "(((TVarB Y T\<^isub>1)#\<Delta>) @ [TVarB X P] @ \<Gamma>)\<turnstile>S\<^isub>2<:T\<^isub>2" by auto
with IH_inner\<^isub>1 IH_inner\<^isub>2
- show "(\<Delta>@[(X,P)]@\<Gamma>) \<turnstile> \<forall>[Y<:S\<^isub>1].S\<^isub>2 <: \<forall>[Y<:T\<^isub>1].T\<^isub>2" by (simp add: subtype_of.S_Forall)
+ show "(\<Delta>@[(TVarB X P)]@\<Gamma>) \<turnstile> (\<forall>Y<:S\<^isub>1. S\<^isub>2) <: (\<forall>Y<:T\<^isub>1. T\<^isub>2)" by (simp add: subtype_of.SA_all)
qed
}
qed
-end
\ No newline at end of file
+section {* Typing *}
+
+inductive
+ typing :: "env \<Rightarrow> trm \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _" [60,60,60] 60)
+where
+ T_Var[intro]: "\<lbrakk> VarB x T \<in> set \<Gamma>; \<turnstile> \<Gamma> ok \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var x : T"
+| T_App[intro]: "\<lbrakk> \<Gamma> \<turnstile> t\<^isub>1 : T\<^isub>1 \<rightarrow> T\<^isub>2; \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^isub>1 \<cdot> t\<^isub>2 : T\<^isub>2"
+| T_Abs[intro]: "\<lbrakk> VarB x T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<lambda>x:T\<^isub>1. t\<^isub>2) : T\<^isub>1 \<rightarrow> T\<^isub>2"
+| T_Sub[intro]: "\<lbrakk> \<Gamma> \<turnstile> t : S; \<Gamma> \<turnstile> S <: T \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t : T"
+| T_TAbs[intro]:"\<lbrakk> TVarB X T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> (\<lambda>X<:T\<^isub>1. t\<^isub>2) : (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+| T_TApp[intro]:"\<lbrakk> X \<sharp> (\<Gamma>, t\<^isub>1, T\<^isub>2); \<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2); \<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1 \<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 : (T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>)"
+
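+text {* Editor's sketch (not part of this changeset): as a small sanity
+check, the rules type the identity abstraction under any well-formed
+extended context. *}
+
+lemma typing_id:
+  assumes "\<turnstile> (VarB x T # \<Gamma>) ok"
+  shows "\<Gamma> \<turnstile> (\<lambda>x:T. Var x) : T \<rightarrow> T"
+proof (rule T_Abs)
+  from assms show "VarB x T # \<Gamma> \<turnstile> Var x : T"
+    by (rule T_Var [rotated]) simp
+qed
+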
+equivariance typing
+
+lemma better_T_TApp:
+ assumes H1: "\<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T11. T12)"
+ and H2: "\<Gamma> \<turnstile> T2 <: T11"
+ shows "\<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T2 : (T12[X \<mapsto> T2]\<^sub>\<tau>)"
+proof -
+ obtain Y::tyvrs where Y: "Y \<sharp> (X, T12, \<Gamma>, t\<^isub>1, T2)"
+ by (rule exists_fresh) (rule fin_supp)
+ then have "Y \<sharp> (\<Gamma>, t\<^isub>1, T2)" by simp
+ moreover from Y have "(\<forall>X<:T11. T12) = (\<forall>Y<:T11. [(Y, X)] \<bullet> T12)"
+ by (auto simp add: ty.inject alpha' fresh_prod fresh_atm)
+ with H1 have "\<Gamma> \<turnstile> t\<^isub>1 : (\<forall>Y<:T11. [(Y, X)] \<bullet> T12)" by simp
+ ultimately have "\<Gamma> \<turnstile> t\<^isub>1 \<cdot>\<^sub>\<tau> T2 : (([(Y, X)] \<bullet> T12)[Y \<mapsto> T2]\<^sub>\<tau>)" using H2
+ by (rule T_TApp)
+ with Y show ?thesis by (simp add: type_subst_rename)
+qed
+
+lemma typing_ok:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "\<turnstile> \<Gamma> ok"
+using assms by (induct, auto)
+
+nominal_inductive typing
+ by (auto dest!: typing_ok intro: closed_in_fresh fresh_domain
+ simp: abs_fresh fresh_prod fresh_atm freshs valid_ty_domain_fresh fresh_trm_domain)
+
+lemma ok_imp_VarB_closed_in:
+ assumes ok: "\<turnstile> \<Gamma> ok"
+ shows "VarB x T \<in> set \<Gamma> \<Longrightarrow> T closed_in \<Gamma>" using ok
+ by induct (auto simp add: binding.inject closed_in_def)
+
+lemma tyvrs_of_subst: "tyvrs_of (B[X \<mapsto> T]\<^sub>b) = tyvrs_of B"
+ by (nominal_induct B rule: binding.strong_induct) simp_all
+
+lemma ty_domain_subst: "ty_domain (\<Gamma>[X \<mapsto> T]\<^sub>e) = ty_domain \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: tyvrs_of_subst)
+
+lemma vrs_of_subst: "vrs_of (B[X \<mapsto> T]\<^sub>b) = vrs_of B"
+ by (nominal_induct B rule: binding.strong_induct) simp_all
+
+lemma trm_domain_subst: "trm_domain (\<Gamma>[X \<mapsto> T]\<^sub>e) = trm_domain \<Gamma>"
+ by (induct \<Gamma>) (simp_all add: vrs_of_subst)
+
+lemma subst_closed_in:
+ "T closed_in (\<Delta> @ TVarB X S # \<Gamma>) \<Longrightarrow> U closed_in \<Gamma> \<Longrightarrow> T[X \<mapsto> U]\<^sub>\<tau> closed_in (\<Delta>[X \<mapsto> U]\<^sub>e @ \<Gamma>)"
+ apply (nominal_induct T avoiding: X U \<Gamma> rule: ty.strong_induct)
+ apply (simp add: closed_in_def ty.supp supp_atm domains_append ty_domain_subst)
+ apply blast
+ apply (simp add: closed_in_def ty.supp)
+ apply (simp add: closed_in_def ty.supp)
+ apply (simp add: closed_in_def ty.supp abs_supp)
+ apply (drule_tac x = X in meta_spec)
+ apply (drule_tac x = U in meta_spec)
+ apply (drule_tac x = "(TVarB tyvrs ty2) # \<Gamma>" in meta_spec)
+ apply (simp add: domains_append ty_domain_subst)
+ apply blast
+ done
+
+lemmas subst_closed_in' = subst_closed_in [where \<Delta>="[]", simplified]
+
+lemma typing_closed_in:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "T closed_in \<Gamma>"
+using assms
+proof induct
+ case (T_Var x T \<Gamma>)
+ from `\<turnstile> \<Gamma> ok` and `VarB x T \<in> set \<Gamma>`
+ show ?case by (rule ok_imp_VarB_closed_in)
+next
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1 T\<^isub>2 t\<^isub>2)
+ then show ?case by (auto simp add: ty.supp closed_in_def)
+next
+ case (T_Abs x T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `VarB x T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2`
+ have "T\<^isub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
+ with T_Abs show ?case by (auto simp add: ty.supp closed_in_def)
+next
+ case (T_Sub \<Gamma> t S T)
+ from `\<Gamma> \<turnstile> S <: T` show ?case by (simp add: subtype_implies_closed)
+next
+ case (T_TAbs X T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `TVarB X T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2`
+ have "T\<^isub>1 closed_in \<Gamma>" by (auto dest: typing_ok)
+ with T_TAbs show ?case by (auto simp add: ty.supp closed_in_def abs_supp)
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T2 T11 T12)
+ then have "T12 closed_in (TVarB X T11 # \<Gamma>)"
+ by (auto simp add: closed_in_def ty.supp abs_supp)
+ moreover from T_TApp have "T2 closed_in \<Gamma>"
+ by (simp add: subtype_implies_closed)
+ ultimately show ?case by (rule subst_closed_in')
+qed
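+
+text {* Editor's sketch (not part of this changeset): @{text "typing_ok"}
+and @{text "typing_closed_in"} package the two well-formedness facts of
+a typing judgement. *}
+
+lemma typing_wf:
+  assumes "\<Gamma> \<turnstile> t : T"
+  shows "\<turnstile> \<Gamma> ok" and "T closed_in \<Gamma>"
+  using assms by (auto intro: typing_ok typing_closed_in)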
+
+
+subsection {* Evaluation *}
+
+inductive
+ val :: "trm \<Rightarrow> bool"
+where
+ Abs[intro]: "val (\<lambda>x:T. t)"
+| TAbs[intro]: "val (\<lambda>X<:T. t)"
+
+equivariance val
+
+inductive_cases val_inv_auto[elim]:
+ "val (Var x)"
+ "val (t1 \<cdot> t2)"
+ "val (t1 \<cdot>\<^sub>\<tau> t2)"
+
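+text {* Editor's sketch (not part of this changeset): with these
+elimination rules, automation refutes non-values directly. *}
+
+lemma "\<not> val (Var x)"
+  by auto
+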
+inductive
+ eval :: "trm \<Rightarrow> trm \<Rightarrow> bool" ("_ \<longmapsto> _" [60,60] 60)
+where
+ E_Abs : "\<lbrakk> x \<sharp> v\<^isub>2; val v\<^isub>2 \<rbrakk> \<Longrightarrow> (\<lambda>x:T\<^isub>1\<^isub>1. t\<^isub>1\<^isub>2) \<cdot> v\<^isub>2 \<longmapsto> t\<^isub>1\<^isub>2[x \<mapsto> v\<^isub>2]"
+| E_App1 [intro]: "t \<longmapsto> t' \<Longrightarrow> t \<cdot> u \<longmapsto> t' \<cdot> u"
+| E_App2 [intro]: "\<lbrakk> val v; t \<longmapsto> t' \<rbrakk> \<Longrightarrow> v \<cdot> t \<longmapsto> v \<cdot> t'"
+| E_TAbs : "X \<sharp> (T\<^isub>1\<^isub>1, T\<^isub>2) \<Longrightarrow> (\<lambda>X<:T\<^isub>1\<^isub>1. t\<^isub>1\<^isub>2) \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t\<^isub>1\<^isub>2[X \<mapsto>\<^sub>\<tau> T\<^isub>2]"
+| E_TApp [intro]: "t \<longmapsto> t' \<Longrightarrow> t \<cdot>\<^sub>\<tau> T \<longmapsto> t' \<cdot>\<^sub>\<tau> T"
+
+lemma better_E_Abs[intro]:
+ assumes H: "val v2"
+ shows "(\<lambda>x:T11. t12) \<cdot> v2 \<longmapsto> t12[x \<mapsto> v2]"
+proof -
+ obtain y::vrs where y: "y \<sharp> (x, t12, v2)" by (rule exists_fresh) (rule fin_supp)
+ then have "y \<sharp> v2" by simp
+ then have "(\<lambda>y:T11. [(y, x)] \<bullet> t12) \<cdot> v2 \<longmapsto> ([(y, x)] \<bullet> t12)[y \<mapsto> v2]" using H
+ by (rule E_Abs)
+ moreover from y have "(\<lambda>x:T11. t12) \<cdot> v2 = (\<lambda>y:T11. [(y, x)] \<bullet> t12) \<cdot> v2"
+ by (auto simp add: trm.inject alpha' fresh_prod fresh_atm)
+ ultimately have "(\<lambda>x:T11. t12) \<cdot> v2 \<longmapsto> ([(y, x)] \<bullet> t12)[y \<mapsto> v2]"
+ by simp
+ with y show ?thesis by (simp add: subst_trm_rename)
+qed
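+
+text {* Editor's sketch (not part of this changeset): a single
+beta-reduction step, with the value premise discharged by @{text "Abs"}. *}
+
+lemma "(\<lambda>x:T. Var x) \<cdot> (\<lambda>y:S. t) \<longmapsto> (Var x)[x \<mapsto> (\<lambda>y:S. t)]"
+  by (rule better_E_Abs) (rule Abs)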
+
+lemma better_E_TAbs[intro]: "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> t12[X \<mapsto>\<^sub>\<tau> T2]"
+proof -
+ obtain Y::tyvrs where Y: "Y \<sharp> (X, t12, T11, T2)" by (rule exists_fresh) (rule fin_supp)
+ then have "Y \<sharp> (T11, T2)" by simp
+ then have "(\<lambda>Y<:T11. [(Y, X)] \<bullet> t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> ([(Y, X)] \<bullet> t12)[Y \<mapsto>\<^sub>\<tau> T2]"
+ by (rule E_TAbs)
+ moreover from Y have "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 = (\<lambda>Y<:T11. [(Y, X)] \<bullet> t12) \<cdot>\<^sub>\<tau> T2"
+ by (auto simp add: trm.inject alpha' fresh_prod fresh_atm)
+ ultimately have "(\<lambda>X<:T11. t12) \<cdot>\<^sub>\<tau> T2 \<longmapsto> ([(Y, X)] \<bullet> t12)[Y \<mapsto>\<^sub>\<tau> T2]"
+ by simp
+ with Y show ?thesis by (simp add: subst_trm_ty_rename)
+qed
+
+equivariance eval
+
+nominal_inductive eval
+ by (simp_all add: abs_fresh ty_vrs_fresh subst_trm_fresh_tyvar
+ subst_trm_fresh_var subst_trm_ty_fresh')
+
+inductive_cases eval_inv_auto[elim]:
+ "Var x \<longmapsto> t'"
+ "(\<lambda>x:T. t) \<longmapsto> t'"
+ "(\<lambda>X<:T. t) \<longmapsto> t'"
+
+lemma ty_domain_cons:
+ shows "ty_domain (\<Gamma>@[VarB X Q]@\<Delta>) = ty_domain (\<Gamma>@\<Delta>)"
+by (induct \<Gamma>, auto)
+
+lemma closed_in_cons:
+ assumes "S closed_in (\<Gamma> @ VarB X Q # \<Delta>)"
+ shows "S closed_in (\<Gamma>@\<Delta>)"
+using assms ty_domain_cons closed_in_def by auto
+
+lemma closed_in_weaken: "T closed_in (\<Delta> @ \<Gamma>) \<Longrightarrow> T closed_in (\<Delta> @ B # \<Gamma>)"
+ by (auto simp add: closed_in_def domains_append)
+
+lemma closed_in_weaken': "T closed_in \<Gamma> \<Longrightarrow> T closed_in (\<Delta> @ \<Gamma>)"
+ by (auto simp add: closed_in_def domains_append)
+
+lemma valid_subst:
+ assumes ok: "\<turnstile> (\<Delta> @ TVarB X Q # \<Gamma>) ok"
+ and closed: "P closed_in \<Gamma>"
+ shows "\<turnstile> (\<Delta>[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" using ok closed
+ apply (induct \<Delta>)
+ apply simp_all
+ apply (erule validE)
+ apply assumption
+ apply (erule validE)
+ apply simp
+ apply (rule valid_consT)
+ apply assumption
+ apply (simp add: domains_append ty_domain_subst)
+ apply (simp add: fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst] finite_domains)
+ apply (rule_tac S=Q in subst_closed_in')
+ apply (simp add: closed_in_def domains_append ty_domain_subst)
+ apply (simp add: closed_in_def domains_append)
+ apply blast
+ apply simp
+ apply (rule valid_cons)
+ apply assumption
+ apply (simp add: domains_append trm_domain_subst)
+ apply (rule_tac S=Q in subst_closed_in')
+ apply (simp add: closed_in_def domains_append ty_domain_subst)
+ apply (simp add: closed_in_def domains_append)
+ apply blast
+ done
+
+lemma ty_domain_vrs:
+ shows "ty_domain (G @ [VarB x Q] @ D) = ty_domain (G @ D)"
+by (induct G, auto)
+
+lemma valid_cons':
+ assumes "\<turnstile> (\<Gamma> @ VarB x Q # \<Delta>) ok"
+ shows "\<turnstile> (\<Gamma> @ \<Delta>) ok"
+ using assms
+proof (induct \<Gamma>' \<equiv> "\<Gamma> @ VarB x Q # \<Delta>" arbitrary: \<Gamma> \<Delta>)
+ case valid_nil
+ have "[] = \<Gamma> @ VarB x Q # \<Delta>" by fact
+ then have "False" by auto
+ then show ?case by auto
+next
+ case (valid_consT G X T)
+ then show ?case
+ proof (cases \<Gamma>)
+ case Nil
+ with valid_consT show ?thesis by simp
+ next
+ case (Cons b bs)
+ with valid_consT
+ have "\<turnstile> (bs @ \<Delta>) ok" by simp
+ moreover from Cons and valid_consT have "X \<sharp> ty_domain (bs @ \<Delta>)"
+ by (simp add: domains_append)
+ moreover from Cons and valid_consT have "T closed_in (bs @ \<Delta>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately have "\<turnstile> (TVarB X T # bs @ \<Delta>) ok"
+ by (rule valid_rel.valid_consT)
+ with Cons and valid_consT show ?thesis by simp
+ qed
+next
+ case (valid_cons G x T)
+ then show ?case
+ proof (cases \<Gamma>)
+ case Nil
+ with valid_cons show ?thesis by simp
+ next
+ case (Cons b bs)
+ with valid_cons
+ have "\<turnstile> (bs @ \<Delta>) ok" by simp
+ moreover from Cons and valid_cons have "x \<sharp> trm_domain (bs @ \<Delta>)"
+ by (simp add: domains_append finite_domains
+ fresh_fin_insert [OF pt_vrs_inst at_vrs_inst fs_vrs_inst])
+ moreover from Cons and valid_cons have "T closed_in (bs @ \<Delta>)"
+ by (simp add: closed_in_def domains_append)
+ ultimately have "\<turnstile> (VarB x T # bs @ \<Delta>) ok"
+ by (rule valid_rel.valid_cons)
+ with Cons and valid_cons show ?thesis by simp
+ qed
+qed
+
+text {* A.5(6) *}
+
+lemma type_weaken:
+ assumes "(\<Delta>@\<Gamma>) \<turnstile> t : T"
+ and "\<turnstile> (\<Delta> @ B # \<Gamma>) ok"
+ shows "(\<Delta> @ B # \<Gamma>) \<turnstile> t : T"
+using assms
+proof(nominal_induct \<Gamma>'\<equiv> "\<Delta> @ \<Gamma>" t T avoiding: \<Delta> \<Gamma> B rule: typing.strong_induct)
+ case (T_Var x' T \<Gamma>' \<Gamma>'' \<Delta>')
+ then show ?case by auto
+next
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1 T\<^isub>2 t\<^isub>2 \<Gamma> \<Delta>)
+ then show ?case by force
+next
+ case (T_Abs y T\<^isub>1 \<Gamma>' t\<^isub>2 T\<^isub>2 \<Delta> \<Gamma>)
+ then have "VarB y T\<^isub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then have closed: "T\<^isub>1 closed_in (\<Delta> @ \<Gamma>)"
+ by (auto dest: typing_ok)
+ have "\<turnstile> (VarB y T\<^isub>1 # \<Delta> @ B # \<Gamma>) ok"
+ apply (rule valid_cons)
+ apply (rule T_Abs)
+ apply (simp add: domains_append
+ fresh_fin_insert [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ fresh_fin_union [OF pt_vrs_inst at_vrs_inst fs_vrs_inst]
+ finite_domains finite_vrs fresh_vrs_of T_Abs fresh_trm_domain)
+ apply (rule closed_in_weaken)
+ apply (rule closed)
+ done
+ then have "\<turnstile> ((VarB y T\<^isub>1 # \<Delta>) @ B # \<Gamma>) ok" by simp
+ then have "(VarB y T\<^isub>1 # \<Delta>) @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2"
+ by (rule T_Abs) (simp add: T_Abs)
+ then have "VarB y T\<^isub>1 # \<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then show ?case by (rule typing.T_Abs)
+next
+ case (T_Sub \<Gamma>' t S T \<Delta> \<Gamma>)
+ from `\<turnstile> (\<Delta> @ B # \<Gamma>) ok` and `\<Gamma>' = \<Delta> @ \<Gamma>`
+ have "\<Delta> @ B # \<Gamma> \<turnstile> t : S" by (rule T_Sub)
+ moreover from `\<Gamma>'\<turnstile>S<:T` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+ have "(\<Delta> @ B # \<Gamma>)\<turnstile>S<:T"
+ by (rule weakening) (simp add: extends_def T_Sub)
+ ultimately show ?case by (rule typing.T_Sub)
+next
+ case (T_TAbs X T\<^isub>1 \<Gamma>' t\<^isub>2 T\<^isub>2 \<Delta> \<Gamma>)
+ then have "TVarB X T\<^isub>1 # \<Delta> @ \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then have closed: "T\<^isub>1 closed_in (\<Delta> @ \<Gamma>)"
+ by (auto dest: typing_ok)
+ have "\<turnstile> (TVarB X T\<^isub>1 # \<Delta> @ B # \<Gamma>) ok"
+ apply (rule valid_consT)
+ apply (rule T_TAbs)
+ apply (simp add: domains_append
+ fresh_fin_insert [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ fresh_fin_union [OF pt_tyvrs_inst at_tyvrs_inst fs_tyvrs_inst]
+ finite_domains finite_vrs tyvrs_fresh T_TAbs fresh_domain)
+ apply (rule closed_in_weaken)
+ apply (rule closed)
+ done
+ then have "\<turnstile> ((TVarB X T\<^isub>1 # \<Delta>) @ B # \<Gamma>) ok" by simp
+ then have "(TVarB X T\<^isub>1 # \<Delta>) @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2"
+ by (rule T_TAbs) (simp add: T_TAbs)
+ then have "TVarB X T\<^isub>1 # \<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2" by simp
+ then show ?case by (rule typing.T_TAbs)
+next
+ case (T_TApp X \<Gamma>' t\<^isub>1 T2 T11 T12 \<Delta> \<Gamma>)
+ have "\<Delta> @ B # \<Gamma> \<turnstile> t\<^isub>1 : (\<forall>X<:T11. T12)"
+ by (rule T_TApp)+
+ moreover from `\<Gamma>'\<turnstile>T2<:T11` and `\<turnstile> (\<Delta> @ B # \<Gamma>) ok`
+ have "(\<Delta> @ B # \<Gamma>)\<turnstile>T2<:T11"
+ by (rule weakening) (simp add: extends_def T_TApp)
+ ultimately show ?case by (rule better_T_TApp)
+qed
+
+lemma type_weaken':
+ "\<Gamma> \<turnstile> t : T \<Longrightarrow> \<turnstile> (\<Delta>@\<Gamma>) ok \<Longrightarrow> (\<Delta>@\<Gamma>) \<turnstile> t : T"
+ apply (induct \<Delta>)
+ apply simp_all
+ apply (erule validE)
+ apply (insert type_weaken [of "[]", simplified])
+ apply simp_all
+ done
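+
+text {* Editor's sketch (not part of this changeset): the single-binding
+instance of weakening for typing, i.e. the instance inserted in the
+proof of @{text "type_weaken'"}, following the idiom used for
+@{text "subst_closed_in'"} above. *}
+
+lemmas type_weaken_cons = type_weaken [where \<Delta>="[]", simplified]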
+
+text {* A.6 *}
+
+lemma strengthening:
+ assumes "(\<Gamma> @ VarB x Q # \<Delta>) \<turnstile> S <: T"
+ shows "(\<Gamma>@\<Delta>) \<turnstile> S <: T"
+ using assms
+proof (induct \<Gamma>' \<equiv> "\<Gamma> @ VarB x Q # \<Delta>" S T arbitrary: \<Gamma>)
+ case (SA_Top G' S G)
+ then have "\<turnstile> (G @ \<Delta>) ok" by (auto dest: valid_cons')
+ moreover have "S closed_in (G @ \<Delta>)" using SA_Top by (auto dest: closed_in_cons)
+ ultimately show ?case using subtype_of.SA_Top by auto
+next
+ case (SA_refl_TVar G X' G')
+ then have "\<turnstile> (G' @ VarB x Q # \<Delta>) ok" by simp
+ then have h1:"\<turnstile> (G' @ \<Delta>) ok" by (auto dest: valid_cons')
+ have "X' \<in> ty_domain (G' @ VarB x Q # \<Delta>)" using SA_refl_TVar by auto
+ then have h2:"X' \<in> ty_domain (G' @ \<Delta>)" using ty_domain_vrs by auto
+ show ?case using h1 h2 by auto
+next
+ case (SA_all G T1 S1 X S2 T2 G')
+ have ih1:"TVarB X T1 # G = (TVarB X T1 # G') @ VarB x Q # \<Delta> \<Longrightarrow> ((TVarB X T1 # G') @ \<Delta>)\<turnstile>S2<:T2" by fact
+ then have h1:"(TVarB X T1 # (G' @ \<Delta>))\<turnstile>S2<:T2" using SA_all by auto
+ have ih2:"G = G' @ VarB x Q # \<Delta> \<Longrightarrow> (G' @ \<Delta>)\<turnstile>T1<:S1" by fact
+ then have h2:"(G' @ \<Delta>)\<turnstile>T1<:S1" using SA_all by auto
+ then show ?case using h1 h2 by auto
+qed (auto)
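+
+text {* Editor's sketch (not part of this changeset): the head-binding
+instance of strengthening, in the same style as @{text "subst_closed_in'"}. *}
+
+lemmas strengthening' = strengthening [where \<Gamma>="[]", simplified]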
+
+lemma narrow_type: -- {* A.7 *}
+ assumes H: "\<Delta> @ (TVarB X Q) # \<Gamma> \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> \<Delta> @ (TVarB X P) # \<Gamma> \<turnstile> t : T"
+ using H
+ proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ (TVarB X Q) # \<Gamma>" t T avoiding: P arbitrary: \<Delta> rule: typing.strong_induct)
+ case (T_Var x T G P D)
+ then have "VarB x T \<in> set (D @ TVarB X P # \<Gamma>)"
+ and "\<turnstile> (D @ TVarB X P # \<Gamma>) ok"
+ by (auto intro: replace_type dest!: subtype_implies_closed)
+ then show ?case by auto
+ next
+ case (T_App G t1 T1 T2 t2 P D)
+ then show ?case by force
+ next
+ case (T_Abs x T1 G t2 T2 P D)
+ then show ?case by (fastsimp dest: typing_ok)
+ next
+ case (T_Sub G t S T D)
+ then show ?case using subtype_narrow by fastsimp
+ next
+ case (T_TAbs X' T1 G t2 T2 P D)
+ then show ?case by (fastsimp dest: typing_ok)
+ next
+ case (T_TApp X' G t1 T2 T11 T12 P D)
+ then have "D @ TVarB X P # \<Gamma> \<turnstile> t1 : Forall X' T12 T11" by fastsimp
+ moreover have "(D @ [TVarB X Q] @ \<Gamma>) \<turnstile> T2<:T11" using T_TApp by auto
+ then have "(D @ [TVarB X P] @ \<Gamma>) \<turnstile> T2<:T11" using `\<Gamma>\<turnstile>P<:Q`
+ by (rule subtype_narrow)
+ moreover from T_TApp have "X' \<sharp> (D @ TVarB X P # \<Gamma>, t1, T2)"
+ by (simp add: fresh_list_append fresh_list_cons fresh_prod)
+ ultimately show ?case by auto
+qed
+
+subsection {* Substitution lemmas *}
+
+subsubsection {* Substitution Preserves Typing *}
+
+theorem subst_type: -- {* A.8 *}
+ assumes H: "(\<Delta> @ (VarB x U) # \<Gamma>) \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> u : U \<Longrightarrow> \<Delta> @ \<Gamma> \<turnstile> t[x \<mapsto> u] : T" using H
+ proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ (VarB x U) # \<Gamma>" t T avoiding: x u arbitrary: \<Delta> rule: typing.strong_induct)
+ case (T_Var y T G x u D)
+ show ?case
+ proof (cases "x = y")
+ assume eq:"x=y"
+ then have "T=U" using T_Var uniqueness_of_ctxt' by auto
+ then show ?case using eq T_Var
+ by (auto intro: type_weaken' dest: valid_cons')
+ next
+ assume "x\<noteq>y"
+ then show ?case using T_Var
+ by (auto simp add:binding.inject dest: valid_cons')
+ qed
+ next
+ case (T_App G t1 T1 T2 t2 x u D)
+ then show ?case by force
+ next
+ case (T_Abs y T1 G t2 T2 x u D)
+ then show ?case by force
+ next
+ case (T_Sub G t S T x u D)
+ then have "D @ \<Gamma> \<turnstile> t[x \<mapsto> u] : S" by auto
+ moreover have "(D @ \<Gamma>) \<turnstile> S<:T" using T_Sub by (auto dest: strengthening)
+ ultimately show ?case by auto
+ next
+ case (T_TAbs X T1 G t2 T2 x u D)
+ from `TVarB X T1 # G \<turnstile> t2 : T2` have "X \<sharp> T1"
+ by (auto simp add: valid_ty_domain_fresh dest: typing_ok intro!: closed_in_fresh)
+ with `X \<sharp> u` and T_TAbs show ?case by fastsimp
+ next
+ case (T_TApp X G t1 T2 T11 T12 x u D)
+ then have "(D@\<Gamma>) \<turnstile>T2<:T11" using T_TApp by (auto dest: strengthening)
+ then show "((D @ \<Gamma>) \<turnstile> ((t1 \<cdot>\<^sub>\<tau> T2)[x \<mapsto> u]) : (T12[X \<mapsto> T2]\<^sub>\<tau>))" using T_TApp
+ by (force simp add: fresh_prod fresh_list_append fresh_list_cons subst_trm_fresh_tyvar)
+qed
+
+subsubsection {* Type Substitution Preserves Subtyping *}
+
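+text {* Substituting @{text P} for the type variable @{text X} throughout
+  the context suffix and both sides of the judgement preserves subtyping.
+  In the @{text SA_trans_TVar} case with @{text "X = Y"}, the bound must
+  be @{text Q} by uniqueness of context entries; freshness of @{text X}
+  for @{text Q} makes the substitution the identity there, and weakening
+  together with transitivity closes the gap. *}
+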
+lemma substT_subtype: -- {* A.10 *}
+ assumes H: "(\<Delta> @ ((TVarB X Q) # \<Gamma>)) \<turnstile> S <: T"
+ shows "\<Gamma> \<turnstile> P <: Q \<Longrightarrow> (\<Delta>[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S[X \<mapsto> P]\<^sub>\<tau> <: T[X \<mapsto> P]\<^sub>\<tau>"
+ using H
+proof (nominal_induct \<Gamma>' \<equiv> "\<Delta> @ TVarB X Q # \<Gamma>" S T avoiding: X P arbitrary: \<Delta> rule: subtype_of.strong_induct)
+ case (SA_Top G S X P D)
+ then have "\<turnstile> (D @ TVarB X Q # \<Gamma>) ok" by simp
+ moreover have closed: "P closed_in \<Gamma>" using SA_Top subtype_implies_closed by auto
+ ultimately have "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" by (rule valid_subst)
+ moreover from SA_Top have "S closed_in (D @ TVarB X Q # \<Gamma>)" by simp
+ then have "S[X \<mapsto> P]\<^sub>\<tau> closed_in (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" using closed by (rule subst_closed_in)
+ ultimately show ?case by auto
+next
+ case (SA_trans_TVar Y S G T X P D)
+ have h:"G\<turnstile>S<:T" by fact
+ then have ST: "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S[X \<mapsto> P]\<^sub>\<tau> <: T[X \<mapsto> P]\<^sub>\<tau>" using SA_trans_TVar by auto
+ from `G\<turnstile>S<:T` have G_ok: "\<turnstile> G ok" by (rule subtype_implies_ok)
+ from G_ok and SA_trans_TVar have X\<Gamma>_ok: "\<turnstile> (TVarB X Q # \<Gamma>) ok"
+ by (auto intro: validE_append)
+ show "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> Tvar Y[X \<mapsto> P]\<^sub>\<tau><:T[X \<mapsto> P]\<^sub>\<tau>"
+ proof (cases "X = Y")
+ assume eq: "X = Y"
+ from eq and SA_trans_TVar have "TVarB Y Q \<in> set G" by simp
+ with G_ok have QS: "Q = S" using `TVarB Y S \<in> set G` by (rule uniqueness_of_ctxt)
+ from X\<Gamma>_ok have "X \<sharp> ty_domain \<Gamma>" and "Q closed_in \<Gamma>" by auto
+ then have XQ: "X \<sharp> Q" by (rule closed_in_fresh)
+ note `\<Gamma>\<turnstile>P<:Q`
+ moreover from ST have "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" by (rule subtype_implies_ok)
+ moreover have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) extends \<Gamma>" by (simp add: extends_def)
+ ultimately have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:Q" by (rule weakening)
+ with QS have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:S" by simp
+ moreover from XQ and ST and QS have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> S<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (simp add: type_subst_identity)
+ ultimately have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>) \<turnstile> P<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (rule subtype_transitivity)
+ with eq show ?case by simp
+ next
+ assume neq: "X \<noteq> Y"
+ with SA_trans_TVar have "TVarB Y S \<in> set D \<or> TVarB Y S \<in> set \<Gamma>"
+ by (simp add: binding.inject)
+ then show ?case
+ proof
+ assume "TVarB Y S \<in> set D"
+ then have "TVarB Y (S[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_TVarB)
+ then have "TVarB Y (S[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" by simp
+ with neq and ST show ?thesis by auto
+ next
+ assume Y: "TVarB Y S \<in> set \<Gamma>"
+ from X\<Gamma>_ok have "X \<sharp> ty_domain \<Gamma>" and "\<turnstile> \<Gamma> ok" by auto
+ then have "X \<sharp> \<Gamma>" by (simp add: valid_ty_domain_fresh)
+ with Y have "X \<sharp> S"
+ by (induct \<Gamma>) (auto simp add: fresh_list_nil fresh_list_cons)
+ with ST have "(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>S<:T[X \<mapsto> P]\<^sub>\<tau>"
+ by (simp add: type_subst_identity)
+ moreover from Y have "TVarB Y S \<in> set (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)" by simp
+ ultimately show ?thesis using neq by auto
+ qed
+ qed
+next
+ case (SA_refl_TVar G Y X P D)
+ then have "\<turnstile> (D @ TVarB X Q # \<Gamma>) ok" by simp
+ moreover from SA_refl_TVar have closed: "P closed_in \<Gamma>"
+ by (auto dest: subtype_implies_closed)
+ ultimately have ok: "\<turnstile> (D[X \<mapsto> P]\<^sub>e @ \<Gamma>) ok" using valid_subst by auto
+ from closed have closed': "P closed_in (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)"
+ by (simp add: closed_in_weaken')
+ show ?case
+ proof (cases "X = Y")
+ assume "X = Y"
+ with closed' and ok show ?thesis
+ by (auto intro: subtype_reflexivity)
+ next
+ assume neq: "X \<noteq> Y"
+ with SA_refl_TVar have "Y \<in> ty_domain (D[X \<mapsto> P]\<^sub>e @ \<Gamma>)"
+ by (simp add: ty_domain_subst domains_append)
+ with neq and ok show ?thesis by auto
+ qed
+next
+ case (SA_arrow G T1 S1 S2 T2 X P D)
+ then have h1:"(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>T1[X \<mapsto> P]\<^sub>\<tau><:S1[X \<mapsto> P]\<^sub>\<tau>" using SA_arrow by auto
+ from SA_arrow have h2:"(D[X \<mapsto> P]\<^sub>e @ \<Gamma>)\<turnstile>S2[X \<mapsto> P]\<^sub>\<tau><:T2[X \<mapsto> P]\<^sub>\<tau>" using SA_arrow by auto
+ show ?case using subtype_of.SA_arrow h1 h2 by auto
+next
+ case (SA_all G T1 S1 Y S2 T2 X P D)
+ then have Y: "Y \<sharp> ty_domain (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_ok intro: fresh_domain)
+ moreover from SA_all have "S1 closed_in (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_closed)
+ ultimately have S1: "Y \<sharp> S1" by (rule closed_in_fresh)
+ from SA_all have "T1 closed_in (D @ TVarB X Q # \<Gamma>)"
+ by (auto dest: subtype_implies_closed)
+ with Y have T1: "Y \<sharp> T1" by (rule closed_in_fresh)
+ with SA_all and S1 show ?case by force
+qed
+
+subsubsection {* Type Substitution Preserves Typing *}
+
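+text {* The typing analogue of the previous lemma.  In the variable case
+  the binding either lies in the substituted suffix @{text D}, or in
+  @{text G}, where @{text X} is fresh and the substitution is the
+  identity. *}
+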
+theorem substT_type: -- {* A.11 *}
+ assumes H: "(D @ TVarB X Q # G) \<turnstile> t : T"
+ shows "G \<turnstile> P <: Q \<Longrightarrow>
+ (D[X \<mapsto> P]\<^sub>e @ G) \<turnstile> t[X \<mapsto>\<^sub>\<tau> P] : T[X \<mapsto> P]\<^sub>\<tau>" using H
+proof (nominal_induct \<Gamma>'\<equiv>"(D @ TVarB X Q # G)" t T avoiding: X P arbitrary: D rule: typing.strong_induct)
+ case (T_Var x T G' X P D')
+ have "G\<turnstile>P<:Q" by fact
+ then have "P closed_in G" using subtype_implies_closed by auto
+ moreover have "\<turnstile> (D' @ TVarB X Q # G) ok" using T_Var by auto
+ ultimately have "\<turnstile> (D'[X \<mapsto> P]\<^sub>e @ G) ok" using valid_subst by auto
+ moreover have "VarB x T \<in> set (D' @ TVarB X Q # G)" using T_Var by auto
+ then have "VarB x T \<in> set D' \<or> VarB x T \<in> set G" by simp
+ then have "(VarB x (T[X \<mapsto> P]\<^sub>\<tau>)) \<in> set (D'[X \<mapsto> P]\<^sub>e @ G)"
+ proof
+ assume "VarB x T \<in> set D'"
+ then have "VarB x (T[X \<mapsto> P]\<^sub>\<tau>) \<in> set (D'[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_VarB)
+ then show ?thesis by simp
+ next
+ assume x: "VarB x T \<in> set G"
+ from T_Var have ok: "\<turnstile> G ok" by (auto dest: subtype_implies_ok)
+ then have "X \<sharp> ty_domain G" using T_Var by (auto dest: validE_append)
+ with ok have "X \<sharp> G" by (simp add: valid_ty_domain_fresh)
+ moreover from x have "VarB x T \<in> set (D' @ G)" by simp
+ then have "VarB x (T[X \<mapsto> P]\<^sub>\<tau>) \<in> set ((D' @ G)[X \<mapsto> P]\<^sub>e)"
+ by (rule ctxt_subst_mem_VarB)
+ ultimately show ?thesis
+ by (simp add: ctxt_subst_append ctxt_subst_identity)
+ qed
+ ultimately show ?case by auto
+next
+ case (T_App G' t1 T1 T2 t2 X P D')
+ then have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t1[X \<mapsto>\<^sub>\<tau> P] : (T1 \<rightarrow> T2)[X \<mapsto> P]\<^sub>\<tau>" by auto
+ moreover from T_App have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t2[X \<mapsto>\<^sub>\<tau> P] : T1[X \<mapsto> P]\<^sub>\<tau>" by auto
+ ultimately show ?case by auto
+next
+ case (T_Abs x T1 G' t2 T2 X P D')
+ then show ?case by force
+next
+ case (T_Sub G' t S T X P D')
+ then show ?case using substT_subtype by force
+next
+ case (T_TAbs X' G' T1 t2 T2 X P D')
+ then have "X' \<sharp> ty_domain (D' @ TVarB X Q # G)"
+ and "G' closed_in (D' @ TVarB X Q # G)"
+ by (auto dest: typing_ok)
+ then have "X' \<sharp> G'" by (rule closed_in_fresh)
+ with T_TAbs show ?case by force
+next
+ case (T_TApp X' G' t1 T2 T11 T12 X P D')
+ then have "X' \<sharp> ty_domain (D' @ TVarB X Q # G)"
+ by (simp add: fresh_domain)
+ moreover from T_TApp have "T11 closed_in (D' @ TVarB X Q # G)"
+ by (auto dest: subtype_implies_closed)
+ ultimately have X': "X' \<sharp> T11" by (rule closed_in_fresh)
+ from T_TApp have "D'[X \<mapsto> P]\<^sub>e @ G \<turnstile> t1[X \<mapsto>\<^sub>\<tau> P] : (\<forall>X'<:T11. T12)[X \<mapsto> P]\<^sub>\<tau>"
+ by simp
+ with X' and T_TApp show ?case
+ by (auto simp add: fresh_atm type_substitution_lemma
+ fresh_list_append fresh_list_cons
+ ctxt_subst_fresh' type_subst_fresh subst_trm_ty_fresh
+ intro: substT_subtype)
+qed
+
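+text {* Inversion of typing for term abstractions, modulo subtyping: any
+  typing of @{text "\<lambda>x:S. s"} below an arrow type yields a typing of the
+  body together with the expected subtyping facts.  The strong induction
+  principle lets the bound variable be renamed to the given fresh
+  @{text x} via equivariance of the typing relation. *}
+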
+lemma Abs_type: -- {* A.13(1) *}
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>x:S. s) : T"
+ and H': "\<Gamma> \<turnstile> T <: U \<rightarrow> U'"
+ and H'': "x \<sharp> \<Gamma>"
+ obtains S' where "\<Gamma> \<turnstile> U <: S"
+ and "(VarB x S) # \<Gamma> \<turnstile> s : S'"
+ and "\<Gamma> \<turnstile> S' <: U'"
+ using H H' H''
+proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>x:S. s" T avoiding: x arbitrary: U U' S s rule: typing.strong_induct)
+ case (T_Abs y T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `\<Gamma> \<turnstile> T\<^isub>1 \<rightarrow> T\<^isub>2 <: U \<rightarrow> U'`
+ obtain ty1: "\<Gamma> \<turnstile> U <: S" and ty2: "\<Gamma> \<turnstile> T\<^isub>2 <: U'" using T_Abs
+ by cases (simp_all add: ty.inject trm.inject alpha fresh_atm)
+ from T_Abs have "VarB y S # \<Gamma> \<turnstile> [(y, x)] \<bullet> s : T\<^isub>2"
+ by (simp add: trm.inject alpha fresh_atm)
+ then have "[(y, x)] \<bullet> (VarB y S # \<Gamma>) \<turnstile> [(y, x)] \<bullet> [(y, x)] \<bullet> s : [(y, x)] \<bullet> T\<^isub>2"
+ by (rule typing.eqvt)
+ moreover from T_Abs have "y \<sharp> \<Gamma>"
+ by (auto dest!: typing_ok simp add: fresh_trm_domain)
+ ultimately have "VarB x S # \<Gamma> \<turnstile> s : T\<^isub>2" using T_Abs
+ by (perm_simp add: ty_vrs_prm_simp)
+ with ty1 show ?case using ty2 by (rule T_Abs)
+next
+ case (T_Sub \<Gamma> t S T)
+ then show ?case using subtype_transitivity by blast
+qed simp_all
+
+lemma subtype_reflexivity_from_typing:
+ assumes "\<Gamma> \<turnstile> t : T"
+ shows "\<Gamma> \<turnstile> T <: T"
+using assms subtype_reflexivity typing_ok typing_closed_in by simp
+
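+text {* The primed variants below specialise the inversion lemmas to the
+  case where the type is literally an arrow (respectively universal) type,
+  supplying the reflexive subtyping obtained from the typing itself. *}
+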
+lemma Abs_type':
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>x:S. s) : U \<rightarrow> U'"
+ and H': "x \<sharp> \<Gamma>"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(VarB x S) # \<Gamma> \<turnstile> s : S'"
+ and "\<Gamma> \<turnstile> S' <: U'"
+ using H subtype_reflexivity_from_typing [OF H] H'
+ by (rule Abs_type)
+
+lemma TAbs_type: -- {* A.13(2) *}
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>X<:S. s) : T"
+ and H': "\<Gamma> \<turnstile> T <: (\<forall>X<:U. U')"
+ and fresh: "X \<sharp> \<Gamma>" "X \<sharp> S" "X \<sharp> U"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(TVarB X U # \<Gamma>) \<turnstile> s : S'"
+ and "(TVarB X U # \<Gamma>) \<turnstile> S' <: U'"
+ using H H' fresh
+proof (nominal_induct \<Gamma> t \<equiv> "\<lambda>X<:S. s" T avoiding: X U U' S arbitrary: s rule: typing.strong_induct)
+ case (T_TAbs Y T\<^isub>1 \<Gamma> t\<^isub>2 T\<^isub>2)
+ from `TVarB Y T\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>2` have Y: "Y \<sharp> \<Gamma>"
+ by (auto dest!: typing_ok simp add: valid_ty_domain_fresh)
+ from `Y \<sharp> U'` and `Y \<sharp> X`
+ have "(\<forall>X<:U. U') = (\<forall>Y<:U. [(Y, X)] \<bullet> U')"
+ by (simp add: ty.inject alpha' fresh_atm)
+ with T_TAbs have "\<Gamma> \<turnstile> (\<forall>Y<:S. T\<^isub>2) <: (\<forall>Y<:U. [(Y, X)] \<bullet> U')" by (simp add: trm.inject)
+ then obtain ty1: "\<Gamma> \<turnstile> U <: S" and ty2: "(TVarB Y U # \<Gamma>) \<turnstile> T\<^isub>2 <: ([(Y, X)] \<bullet> U')" using T_TAbs Y
+ by (cases rule: subtype_of.strong_cases [where X=Y]) (simp_all add: ty.inject alpha abs_fresh)
+ note ty1
+ moreover from T_TAbs have "TVarB Y S # \<Gamma> \<turnstile> ([(Y, X)] \<bullet> s) : T\<^isub>2"
+ by (simp add: trm.inject alpha fresh_atm)
+ then have "[(Y, X)] \<bullet> (TVarB Y S # \<Gamma>) \<turnstile> [(Y, X)] \<bullet> [(Y, X)] \<bullet> s : [(Y, X)] \<bullet> T\<^isub>2"
+ by (rule typing.eqvt)
+ with `X \<sharp> \<Gamma>` `X \<sharp> S` Y `Y \<sharp> S` have "TVarB X S # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^isub>2"
+ by perm_simp
+ then have "TVarB X U # \<Gamma> \<turnstile> s : [(Y, X)] \<bullet> T\<^isub>2" using ty1
+ by (rule narrow_type [of "[]", simplified])
+ moreover from ty2 have "([(Y, X)] \<bullet> (TVarB Y U # \<Gamma>)) \<turnstile> ([(Y, X)] \<bullet> T\<^isub>2) <: ([(Y, X)] \<bullet> [(Y, X)] \<bullet> U')"
+ by (rule subtype_of.eqvt)
+ with `X \<sharp> \<Gamma>` `X \<sharp> U` Y `Y \<sharp> U` have "(TVarB X U # \<Gamma>) \<turnstile> ([(Y, X)] \<bullet> T\<^isub>2) <: U'"
+ by perm_simp
+ ultimately show ?case by (rule T_TAbs)
+next
+ case (T_Sub \<Gamma> t S T)
+ then show ?case using subtype_transitivity by blast
+qed simp_all
+
+lemma TAbs_type':
+ assumes H: "\<Gamma> \<turnstile> (\<lambda>X<:S. s) : (\<forall>X<:U. U')"
+ and fresh: "X \<sharp> \<Gamma>" "X \<sharp> S" "X \<sharp> U"
+ obtains S'
+ where "\<Gamma> \<turnstile> U <: S"
+ and "(TVarB X U # \<Gamma>) \<turnstile> s : S'"
+ and "(TVarB X U # \<Gamma>) \<turnstile> S' <: U'"
+ using H subtype_reflexivity_from_typing [OF H] fresh
+ by (rule TAbs_type)
+
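+text {* Subject reduction.  Fresh names @{text x} and @{text X} are chosen
+  so that the strong case analysis rule for evaluation applies; the beta
+  reduction cases combine the inversion lemmas with the substitution
+  lemmas proved above. *}
+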
+theorem preservation: -- {* A.20 *}
+ assumes H: "\<Gamma> \<turnstile> t : T"
+ shows "t \<longmapsto> t' \<Longrightarrow> \<Gamma> \<turnstile> t' : T" using H
+proof (nominal_induct avoiding: t' rule: typing.strong_induct)
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t\<^isub>2 t')
+ obtain x::vrs where x_fresh: "x \<sharp> (\<Gamma>, t\<^isub>1 \<cdot> t\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ obtain X::tyvrs where "X \<sharp> (t\<^isub>1 \<cdot> t\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ with `t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t'` show ?case
+ proof (cases rule: eval.strong_cases [where x=x and X=X])
+ case (E_Abs v\<^isub>2 T\<^isub>1\<^isub>1' t\<^isub>1\<^isub>2)
+ with T_App and x_fresh have h: "\<Gamma> \<turnstile> (\<lambda>x:T\<^isub>1\<^isub>1'. t\<^isub>1\<^isub>2) : T\<^isub>1\<^isub>1 \<rightarrow> T\<^isub>1\<^isub>2"
+ by (simp add: trm.inject fresh_prod)
+ moreover from x_fresh have "x \<sharp> \<Gamma>" by simp
+ ultimately obtain S'
+ where T\<^isub>1\<^isub>1: "\<Gamma> \<turnstile> T\<^isub>1\<^isub>1 <: T\<^isub>1\<^isub>1'"
+ and t\<^isub>1\<^isub>2: "(VarB x T\<^isub>1\<^isub>1') # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : S'"
+ and S': "\<Gamma> \<turnstile> S' <: T\<^isub>1\<^isub>2"
+ by (rule Abs_type') blast
+ from `\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1`
+ have "\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1'" using T\<^isub>1\<^isub>1 by (rule T_Sub)
+ with t\<^isub>1\<^isub>2 have "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[x \<mapsto> t\<^isub>2] : S'"
+ by (rule subst_type [where \<Delta>="[]", simplified])
+ hence "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[x \<mapsto> t\<^isub>2] : T\<^isub>1\<^isub>2" using S' by (rule T_Sub)
+ with E_Abs and x_fresh show ?thesis by (simp add: trm.inject fresh_prod)
+ next
+ case (E_App1 t''' t'' u)
+ hence "t\<^isub>1 \<longmapsto> t''" by (simp add:trm.inject)
+ hence "\<Gamma> \<turnstile> t'' : T\<^isub>1\<^isub>1 \<rightarrow> T\<^isub>1\<^isub>2" by (rule T_App)
+ hence "\<Gamma> \<turnstile> t'' \<cdot> t\<^isub>2 : T\<^isub>1\<^isub>2" using `\<Gamma> \<turnstile> t\<^isub>2 : T\<^isub>1\<^isub>1`
+ by (rule typing.T_App)
+ with E_App1 show ?thesis by (simp add:trm.inject)
+ next
+ case (E_App2 v t''' t'')
+ hence "t\<^isub>2 \<longmapsto> t''" by (simp add:trm.inject)
+ hence "\<Gamma> \<turnstile> t'' : T\<^isub>1\<^isub>1" by (rule T_App)
+ with T_App(1) have "\<Gamma> \<turnstile> t\<^isub>1 \<cdot> t'' : T\<^isub>1\<^isub>2"
+ by (rule typing.T_App)
+ with E_App2 show ?thesis by (simp add:trm.inject)
+ qed (simp_all add: fresh_prod)
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T\<^isub>2 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t')
+ obtain x::vrs where "x \<sharp> (t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2, t')"
+ by (rule exists_fresh) (rule fin_supp)
+ with `t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t'`
+ show ?case
+ proof (cases rule: eval.strong_cases [where X=X and x=x])
+ case (E_TAbs T\<^isub>1\<^isub>1' T\<^isub>2' t\<^isub>1\<^isub>2)
+ with T_TApp have "\<Gamma> \<turnstile> (\<lambda>X<:T\<^isub>1\<^isub>1'. t\<^isub>1\<^isub>2) : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2)" and "X \<sharp> \<Gamma>" and "X \<sharp> T\<^isub>1\<^isub>1'"
+ by (simp_all add: trm.inject)
+ moreover from `\<Gamma>\<turnstile>T\<^isub>2<:T\<^isub>1\<^isub>1` and `X \<sharp> \<Gamma>` have "X \<sharp> T\<^isub>1\<^isub>1"
+ by (blast intro: closed_in_fresh fresh_domain dest: subtype_implies_closed)
+ ultimately obtain S'
+ where "TVarB X T\<^isub>1\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : S'"
+ and "(TVarB X T\<^isub>1\<^isub>1 # \<Gamma>) \<turnstile> S' <: T\<^isub>1\<^isub>2"
+ by (rule TAbs_type') blast
+ hence "TVarB X T\<^isub>1\<^isub>1 # \<Gamma> \<turnstile> t\<^isub>1\<^isub>2 : T\<^isub>1\<^isub>2" by (rule T_Sub)
+ hence "\<Gamma> \<turnstile> t\<^isub>1\<^isub>2[X \<mapsto>\<^sub>\<tau> T\<^isub>2] : T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1`
+ by (rule substT_type [where D="[]", simplified])
+ with T_TApp and E_TAbs show ?thesis by (simp add: trm.inject)
+ next
+ case (E_TApp t''' t'' T)
+ from E_TApp have "t\<^isub>1 \<longmapsto> t''" by (simp add: trm.inject)
+ then have "\<Gamma> \<turnstile> t'' : (\<forall>X<:T\<^isub>1\<^isub>1. T\<^isub>1\<^isub>2)" by (rule T_TApp)
+ then have "\<Gamma> \<turnstile> t'' \<cdot>\<^sub>\<tau> T\<^isub>2 : T\<^isub>1\<^isub>2[X \<mapsto> T\<^isub>2]\<^sub>\<tau>" using `\<Gamma> \<turnstile> T\<^isub>2 <: T\<^isub>1\<^isub>1`
+ by (rule better_T_TApp)
+ with E_TApp show ?thesis by (simp add: trm.inject)
+ qed (simp_all add: fresh_prod)
+next
+ case (T_Sub \<Gamma> t S T t')
+ have "t \<longmapsto> t'" by fact
+ hence "\<Gamma> \<turnstile> t' : S" by (rule T_Sub)
+ moreover have "\<Gamma> \<turnstile> S <: T" by fact
+ ultimately show ?case by (rule typing.T_Sub)
+qed (auto)
+
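+text {* Canonical forms: in the empty context, a value of arrow type is a
+  term abstraction, and a value of universal type is a type abstraction.
+  The only non-trivial induction case is @{text T_Sub}, which is
+  discharged by inverting the subtyping assumption. *}
+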
+lemma Fun_canonical: -- {* A.14(1) *}
+ assumes ty: "[] \<turnstile> v : T\<^isub>1 \<rightarrow> T\<^isub>2"
+ shows "val v \<Longrightarrow> \<exists>x t S. v = (\<lambda>x:S. t)" using ty
+proof (induct \<Gamma>\<equiv>"[]::env" v T\<equiv>"T\<^isub>1 \<rightarrow> T\<^isub>2" arbitrary: T\<^isub>1 T\<^isub>2)
+ case (T_Sub \<Gamma> t S T)
+ hence "\<Gamma> \<turnstile> S <: T\<^isub>1 \<rightarrow> T\<^isub>2" by simp
+ then obtain S\<^isub>1 S\<^isub>2 where S: "S = S\<^isub>1 \<rightarrow> S\<^isub>2"
+ by cases (auto simp add: T_Sub)
+ with `val t` and `\<Gamma> = []` show ?case by (rule T_Sub)
+qed (auto)
+
+lemma TyAll_canonical: -- {* A.14(3) *}
+ fixes X::tyvrs
+ assumes ty: "[] \<turnstile> v : (\<forall>X<:T\<^isub>1. T\<^isub>2)"
+ shows "val v \<Longrightarrow> \<exists>X t S. v = (\<lambda>X<:S. t)" using ty
+proof (induct \<Gamma>\<equiv>"[]::env" v T\<equiv>"\<forall>X<:T\<^isub>1. T\<^isub>2" arbitrary: X T\<^isub>1 T\<^isub>2)
+ case (T_Sub \<Gamma> t S T)
+ hence "\<Gamma> \<turnstile> S <: (\<forall>X<:T\<^isub>1. T\<^isub>2)" by simp
+ then obtain X S\<^isub>1 S\<^isub>2 where S: "S = (\<forall>X<:S\<^isub>1. S\<^isub>2)"
+ by cases (auto simp add: T_Sub)
+ then show ?case using T_Sub by auto
+qed (auto)
+
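+text {* Progress: a closed well-typed term is either a value or can make
+  an evaluation step.  The application cases use the canonical forms
+  lemmas to expose a redex once the function part is a value. *}
+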
+theorem progress:
+ assumes "[] \<turnstile> t : T"
+ shows "val t \<or> (\<exists>t'. t \<longmapsto> t')"
+using assms
+proof (induct \<Gamma> \<equiv> "[]::env" t T)
+ case (T_App \<Gamma> t\<^isub>1 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2 t\<^isub>2)
+ hence "val t\<^isub>1 \<or> (\<exists>t'. t\<^isub>1 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume t\<^isub>1_val: "val t\<^isub>1"
+ with T_App obtain x t3 S where t\<^isub>1: "t\<^isub>1 = (\<lambda>x:S. t3)"
+ by (auto dest!: Fun_canonical)
+ from T_App have "val t\<^isub>2 \<or> (\<exists>t'. t\<^isub>2 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume "val t\<^isub>2"
+ with t\<^isub>1 have "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t3[x \<mapsto> t\<^isub>2]" by auto
+ thus ?case by auto
+ next
+ assume "\<exists>t'. t\<^isub>2 \<longmapsto> t'"
+ then obtain t' where "t\<^isub>2 \<longmapsto> t'" by auto
+ with t\<^isub>1_val have "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t\<^isub>1 \<cdot> t'" by auto
+ thus ?case by auto
+ qed
+ next
+ assume "\<exists>t'. t\<^isub>1 \<longmapsto> t'"
+ then obtain t' where "t\<^isub>1 \<longmapsto> t'" by auto
+ hence "t\<^isub>1 \<cdot> t\<^isub>2 \<longmapsto> t' \<cdot> t\<^isub>2" by auto
+ thus ?case by auto
+ qed
+next
+ case (T_TApp X \<Gamma> t\<^isub>1 T\<^isub>2 T\<^isub>1\<^isub>1 T\<^isub>1\<^isub>2)
+ hence "val t\<^isub>1 \<or> (\<exists>t'. t\<^isub>1 \<longmapsto> t')" by simp
+ thus ?case
+ proof
+ assume "val t\<^isub>1"
+ with T_TApp obtain x t S where "t\<^isub>1 = (\<lambda>x<:S. t)"
+ by (auto dest!: TyAll_canonical)
+ hence "t\<^isub>1 \<cdot>\<^sub>\<tau> T\<^isub>2 \<longmapsto> t[x \<mapsto>\<^sub>\<tau> T\<^isub>2]" by auto
+ thus ?case by auto
+ next
+ assume "\<exists>t'. t\<^isub>1 \<longmapsto> t'" thus ?case by auto
+ qed
+qed (auto)
+
+end
--- a/src/HOL/Nominal/Nominal.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/Nominal.thy Fri Feb 27 18:50:35 2009 +0100
@@ -397,6 +397,37 @@
lemmas fresh_star_prod = fresh_star_prod_list fresh_star_prod_set
+lemma fresh_star_set_eq: "set xs \<sharp>* c = xs \<sharp>* c"
+ by (simp add: fresh_star_def)
+
+lemma fresh_star_Un_elim:
+ "((S \<union> T) \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (S \<sharp>* c \<Longrightarrow> T \<sharp>* c \<Longrightarrow> PROP C)"
+ apply rule
+ apply (simp_all add: fresh_star_def)
+ apply (erule meta_mp)
+ apply blast
+ done
+
+lemma fresh_star_insert_elim:
+ "(insert x S \<sharp>* c \<Longrightarrow> PROP C) \<equiv> (x \<sharp> c \<Longrightarrow> S \<sharp>* c \<Longrightarrow> PROP C)"
+ by rule (simp_all add: fresh_star_def)
+
+lemma fresh_star_empty_elim:
+ "({} \<sharp>* c \<Longrightarrow> PROP C) \<equiv> PROP C"
+ by (simp add: fresh_star_def)
+
+text {* Normalization of freshness results; cf.\ @{text nominal_induct} *}
+
+lemma fresh_star_unit_elim:
+ shows "((a::'a set)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C"
+ and "((b::'a list)\<sharp>*() \<Longrightarrow> PROP C) \<equiv> PROP C"
+ by (simp_all add: fresh_star_def fresh_def supp_unit)
+
+lemma fresh_star_prod_elim:
+ shows "((a::'a set)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (a\<sharp>*x \<Longrightarrow> a\<sharp>*y \<Longrightarrow> PROP C)"
+ and "((b::'a list)\<sharp>*(x,y) \<Longrightarrow> PROP C) \<equiv> (b\<sharp>*x \<Longrightarrow> b\<sharp>*y \<Longrightarrow> PROP C)"
+ by (rule, simp_all add: fresh_star_prod)+
+
section {* Abstract Properties for Permutations and Atoms *}
(*=========================================================*)
@@ -1645,6 +1676,31 @@
apply(rule at)
done
+lemma pt_fresh_star_eqvt:
+ fixes pi :: "'x prm"
+ and x :: "'a"
+ and a :: "'x set"
+ and b :: "'x list"
+ assumes pt: "pt TYPE('a) TYPE('x)"
+ and at: "at TYPE('x)"
+ shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)"
+ and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)"
+ by (simp_all add: perm_bool pt_fresh_star_bij[OF pt, OF at])
+
+lemma pt_fresh_star_eqvt_ineq:
+ fixes pi::"'x prm"
+ and a::"'y set"
+ and b::"'y list"
+ and x::"'a"
+ assumes pta: "pt TYPE('a) TYPE('x)"
+ and ptb: "pt TYPE('y) TYPE('x)"
+ and at: "at TYPE('x)"
+ and cp: "cp TYPE('a) TYPE('x) TYPE('y)"
+ and dj: "disjoint TYPE('y) TYPE('x)"
+ shows "pi\<bullet>(a\<sharp>*x) = (pi\<bullet>a)\<sharp>*(pi\<bullet>x)"
+ and "pi\<bullet>(b\<sharp>*x) = (pi\<bullet>b)\<sharp>*(pi\<bullet>x)"
+ by (simp_all add: pt_fresh_star_bij_ineq[OF pta, OF ptb, OF at, OF cp] dj_perm_forget[OF dj] perm_bool)
+
lemma pt_fresh_bij1:
fixes pi :: "'x prm"
and x :: "'a"
--- a/src/HOL/Nominal/nominal_atoms.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/nominal_atoms.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,4 @@
(* title: HOL/Nominal/nominal_atoms.ML
- ID: $Id$
Author: Christian Urban and Stefan Berghofer, TU Muenchen
Declaration of atom types to be used in nominal datatypes.
@@ -784,6 +783,8 @@
val fresh_star_bij = @{thms "Nominal.pt_fresh_star_bij"};
val fresh_eqvt = @{thm "Nominal.pt_fresh_eqvt"};
val fresh_eqvt_ineq = @{thm "Nominal.pt_fresh_eqvt_ineq"};
+ val fresh_star_eqvt = @{thms "Nominal.pt_fresh_star_eqvt"};
+ val fresh_star_eqvt_ineq = @{thms "Nominal.pt_fresh_star_eqvt_ineq"};
val set_diff_eqvt = @{thm "Nominal.pt_set_diff_eqvt"};
val in_eqvt = @{thm "Nominal.pt_in_eqvt"};
val eq_eqvt = @{thm "Nominal.pt_eq_eqvt"};
@@ -947,13 +948,17 @@
in [(("fresh_bij", thms1 @ thms2),[])] end
||>> add_thmss_string
let val thms1 = inst_pt_at fresh_star_bij
- and thms2 = flat (map (fn ti => inst_pt_pt_at_cp [ti]) fresh_star_bij_ineq);
+ and thms2 = maps (fn ti => inst_pt_pt_at_cp [ti]) fresh_star_bij_ineq
in [(("fresh_star_bij", thms1 @ thms2),[])] end
||>> add_thmss_string
let val thms1 = inst_pt_at [fresh_eqvt]
and thms2 = inst_pt_pt_at_cp_dj [fresh_eqvt_ineq]
in [(("fresh_eqvt", thms1 @ thms2),[NominalThmDecls.eqvt_add])] end
||>> add_thmss_string
+ let val thms1 = inst_pt_at fresh_star_eqvt
+ and thms2 = maps (fn ti => inst_pt_pt_at_cp_dj [ti]) fresh_star_eqvt_ineq
+ in [(("fresh_star_eqvt", thms1 @ thms2),[NominalThmDecls.eqvt_add])] end
+ ||>> add_thmss_string
let val thms1 = inst_pt_at [in_eqvt]
in [(("in_eqvt", thms1),[NominalThmDecls.eqvt_add])] end
||>> add_thmss_string
--- a/src/HOL/Nominal/nominal_induct.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/nominal_induct.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,4 @@
-(* ID: $Id$
- Author: Christian Urban and Makarius
+(* Author: Christian Urban and Makarius
The nominal induct proof method.
*)
@@ -24,7 +23,8 @@
val split_all_tuples =
Simplifier.full_simplify (HOL_basic_ss addsimps
- [split_conv, split_paired_all, unit_all_eq1, thm "fresh_unit_elim", thm "fresh_prod_elim"]);
+ [split_conv, split_paired_all, unit_all_eq1, @{thm fresh_unit_elim}, @{thm fresh_prod_elim}] @
+ @{thms fresh_star_unit_elim} @ @{thms fresh_star_prod_elim});
(* prepare rule *)
--- a/src/HOL/Nominal/nominal_inductive.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/nominal_inductive.ML Fri Feb 27 18:50:35 2009 +0100
@@ -7,8 +7,8 @@
signature NOMINAL_INDUCTIVE =
sig
- val prove_strong_ind: string -> (string * string list) list -> theory -> Proof.state
- val prove_eqvt: string -> string list -> theory -> theory
+ val prove_strong_ind: string -> (string * string list) list -> local_theory -> Proof.state
+ val prove_eqvt: string -> string list -> local_theory -> local_theory
end
structure NominalInductive : NOMINAL_INDUCTIVE =
@@ -28,6 +28,8 @@
fun atomize_induct ctxt = Conv.fconv_rule (Conv.prems_conv ~1
(Conv.params_conv ~1 (K (Conv.prems_conv ~1 atomize_conv)) ctxt));
+fun preds_of ps t = gen_inter (op = o apfst dest_Free) (ps, Term.add_frees t []);
+
val fresh_prod = thm "fresh_prod";
val perm_bool = mk_meta_eq (thm "perm_bool");
@@ -142,9 +144,9 @@
fun first_order_mrs ths th = ths MRS
Thm.instantiate (first_order_matchs (cprems_of th) (map cprop_of ths)) th;
-fun prove_strong_ind s avoids thy =
+fun prove_strong_ind s avoids ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val ind_params = InductivePackage.params_of raw_induct;
@@ -158,8 +160,7 @@
commas_quote xs));
val induct_cases = map fst (fst (RuleCases.get (the
(Induct.lookup_inductP ctxt (hd names)))));
- val raw_induct' = Logic.unvarify (prop_of raw_induct);
- val elims' = map (Logic.unvarify o prop_of) elims;
+ val ([raw_induct'], ctxt') = Variable.import_terms false [prop_of raw_induct] ctxt;
val concls = raw_induct' |> Logic.strip_imp_concl |> HOLogic.dest_Trueprop |>
HOLogic.dest_conj |> map (HOLogic.dest_imp ##> strip_comb);
val ps = map (fst o snd) concls;
@@ -199,8 +200,8 @@
val ind_sort = if null atomTs then HOLogic.typeS
else Sign.certify_sort thy (map (fn T => Sign.intern_class thy
("fs_" ^ Sign.base_name (fst (dest_Type T)))) atomTs);
- val fs_ctxt_tyname = Name.variant (map fst (OldTerm.term_tfrees raw_induct')) "'n";
- val fs_ctxt_name = Name.variant (OldTerm.add_term_names (raw_induct', [])) "z";
+ val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt');
+ val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt';
val fsT = TFree (fs_ctxt_tyname, ind_sort);
val inductive_forall_def' = Drule.instantiate'
@@ -237,7 +238,7 @@
val prem = Logic.list_implies
(map mk_fresh bvars @ mk_distinct bvars @
map (fn prem =>
- if null (OldTerm.term_frees prem inter ps) then prem
+ if null (preds_of ps prem) then prem
else lift_prem prem) prems,
HOLogic.mk_Trueprop (lift_pred p ts));
val vs = map (Var o apfst (rpair 0)) (Term.rename_wrt_term prem params')
@@ -263,7 +264,7 @@
val vc_compat = map (fn (params, bvars, prems, (p, ts)) =>
map (fn q => list_all (params, incr_boundvars ~1 (Logic.list_implies
(List.mapPartial (fn prem =>
- if null (ps inter OldTerm.term_frees prem) then SOME prem
+ if null (preds_of ps prem) then SOME prem
else map_term (split_conj (K o I) names) prem prem) prems, q))))
(mk_distinct bvars @
maps (fn (t, T) => map (fn (u, U) => HOLogic.mk_Trueprop
@@ -309,8 +310,8 @@
[ex] ctxt
in (freshs1 @ [term_of cx], freshs2 @ ths, ctxt') end;
- fun mk_ind_proof thy thss =
- Goal.prove_global thy [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
+ fun mk_ind_proof ctxt' thss =
+ Goal.prove ctxt' [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
let val th = Goal.prove ctxt [] [] concl (fn {context, ...} =>
rtac raw_induct 1 THEN
EVERY (maps (fn ((((_, bvars, oprems, _), vc_compat_ths), ihyp), (vs, ihypt)) =>
@@ -352,7 +353,7 @@
(rev pis' @ pis) th));
val (gprems1, gprems2) = split_list
(map (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then (SOME th, mk_pi th)
+ if null (preds_of ps t) then (SOME th, mk_pi th)
else
(map_thm ctxt (split_conj (K o I) names)
(etac conjunct1 1) monos NONE th,
@@ -403,42 +404,42 @@
REPEAT (REPEAT (resolve_tac [conjI, impI] 1) THEN
etac impE 1 THEN atac 1 THEN REPEAT (etac @{thm allE_Nil} 1) THEN
asm_full_simp_tac (simpset_of thy) 1)
- end);
+ end) |> singleton (ProofContext.export ctxt' ctxt);
(** strong case analysis rule **)
val cases_prems = map (fn ((name, avoids), rule) =>
let
- val prem :: prems = Logic.strip_imp_prems rule;
- val concl = Logic.strip_imp_concl rule;
- val used = Term.add_free_names rule [];
+ val ([rule'], ctxt') = Variable.import_terms false [prop_of rule] ctxt;
+ val prem :: prems = Logic.strip_imp_prems rule';
+ val concl = Logic.strip_imp_concl rule'
in
(prem,
List.drop (snd (strip_comb (HOLogic.dest_Trueprop prem)), length ind_params),
concl,
- fst (fold_map (fn (prem, (_, avoid)) => fn used =>
+ fold_map (fn (prem, (_, avoid)) => fn ctxt =>
let
val prems = Logic.strip_assums_hyp prem;
val params = Logic.strip_params prem;
val bnds = fold (add_binders thy 0) prems [] @ mk_avoids params avoid;
- fun mk_subst (p as (s, T)) (i, j, used, ps, qs, is, ts) =
+ fun mk_subst (p as (s, T)) (i, j, ctxt, ps, qs, is, ts) =
if member (op = o apsnd fst) bnds (Bound i) then
let
- val s' = Name.variant used s;
+ val ([s'], ctxt') = Variable.variant_fixes [s] ctxt;
val t = Free (s', T)
- in (i + 1, j, s' :: used, ps, (t, T) :: qs, i :: is, t :: ts) end
- else (i + 1, j + 1, used, p :: ps, qs, is, Bound j :: ts);
- val (_, _, used', ps, qs, is, ts) = fold_rev mk_subst params
- (0, 0, used, [], [], [], [])
+ in (i + 1, j, ctxt', ps, (t, T) :: qs, i :: is, t :: ts) end
+ else (i + 1, j + 1, ctxt, p :: ps, qs, is, Bound j :: ts);
+ val (_, _, ctxt', ps, qs, is, ts) = fold_rev mk_subst params
+ (0, 0, ctxt, [], [], [], [])
in
- ((ps, qs, is, map (curry subst_bounds (rev ts)) prems), used')
- end) (prems ~~ avoids) used))
+ ((ps, qs, is, map (curry subst_bounds (rev ts)) prems), ctxt')
+ end) (prems ~~ avoids) ctxt')
end)
(InductivePackage.partition_rules' raw_induct (intrs ~~ avoids') ~~
- elims');
+ elims);
val cases_prems' =
- map (fn (prem, args, concl, prems) =>
+ map (fn (prem, args, concl, (prems, _)) =>
let
fun mk_prem (ps, [], _, prems) =
list_all (ps, Logic.list_implies (prems, concl))
@@ -462,9 +463,9 @@
val simp_fresh_atm = map
(Simplifier.simplify (HOL_basic_ss addsimps fresh_atm));
- fun mk_cases_proof thy ((((name, thss), elim), (prem, args, concl, prems)),
+ fun mk_cases_proof ((((name, thss), elim), (prem, args, concl, (prems, ctxt'))),
prems') =
- (name, Goal.prove_global thy [] (prem :: prems') concl
+ (name, Goal.prove ctxt' [] (prem :: prems') concl
(fn {prems = hyp :: hyps, context = ctxt1} =>
EVERY (rtac (hyp RS elim) 1 ::
map (fn (((_, vc_compat_ths), case_hyp), (_, qs, is, _)) =>
@@ -537,52 +538,54 @@
end) ctxt4 1)
val final = ProofContext.export ctxt3 ctxt2 [th]
in resolve_tac final 1 end) ctxt1 1)
- (thss ~~ hyps ~~ prems))))
+ (thss ~~ hyps ~~ prems))) |>
+ singleton (ProofContext.export ctxt' ctxt))
in
- thy |>
- ProofContext.init |>
- Proof.theorem_i NONE (fn thss => ProofContext.theory (fn thy =>
+ ctxt'' |>
+ Proof.theorem_i NONE (fn thss => fn ctxt =>
let
- val ctxt = ProofContext.init thy;
val rec_name = space_implode "_" (map Sign.base_name names);
+ val rec_qualified = Binding.qualify rec_name;
val ind_case_names = RuleCases.case_names induct_cases;
val induct_cases' = InductivePackage.partition_rules' raw_induct
(intrs ~~ induct_cases);
val thss' = map (map atomize_intr) thss;
val thsss = InductivePackage.partition_rules' raw_induct (intrs ~~ thss');
val strong_raw_induct =
- mk_ind_proof thy thss' |> InductivePackage.rulify;
- val strong_cases = map (mk_cases_proof thy ##> InductivePackage.rulify)
+ mk_ind_proof ctxt thss' |> InductivePackage.rulify;
+ val strong_cases = map (mk_cases_proof ##> InductivePackage.rulify)
(thsss ~~ elims ~~ cases_prems ~~ cases_prems');
val strong_induct =
if length names > 1 then
(strong_raw_induct, [ind_case_names, RuleCases.consumes 0])
else (strong_raw_induct RSN (2, rev_mp),
[ind_case_names, RuleCases.consumes 1]);
- val ([strong_induct'], thy') = thy |>
- Sign.add_path rec_name |>
- PureThy.add_thms [((Binding.name "strong_induct", #1 strong_induct), #2 strong_induct)];
+ val ((_, [strong_induct']), ctxt') = LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_induct"),
+ map (Attrib.internal o K) (#2 strong_induct)), [#1 strong_induct])
+ ctxt;
val strong_inducts =
ProjectRule.projects ctxt (1 upto length names) strong_induct'
in
- thy' |>
- PureThy.add_thmss [((Binding.name "strong_inducts", strong_inducts),
- [ind_case_names, RuleCases.consumes 1])] |> snd |>
- Sign.parent_path |>
- fold (fn ((name, elim), (_, cases)) =>
- Sign.add_path (Sign.base_name name) #>
- PureThy.add_thms [((Binding.name "strong_cases", elim),
- [RuleCases.case_names (map snd cases),
- RuleCases.consumes 1])] #> snd #>
- Sign.parent_path) (strong_cases ~~ induct_cases')
- end))
+ ctxt' |>
+ LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_inducts"),
+ [Attrib.internal (K ind_case_names),
+ Attrib.internal (K (RuleCases.consumes 1))]),
+ strong_inducts) |> snd |>
+ LocalTheory.notes Thm.theoremK (map (fn ((name, elim), (_, cases)) =>
+ ((Binding.name (NameSpace.qualified (Sign.base_name name) "strong_cases"),
+ [Attrib.internal (K (RuleCases.case_names (map snd cases))),
+ Attrib.internal (K (RuleCases.consumes 1))]), [([elim], [])]))
+ (strong_cases ~~ induct_cases')) |> snd
+ end)
(map (map (rulify_term thy #> rpair [])) vc_compat)
end;
-fun prove_eqvt s xatoms thy =
+fun prove_eqvt s xatoms ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val raw_induct = atomize_induct ctxt raw_induct;
@@ -594,6 +597,7 @@
(s, ths ~~ InductivePackage.infer_intro_vars th k ths))
(InductivePackage.partition_rules raw_induct intrs ~~
InductivePackage.arities_of raw_induct ~~ elims));
+ val k = length (InductivePackage.params_of raw_induct);
val atoms' = NominalAtoms.atoms_of thy;
val atoms =
if null xatoms then atoms' else
@@ -612,19 +616,21 @@
(NominalThmDecls.get_eqvt_thms ctxt @ perm_pi_simp) addsimprocs
[mk_perm_bool_simproc names,
NominalPermeq.perm_simproc_app, NominalPermeq.perm_simproc_fun];
- val t = Logic.unvarify (concl_of raw_induct);
- val pi = Name.variant (OldTerm.add_term_names (t, [])) "pi";
+ val (([t], [pi]), ctxt') = ctxt |>
+ Variable.import_terms false [concl_of raw_induct] ||>>
+ Variable.variant_fixes ["pi"];
val ps = map (fst o HOLogic.dest_imp)
(HOLogic.dest_conj (HOLogic.dest_Trueprop t));
- fun eqvt_tac pi (intr, vs) st =
+ fun eqvt_tac ctxt'' pi (intr, vs) st =
let
- fun eqvt_err s = error
- ("Could not prove equivariance for introduction rule\n" ^
- Syntax.string_of_term_global (theory_of_thm intr)
- (Logic.unvarify (prop_of intr)) ^ "\n" ^ s);
+ fun eqvt_err s =
+ let val ([t], ctxt''') = Variable.import_terms true [prop_of intr] ctxt
+ in error ("Could not prove equivariance for introduction rule\n" ^
+ Syntax.string_of_term ctxt''' t ^ "\n" ^ s)
+ end;
val res = SUBPROOF (fn {prems, params, ...} =>
let
- val prems' = map (fn th => the_default th (map_thm ctxt
+ val prems' = map (fn th => the_default th (map_thm ctxt'
(split_conj (K I) names) (etac conjunct2 1) monos NONE th)) prems;
val prems'' = map (fn th => Simplifier.simplify eqvt_ss
(mk_perm_bool (cterm_of thy pi) th)) prems';
@@ -632,29 +638,36 @@
map (cterm_of thy o NominalPackage.mk_perm [] pi o term_of) params)
intr
in (rtac intr' THEN_ALL_NEW (TRY o resolve_tac prems'')) 1
- end) ctxt 1 st
+ end) ctxt' 1 st
in
case (Seq.pull res handle THM (s, _, _) => eqvt_err s) of
NONE => eqvt_err ("Rule does not match goal\n" ^
- Syntax.string_of_term_global (theory_of_thm st) (hd (prems_of st)))
+ Syntax.string_of_term ctxt'' (hd (prems_of st)))
| SOME (th, _) => Seq.single th
end;
val thss = map (fn atom =>
let val pi' = Free (pi, NominalAtoms.mk_permT (Type (atom, [])))
in map (fn th => zero_var_indexes (th RS mp))
- (DatatypeAux.split_conj_thm (Goal.prove_global thy [] []
+ (DatatypeAux.split_conj_thm (Goal.prove ctxt' [] []
(HOLogic.mk_Trueprop (foldr1 HOLogic.mk_conj (map (fn p =>
- HOLogic.mk_imp (p, list_comb
- (apsnd (map (NominalPackage.mk_perm [] pi')) (strip_comb p)))) ps)))
- (fn _ => EVERY (rtac raw_induct 1 :: map (fn intr_vs =>
+ let
+ val (h, ts) = strip_comb p;
+ val (ts1, ts2) = chop k ts
+ in
+ HOLogic.mk_imp (p, list_comb (h, ts1 @
+ map (NominalPackage.mk_perm [] pi') ts2))
+ end) ps)))
+ (fn {context, ...} => EVERY (rtac raw_induct 1 :: map (fn intr_vs =>
full_simp_tac eqvt_ss 1 THEN
- eqvt_tac pi' intr_vs) intrs'))))
+ eqvt_tac context pi' intr_vs) intrs')) |>
+ singleton (ProofContext.export ctxt' ctxt)))
end) atoms
in
- fold (fn (name, ths) =>
- Sign.add_path (Sign.base_name name) #>
- PureThy.add_thmss [((Binding.name "eqvt", ths), [NominalThmDecls.eqvt_add])] #> snd #>
- Sign.parent_path) (names ~~ transp thss) thy
+ ctxt |>
+ LocalTheory.notes Thm.theoremK (map (fn (name, ths) =>
+ ((Binding.name (NameSpace.qualified (Sign.base_name name) "eqvt"),
+ [Attrib.internal (K NominalThmDecls.eqvt_add)]), [(ths, [])]))
+ (names ~~ transp thss)) |> snd
end;
@@ -665,17 +678,17 @@
val _ = OuterKeyword.keyword "avoids";
val _ =
- OuterSyntax.command "nominal_inductive"
+ OuterSyntax.local_theory_to_proof "nominal_inductive"
"prove equivariance and strong induction theorem for inductive predicate involving nominal datatypes" K.thy_goal
- (P.name -- Scan.optional (P.$$$ "avoids" |-- P.and_list1 (P.name --
+ (P.xname -- Scan.optional (P.$$$ "avoids" |-- P.and_list1 (P.name --
(P.$$$ ":" |-- Scan.repeat1 P.name))) [] >> (fn (name, avoids) =>
- Toplevel.print o Toplevel.theory_to_proof (prove_strong_ind name avoids)));
+ prove_strong_ind name avoids));
val _ =
- OuterSyntax.command "equivariance"
+ OuterSyntax.local_theory "equivariance"
"prove equivariance for inductive predicate involving nominal datatypes" K.thy_decl
- (P.name -- Scan.optional (P.$$$ "[" |-- P.list1 P.name --| P.$$$ "]") [] >>
- (fn (name, atoms) => Toplevel.theory (prove_eqvt name atoms)));
+ (P.xname -- Scan.optional (P.$$$ "[" |-- P.list1 P.name --| P.$$$ "]") [] >>
+ (fn (name, atoms) => prove_eqvt name atoms));
end;
--- a/src/HOL/Nominal/nominal_inductive2.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/nominal_inductive2.ML Fri Feb 27 18:50:35 2009 +0100
@@ -8,7 +8,7 @@
signature NOMINAL_INDUCTIVE2 =
sig
- val prove_strong_ind: string -> (string * string list) list -> theory -> Proof.state
+ val prove_strong_ind: string -> (string * string list) list -> local_theory -> Proof.state
end
structure NominalInductive2 : NOMINAL_INDUCTIVE2 =
@@ -28,6 +28,13 @@
fun atomize_induct ctxt = Conv.fconv_rule (Conv.prems_conv ~1
(Conv.params_conv ~1 (K (Conv.prems_conv ~1 atomize_conv)) ctxt));
+val fresh_postprocess =
+ Simplifier.full_simplify (HOL_basic_ss addsimps
+ [@{thm fresh_star_set_eq}, @{thm fresh_star_Un_elim},
+ @{thm fresh_star_insert_elim}, @{thm fresh_star_empty_elim}]);
+
+fun preds_of ps t = gen_inter (op = o apfst dest_Free) (ps, Term.add_frees t []);
+
val perm_bool = mk_meta_eq (thm "perm_bool");
val perm_boolI = thm "perm_boolI";
val (_, [perm_boolI_pi, _]) = Drule.strip_comb (snd (Thm.dest_comb
@@ -148,9 +155,9 @@
map (Envir.subst_vars env #> cterm_of thy) vs ~~ cts) th
end;
-fun prove_strong_ind s avoids thy =
+fun prove_strong_ind s avoids ctxt =
let
- val ctxt = ProofContext.init thy;
+ val thy = ProofContext.theory_of ctxt;
val ({names, ...}, {raw_induct, intrs, elims, ...}) =
InductivePackage.the_inductive ctxt (Sign.intern_const thy s);
val ind_params = InductivePackage.params_of raw_induct;
@@ -166,8 +173,7 @@
(Induct.lookup_inductP ctxt (hd names)))));
val induct_cases' = if null induct_cases then replicate (length intrs) ""
else induct_cases;
- val raw_induct' = Logic.unvarify (prop_of raw_induct);
- val elims' = map (Logic.unvarify o prop_of) elims;
+ val ([raw_induct'], ctxt') = Variable.import_terms false [prop_of raw_induct] ctxt;
val concls = raw_induct' |> Logic.strip_imp_concl |> HOLogic.dest_Trueprop |>
HOLogic.dest_conj |> map (HOLogic.dest_imp ##> strip_comb);
val ps = map (fst o snd) concls;
@@ -191,12 +197,15 @@
handle TERM _ =>
error ("Expression " ^ quote s ^ " to be avoided in case " ^
quote name ^ " is not a set type");
- val ps = map mk sets
+ fun add_set p [] = [p]
+ | add_set (t, T) ((u, U) :: ps) =
+ if T = U then
+ let val S = HOLogic.mk_setT T
+ in (Const (@{const_name "op Un"}, S --> S --> S) $ u $ t, T) :: ps
+ end
+ else (u, U) :: add_set (t, T) ps
in
- case duplicates op = (map snd ps) of
- [] => ps
- | Ts => error ("More than one set in case " ^ quote name ^
- " for type(s) " ^ commas_quote (map (Syntax.string_of_typ ctxt') Ts))
+ fold (mk #> add_set) sets []
end;
val prems = map (fn (prem, name) =>
@@ -221,8 +230,8 @@
val ind_sort = if null atomTs then HOLogic.typeS
else Sign.certify_sort thy (map (fn a => Sign.intern_class thy
("fs_" ^ Sign.base_name a)) atoms);
- val fs_ctxt_tyname = Name.variant (map fst (OldTerm.term_tfrees raw_induct')) "'n";
- val fs_ctxt_name = Name.variant (OldTerm.add_term_names (raw_induct', [])) "z";
+ val ([fs_ctxt_tyname], _) = Name.variants ["'n"] (Variable.names_of ctxt');
+ val ([fs_ctxt_name], ctxt'') = Variable.variant_fixes ["z"] ctxt';
val fsT = TFree (fs_ctxt_tyname, ind_sort);
val inductive_forall_def' = Drule.instantiate'
@@ -253,7 +262,7 @@
val prem = Logic.list_implies
(map mk_fresh sets @
map (fn prem =>
- if null (OldTerm.term_frees prem inter ps) then prem
+ if null (preds_of ps prem) then prem
else lift_prem prem) prems,
HOLogic.mk_Trueprop (lift_pred p ts));
in abs_params params' prem end) prems);
@@ -276,7 +285,7 @@
val (vc_compat, vc_compat') = map (fn (params, sets, prems, (p, ts)) =>
map (fn q => abs_params params (incr_boundvars ~1 (Logic.list_implies
(List.mapPartial (fn prem =>
- if null (ps inter OldTerm.term_frees prem) then SOME prem
+ if null (preds_of ps prem) then SOME prem
else map_term (split_conj (K o I) names) prem prem) prems, q))))
(maps (fn (t, T) => map (fn (u, U) => HOLogic.mk_Trueprop
(NominalPackage.fresh_star_const U T $ u $ t)) sets)
@@ -345,8 +354,8 @@
ths1 @ ths, ths2 @ [th1], ths3 @ [th2'], ctxt')
end;
- fun mk_ind_proof thy thss =
- Goal.prove_global thy [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
+ fun mk_ind_proof ctxt' thss =
+ Goal.prove ctxt' [] prems' concl' (fn {prems = ihyps, context = ctxt} =>
let val th = Goal.prove ctxt [] [] concl (fn {context, ...} =>
rtac raw_induct 1 THEN
EVERY (maps (fn (((((_, sets, oprems, _),
@@ -363,7 +372,7 @@
fold_rev (NominalPackage.mk_perm []) pis t) sets';
val (P, ts) = strip_comb (HOLogic.dest_Trueprop (term_of concl));
val gprems1 = List.mapPartial (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then SOME th
+ if null (preds_of ps t) then SOME th
else
map_thm ctxt' (split_conj (K o I) names)
(etac conjunct1 1) monos NONE th)
@@ -405,7 +414,7 @@
(fold_rev (mk_perm_bool o cterm_of thy)
(pis' @ pis) th));
val gprems2 = map (fn (th, t) =>
- if null (OldTerm.term_frees t inter ps) then mk_pi th
+ if null (preds_of ps t) then mk_pi th
else
mk_pi (the (map_thm ctxt (inst_conj_all names ps (rev pis''))
(inst_conj_all_tac (length pis'')) monos (SOME t) th)))
@@ -435,38 +444,42 @@
REPEAT (REPEAT (resolve_tac [conjI, impI] 1) THEN
etac impE 1 THEN atac 1 THEN REPEAT (etac @{thm allE_Nil} 1) THEN
asm_full_simp_tac (simpset_of thy) 1)
- end);
+ end) |>
+ fresh_postprocess |>
+ singleton (ProofContext.export ctxt' ctxt);
in
- thy |>
- ProofContext.init |>
- Proof.theorem_i NONE (fn thss => ProofContext.theory (fn thy =>
+ ctxt'' |>
+ Proof.theorem_i NONE (fn thss => fn ctxt =>
let
- val ctxt = ProofContext.init thy;
val rec_name = space_implode "_" (map Sign.base_name names);
+ val rec_qualified = Binding.qualify rec_name;
val ind_case_names = RuleCases.case_names induct_cases;
val induct_cases' = InductivePackage.partition_rules' raw_induct
(intrs ~~ induct_cases);
val thss' = map (map atomize_intr) thss;
val thsss = InductivePackage.partition_rules' raw_induct (intrs ~~ thss');
val strong_raw_induct =
- mk_ind_proof thy thss' |> InductivePackage.rulify;
+ mk_ind_proof ctxt thss' |> InductivePackage.rulify;
val strong_induct =
if length names > 1 then
(strong_raw_induct, [ind_case_names, RuleCases.consumes 0])
else (strong_raw_induct RSN (2, rev_mp),
[ind_case_names, RuleCases.consumes 1]);
- val ([strong_induct'], thy') = thy |>
- Sign.add_path rec_name |>
- PureThy.add_thms [((Binding.name "strong_induct", #1 strong_induct), #2 strong_induct)];
+ val ((_, [strong_induct']), ctxt') = LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_induct"),
+ map (Attrib.internal o K) (#2 strong_induct)), [#1 strong_induct])
+ ctxt;
val strong_inducts =
- ProjectRule.projects ctxt (1 upto length names) strong_induct'
+ ProjectRule.projects ctxt' (1 upto length names) strong_induct'
in
- thy' |>
- PureThy.add_thmss [((Binding.name "strong_inducts", strong_inducts),
- [ind_case_names, RuleCases.consumes 1])] |> snd |>
- Sign.parent_path
- end))
+ ctxt' |>
+ LocalTheory.note Thm.theoremK
+ ((rec_qualified (Binding.name "strong_inducts"),
+ [Attrib.internal (K ind_case_names),
+ Attrib.internal (K (RuleCases.consumes 1))]),
+ strong_inducts) |> snd
+ end)
(map (map (rulify_term thy #> rpair [])) vc_compat)
end;
@@ -476,11 +489,11 @@
local structure P = OuterParse and K = OuterKeyword in
val _ =
- OuterSyntax.command "nominal_inductive2"
+ OuterSyntax.local_theory_to_proof "nominal_inductive2"
"prove strong induction theorem for inductive predicate involving nominal datatypes" K.thy_goal
- (P.name -- Scan.optional (P.$$$ "avoids" |-- P.enum1 "|" (P.name --
+ (P.xname -- Scan.optional (P.$$$ "avoids" |-- P.enum1 "|" (P.name --
(P.$$$ ":" |-- P.and_list1 P.term))) [] >> (fn (name, avoids) =>
- Toplevel.print o Toplevel.theory_to_proof (prove_strong_ind name avoids)));
+ prove_strong_ind name avoids));
end;
--- a/src/HOL/Nominal/nominal_thmdecls.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Nominal/nominal_thmdecls.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,5 +1,4 @@
-(* ID: "$Id$"
- Authors: Julien Narboux and Christian Urban
+(* Authors: Julien Narboux and Christian Urban
This file introduces the infrastructure for the lemma
declaration "eqvts" "bijs" and "freshs".
@@ -63,10 +62,11 @@
then tac THEN print_tac ("after "^msg)
else tac
-fun tactic_eqvt ctx orig_thm pi typi =
+fun tactic_eqvt ctx orig_thm pi pi' =
let
- val mypi = Thm.cterm_of ctx (Var (pi,typi))
- val mypifree = Thm.cterm_of ctx (Const ("List.rev",typi --> typi) $ Free (fst pi,typi))
+ val mypi = Thm.cterm_of ctx pi
+ val T = fastype_of pi'
+ val mypifree = Thm.cterm_of ctx (Const ("List.rev", T --> T) $ pi')
val perm_pi_simp = PureThy.get_thms ctx "perm_pi_simp"
in
EVERY [tactic ("iffI applied",rtac iffI 1),
@@ -80,14 +80,19 @@
full_simp_tac (HOL_basic_ss addsimps perm_pi_simp) 1)]
end;
-fun get_derived_thm thy hyp concl orig_thm pi typi =
- let
- val lhs = (Const("Nominal.perm", typi --> HOLogic.boolT --> HOLogic.boolT) $ Var(pi,typi) $ hyp)
- val goal_term = Logic.unvarify (HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs,concl)))
- val _ = Display.print_cterm (cterm_of thy goal_term)
- in
- Goal.prove_global thy [] [] goal_term (fn _ => (tactic_eqvt thy orig_thm pi typi))
- end
+fun get_derived_thm ctxt hyp concl orig_thm pi typi =
+ let
+ val thy = ProofContext.theory_of ctxt;
+ val pi' = Var (pi, typi);
+ val lhs = Const ("Nominal.perm", typi --> HOLogic.boolT --> HOLogic.boolT) $ pi' $ hyp;
+ val ([goal_term, pi''], ctxt') = Variable.import_terms false
+ [HOLogic.mk_Trueprop (HOLogic.mk_eq (lhs, concl)), pi'] ctxt
+ val _ = Display.print_cterm (cterm_of thy goal_term)
+ in
+ Goal.prove ctxt' [] [] goal_term
+ (fn _ => tactic_eqvt thy orig_thm pi' pi'') |>
+ singleton (ProofContext.export ctxt' ctxt)
+ end
(* replaces every variable x in t with pi o x *)
fun apply_pi trm (pi,typi) =
@@ -145,7 +150,8 @@
if (apply_pi hyp (pi,typi) = concl)
then
(warning ("equivariance lemma of the relational form");
- [orig_thm, get_derived_thm thy hyp concl orig_thm pi typi])
+ [orig_thm,
+ get_derived_thm (Context.proof_of context) hyp concl orig_thm pi typi])
else raise EQVT_FORM "Type Implication"
end
(* case: eqvt-lemma is of the equational form *)
--- a/src/HOL/Orderings.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Orderings.thy Fri Feb 27 18:50:35 2009 +0100
@@ -331,7 +331,7 @@
fun struct_tac ((s, [eq, le, less]), thms) prems =
let
- fun decomp thy (Trueprop $ t) =
+ fun decomp thy (@{const Trueprop} $ t) =
let
fun excluded t =
(* exclude numeric types: linear arithmetic subsumes transitivity *)
@@ -350,7 +350,8 @@
of NONE => NONE
| SOME (t1, rel, t2) => SOME (t1, "~" ^ rel, t2))
| dec x = rel x;
- in dec t end;
+ in dec t end
+ | decomp thy _ = NONE;
in
case s of
"order" => Order_Tac.partial_tac decomp thms prems
--- a/src/HOL/RComplete.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/RComplete.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,8 +1,8 @@
-(* Title : HOL/RComplete.thy
- Author : Jacques D. Fleuriot, University of Edinburgh
- Author : Larry Paulson, University of Cambridge
- Author : Jeremy Avigad, Carnegie Mellon University
- Author : Florian Zuleger, Johannes Hoelzl, and Simon Funke, TU Muenchen
+(* Title: HOL/RComplete.thy
+ Author: Jacques D. Fleuriot, University of Edinburgh
+ Author: Larry Paulson, University of Cambridge
+ Author: Jeremy Avigad, Carnegie Mellon University
+ Author: Florian Zuleger, Johannes Hoelzl, and Simon Funke, TU Muenchen
*)
header {* Completeness of the Reals; Floor and Ceiling Functions *}
@@ -380,33 +380,28 @@
thus "\<exists>(n::nat). x < real n" ..
qed
+instance real :: archimedean_field
+proof
+ fix r :: real
+ obtain n :: nat where "r < real n"
+ using reals_Archimedean2 ..
+ then have "r \<le> of_int (int n)"
+ unfolding real_eq_of_nat by simp
+ then show "\<exists>z. r \<le> of_int z" ..
+qed
+
lemma reals_Archimedean3:
assumes x_greater_zero: "0 < x"
shows "\<forall>(y::real). \<exists>(n::nat). y < real n * x"
-proof
- fix y
- have x_not_zero: "x \<noteq> 0" using x_greater_zero by simp
- obtain n where "y * inverse x < real (n::nat)"
- using reals_Archimedean2 ..
- hence "y * inverse x * x < real n * x"
- using x_greater_zero by (simp add: mult_strict_right_mono)
- hence "x * inverse x * y < x * real n"
- by (simp add: algebra_simps)
- hence "y < real (n::nat) * x"
- using x_not_zero by (simp add: algebra_simps)
- thus "\<exists>(n::nat). y < real n * x" ..
-qed
+ unfolding real_of_nat_def using `0 < x`
+ by (auto intro: ex_less_of_nat_mult)
lemma reals_Archimedean6:
"0 \<le> r ==> \<exists>(n::nat). real (n - 1) \<le> r & r < real (n)"
-apply (insert reals_Archimedean2 [of r], safe)
-apply (subgoal_tac "\<exists>x::nat. r < real x \<and> (\<forall>y. r < real y \<longrightarrow> x \<le> y)", auto)
-apply (rule_tac x = x in exI)
-apply (case_tac x, simp)
-apply (rename_tac x')
-apply (drule_tac x = x' in spec, simp)
-apply (rule_tac x="LEAST n. r < real n" in exI, safe)
-apply (erule LeastI, erule Least_le)
+unfolding real_of_nat_def
+apply (rule exI [where x="nat (floor r + 1)"])
+apply (insert floor_correct [of r])
+apply (simp add: nat_add_distrib of_nat_nat)
done
lemma reals_Archimedean6a: "0 \<le> r ==> \<exists>n. real (n) \<le> r & r < real (Suc n)"
@@ -414,19 +409,11 @@
lemma reals_Archimedean_6b_int:
"0 \<le> r ==> \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (drule reals_Archimedean6a, auto)
-apply (rule_tac x = "int n" in exI)
-apply (simp add: real_of_int_real_of_nat real_of_nat_Suc)
-done
+ unfolding real_of_int_def by (rule floor_exists)
lemma reals_Archimedean_6c_int:
"r < 0 ==> \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (rule reals_Archimedean_6b_int [of "-r", THEN exE], simp, auto)
-apply (rename_tac n)
-apply (drule order_le_imp_less_or_eq, auto)
-apply (rule_tac x = "- n - 1" in exI)
-apply (rule_tac [2] x = "- n" in exI, auto)
-done
+ unfolding real_of_int_def by (rule floor_exists)
subsection{*Density of the Rational Reals in the Reals*}
@@ -485,23 +472,6 @@
subsection{*Floor and Ceiling Functions from the Reals to the Integers*}
-definition
- floor :: "real => int" where
- [code del]: "floor r = (LEAST n::int. r < real (n+1))"
-
-definition
- ceiling :: "real => int" where
- "ceiling r = - floor (- r)"
-
-notation (xsymbols)
- floor ("\<lfloor>_\<rfloor>") and
- ceiling ("\<lceil>_\<rceil>")
-
-notation (HTML output)
- floor ("\<lfloor>_\<rfloor>") and
- ceiling ("\<lceil>_\<rceil>")
-
-
lemma number_of_less_real_of_int_iff [simp]:
"((number_of n) < real (m::int)) = (number_of n < m)"
apply auto
@@ -524,51 +494,23 @@
"(real (m::int) \<le> (number_of n)) = (m \<le> number_of n)"
by (simp add: linorder_not_less [symmetric])
-lemma floor_zero [simp]: "floor 0 = 0"
-apply (simp add: floor_def del: real_of_int_add)
-apply (rule Least_equality)
-apply simp_all
-done
-
-lemma floor_real_of_nat_zero [simp]: "floor (real (0::nat)) = 0"
-by auto
+lemma floor_real_of_nat_zero: "floor (real (0::nat)) = 0"
+by auto (* delete? *)
lemma floor_real_of_nat [simp]: "floor (real (n::nat)) = int n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_of_nat_eq [THEN ssubst])
-apply (drule_tac [2] real_of_int_less_iff [THEN iffD1])
-apply simp_all
-done
+unfolding real_of_nat_def by simp
lemma floor_minus_real_of_nat [simp]: "floor (- real (n::nat)) = - int n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_of_nat_eq [THEN ssubst])
-apply (drule_tac [2] real_of_int_minus [THEN sym, THEN subst])
-apply (drule_tac [2] real_of_int_less_iff [THEN iffD1])
-apply simp_all
-done
+unfolding real_of_nat_def by (simp add: floor_minus)
lemma floor_real_of_int [simp]: "floor (real (n::int)) = n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply auto
-done
+unfolding real_of_int_def by simp
lemma floor_minus_real_of_int [simp]: "floor (- real (n::int)) = - n"
-apply (simp only: floor_def)
-apply (rule Least_equality)
-apply (drule_tac [2] real_of_int_minus [THEN sym, THEN subst])
-apply auto
-done
+unfolding real_of_int_def by (simp add: floor_minus)
lemma real_lb_ub_int: " \<exists>n::int. real n \<le> r & r < real (n+1)"
-apply (case_tac "r < 0")
-apply (blast intro: reals_Archimedean_6c_int)
-apply (simp only: linorder_not_less)
-apply (blast intro: reals_Archimedean_6b_int reals_Archimedean_6c_int)
-done
+unfolding real_of_int_def by (rule floor_exists)
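The pattern here and throughout the rest of the file is uniform: unfolding real_of_int_def turns real :: int => real into the generic of_int, after which the corresponding Archimedean_Field lemma (floor_exists, of_int_floor_le, le_floor_iff, and so on) applies verbatim.
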
lemma lemma_floor:
assumes a1: "real m \<le> r" and a2: "r < real n + 1"
@@ -581,48 +523,20 @@
qed
lemma real_of_int_floor_le [simp]: "real (floor r) \<le> r"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply auto
-done
-
-lemma floor_mono: "x < y ==> floor x \<le> floor y"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of x])
-apply (insert real_lb_ub_int [of y], safe)
-apply (rule theI2)
-apply (rule_tac [3] theI2)
-apply simp
-apply (erule conjI)
-apply (auto simp add: order_eq_iff int_le_real_less)
-done
-
-lemma floor_mono2: "x \<le> y ==> floor x \<le> floor y"
-by (auto dest: order_le_imp_less_or_eq simp add: floor_mono)
+unfolding real_of_int_def by (rule of_int_floor_le)
lemma lemma_floor2: "real n < real (x::int) + 1 ==> n \<le> x"
by (auto intro: lemma_floor)
lemma real_of_int_floor_cancel [simp]:
"(real (floor x) = x) = (\<exists>n::int. x = real n)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of x], erule exE)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ using floor_real_of_int by metis
lemma floor_eq: "[| real n < x; x < real n + 1 |] ==> floor x = n"
-apply (simp add: floor_def)
-apply (rule Least_equality)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_unique [of n x] by simp
lemma floor_eq2: "[| real n \<le> x; x < real n + 1 |] ==> floor x = n"
-apply (simp add: floor_def)
-apply (rule Least_equality)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def by (rule floor_unique)
lemma floor_eq3: "[| real n < x; x < real (Suc n) |] ==> nat(floor x) = n"
apply (rule inj_int [THEN injD])
@@ -635,353 +549,205 @@
apply (auto intro: floor_eq3)
done
-lemma floor_number_of_eq [simp]:
+lemma floor_number_of_eq:
"floor(number_of n :: real) = (number_of n :: int)"
-apply (subst real_number_of [symmetric])
-apply (rule floor_real_of_int)
-done
-
-lemma floor_one [simp]: "floor 1 = 1"
- apply (rule trans)
- prefer 2
- apply (rule floor_real_of_int)
- apply simp
-done
+ by (rule floor_number_of) (* already declared [simp] *)
lemma real_of_int_floor_ge_diff_one [simp]: "r - 1 \<le> real(floor r)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_gt_diff_one [simp]: "r - 1 < real(floor r)"
-apply (simp add: floor_def Least_def)
-apply (insert real_lb_ub_int [of r], safe)
-apply (rule theI2)
-apply (auto intro: lemma_floor)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_add_one_ge [simp]: "r \<le> real(floor r) + 1"
-apply (insert real_of_int_floor_ge_diff_one [of r])
-apply (auto simp del: real_of_int_floor_ge_diff_one)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma real_of_int_floor_add_one_gt [simp]: "r < real(floor r) + 1"
-apply (insert real_of_int_floor_gt_diff_one [of r])
-apply (auto simp del: real_of_int_floor_gt_diff_one)
-done
+ unfolding real_of_int_def using floor_correct [of r] by simp
lemma le_floor: "real a <= x ==> a <= floor x"
- apply (subgoal_tac "a < floor x + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (insert real_of_int_floor_add_one_gt [of x])
- apply arith
-done
+ unfolding real_of_int_def by (simp add: le_floor_iff)
lemma real_le_floor: "a <= floor x ==> real a <= x"
- apply (rule order_trans)
- prefer 2
- apply (rule real_of_int_floor_le)
- apply (subst real_of_int_le_iff)
- apply assumption
-done
+ unfolding real_of_int_def by (simp add: le_floor_iff)
lemma le_floor_eq: "(a <= floor x) = (real a <= x)"
- apply (rule iffI)
- apply (erule real_le_floor)
- apply (erule le_floor)
-done
+ unfolding real_of_int_def by (rule le_floor_iff)
-lemma le_floor_eq_number_of [simp]:
+lemma le_floor_eq_number_of:
"(number_of n <= floor x) = (number_of n <= x)"
-by (simp add: le_floor_eq)
+ by (rule number_of_le_floor) (* already declared [simp] *)
-lemma le_floor_eq_zero [simp]: "(0 <= floor x) = (0 <= x)"
-by (simp add: le_floor_eq)
+lemma le_floor_eq_zero: "(0 <= floor x) = (0 <= x)"
+ by (rule zero_le_floor) (* already declared [simp] *)
-lemma le_floor_eq_one [simp]: "(1 <= floor x) = (1 <= x)"
-by (simp add: le_floor_eq)
+lemma le_floor_eq_one: "(1 <= floor x) = (1 <= x)"
+ by (rule one_le_floor) (* already declared [simp] *)
lemma floor_less_eq: "(floor x < a) = (x < real a)"
- apply (subst linorder_not_le [THEN sym])+
- apply simp
- apply (rule le_floor_eq)
-done
+ unfolding real_of_int_def by (rule floor_less_iff)
-lemma floor_less_eq_number_of [simp]:
+lemma floor_less_eq_number_of:
"(floor x < number_of n) = (x < number_of n)"
-by (simp add: floor_less_eq)
+ by (rule floor_less_number_of) (* already declared [simp] *)
-lemma floor_less_eq_zero [simp]: "(floor x < 0) = (x < 0)"
-by (simp add: floor_less_eq)
+lemma floor_less_eq_zero: "(floor x < 0) = (x < 0)"
+ by (rule floor_less_zero) (* already declared [simp] *)
-lemma floor_less_eq_one [simp]: "(floor x < 1) = (x < 1)"
-by (simp add: floor_less_eq)
+lemma floor_less_eq_one: "(floor x < 1) = (x < 1)"
+ by (rule floor_less_one) (* already declared [simp] *)
lemma less_floor_eq: "(a < floor x) = (real a + 1 <= x)"
- apply (insert le_floor_eq [of "a + 1" x])
- apply auto
-done
+ unfolding real_of_int_def by (rule less_floor_iff)
-lemma less_floor_eq_number_of [simp]:
+lemma less_floor_eq_number_of:
"(number_of n < floor x) = (number_of n + 1 <= x)"
-by (simp add: less_floor_eq)
+ by (rule number_of_less_floor) (* already declared [simp] *)
-lemma less_floor_eq_zero [simp]: "(0 < floor x) = (1 <= x)"
-by (simp add: less_floor_eq)
+lemma less_floor_eq_zero: "(0 < floor x) = (1 <= x)"
+ by (rule zero_less_floor) (* already declared [simp] *)
-lemma less_floor_eq_one [simp]: "(1 < floor x) = (2 <= x)"
-by (simp add: less_floor_eq)
+lemma less_floor_eq_one: "(1 < floor x) = (2 <= x)"
+ by (rule one_less_floor) (* already declared [simp] *)
lemma floor_le_eq: "(floor x <= a) = (x < real a + 1)"
- apply (insert floor_less_eq [of x "a + 1"])
- apply auto
-done
+ unfolding real_of_int_def by (rule floor_le_iff)
-lemma floor_le_eq_number_of [simp]:
+lemma floor_le_eq_number_of:
"(floor x <= number_of n) = (x < number_of n + 1)"
-by (simp add: floor_le_eq)
+ by (rule floor_le_number_of) (* already declared [simp] *)
-lemma floor_le_eq_zero [simp]: "(floor x <= 0) = (x < 1)"
-by (simp add: floor_le_eq)
+lemma floor_le_eq_zero: "(floor x <= 0) = (x < 1)"
+ by (rule floor_le_zero) (* already declared [simp] *)
-lemma floor_le_eq_one [simp]: "(floor x <= 1) = (x < 2)"
-by (simp add: floor_le_eq)
+lemma floor_le_eq_one: "(floor x <= 1) = (x < 2)"
+ by (rule floor_le_one) (* already declared [simp] *)
lemma floor_add [simp]: "floor (x + real a) = floor x + a"
- apply (subst order_eq_iff)
- apply (rule conjI)
- prefer 2
- apply (subgoal_tac "floor x + a < floor (x + real a) + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (subgoal_tac "x + real a < real(floor(x + real a)) + 1")
- apply (subgoal_tac "real (floor x) <= x")
- apply arith
- apply (rule real_of_int_floor_le)
- apply (rule real_of_int_floor_add_one_gt)
- apply (subgoal_tac "floor (x + real a) < floor x + a + 1")
- apply arith
- apply (subst real_of_int_less_iff [THEN sym])
- apply simp
- apply (subgoal_tac "real(floor(x + real a)) <= x + real a")
- apply (subgoal_tac "x < real(floor x) + 1")
- apply arith
- apply (rule real_of_int_floor_add_one_gt)
- apply (rule real_of_int_floor_le)
-done
-
-lemma floor_add_number_of [simp]:
- "floor (x + number_of n) = floor x + number_of n"
- apply (subst floor_add [THEN sym])
- apply simp
-done
-
-lemma floor_add_one [simp]: "floor (x + 1) = floor x + 1"
- apply (subst floor_add [THEN sym])
- apply simp
-done
+ unfolding real_of_int_def by (rule floor_add_of_int)
lemma floor_subtract [simp]: "floor (x - real a) = floor x - a"
- apply (subst diff_minus)+
- apply (subst real_of_int_minus [THEN sym])
- apply (rule floor_add)
-done
+ unfolding real_of_int_def by (rule floor_diff_of_int)
-lemma floor_subtract_number_of [simp]: "floor (x - number_of n) =
+lemma floor_subtract_number_of: "floor (x - number_of n) =
floor x - number_of n"
- apply (subst floor_subtract [THEN sym])
- apply simp
-done
+ by (rule floor_diff_number_of) (* already declared [simp] *)
-lemma floor_subtract_one [simp]: "floor (x - 1) = floor x - 1"
- apply (subst floor_subtract [THEN sym])
- apply simp
-done
-
-lemma ceiling_zero [simp]: "ceiling 0 = 0"
-by (simp add: ceiling_def)
+lemma floor_subtract_one: "floor (x - 1) = floor x - 1"
+ by (rule floor_diff_one) (* already declared [simp] *)
lemma ceiling_real_of_nat [simp]: "ceiling (real (n::nat)) = int n"
-by (simp add: ceiling_def)
+ unfolding real_of_nat_def by simp
-lemma ceiling_real_of_nat_zero [simp]: "ceiling (real (0::nat)) = 0"
-by auto
+lemma ceiling_real_of_nat_zero: "ceiling (real (0::nat)) = 0"
+by auto (* delete? *)
lemma ceiling_floor [simp]: "ceiling (real (floor r)) = floor r"
-by (simp add: ceiling_def)
+ unfolding real_of_int_def by simp
lemma floor_ceiling [simp]: "floor (real (ceiling r)) = ceiling r"
-by (simp add: ceiling_def)
+ unfolding real_of_int_def by simp
lemma real_of_int_ceiling_ge [simp]: "r \<le> real (ceiling r)"
-apply (simp add: ceiling_def)
-apply (subst le_minus_iff, simp)
-done
+ unfolding real_of_int_def by (rule le_of_int_ceiling)
-lemma ceiling_mono: "x < y ==> ceiling x \<le> ceiling y"
-by (simp add: floor_mono ceiling_def)
-
-lemma ceiling_mono2: "x \<le> y ==> ceiling x \<le> ceiling y"
-by (simp add: floor_mono2 ceiling_def)
+lemma ceiling_real_of_int [simp]: "ceiling (real (n::int)) = n"
+ unfolding real_of_int_def by simp
lemma real_of_int_ceiling_cancel [simp]:
"(real (ceiling x) = x) = (\<exists>n::int. x = real n)"
-apply (auto simp add: ceiling_def)
-apply (drule arg_cong [where f = uminus], auto)
-apply (rule_tac x = "-n" in exI, auto)
-done
+ using ceiling_real_of_int by metis
lemma ceiling_eq: "[| real n < x; x < real n + 1 |] ==> ceiling x = n + 1"
-apply (simp add: ceiling_def)
-apply (rule minus_equation_iff [THEN iffD1])
-apply (simp add: floor_eq [where n = "-(n+1)"])
-done
+ unfolding real_of_int_def using ceiling_unique [of "n + 1" x] by simp
lemma ceiling_eq2: "[| real n < x; x \<le> real n + 1 |] ==> ceiling x = n + 1"
-by (simp add: ceiling_def floor_eq2 [where n = "-(n+1)"])
+ unfolding real_of_int_def using ceiling_unique [of "n + 1" x] by simp
lemma ceiling_eq3: "[| real n - 1 < x; x \<le> real n |] ==> ceiling x = n"
-by (simp add: ceiling_def floor_eq2 [where n = "-n"])
+ unfolding real_of_int_def using ceiling_unique [of n x] by simp
-lemma ceiling_real_of_int [simp]: "ceiling (real (n::int)) = n"
-by (simp add: ceiling_def)
-
-lemma ceiling_number_of_eq [simp]:
+lemma ceiling_number_of_eq:
"ceiling (number_of n :: real) = (number_of n)"
-apply (subst real_number_of [symmetric])
-apply (rule ceiling_real_of_int)
-done
-
-lemma ceiling_one [simp]: "ceiling 1 = 1"
- by (unfold ceiling_def, simp)
+ by (rule ceiling_number_of) (* already declared [simp] *)
lemma real_of_int_ceiling_diff_one_le [simp]: "real (ceiling r) - 1 \<le> r"
-apply (rule neg_le_iff_le [THEN iffD1])
-apply (simp add: ceiling_def diff_minus)
-done
+ unfolding real_of_int_def using ceiling_correct [of r] by simp
lemma real_of_int_ceiling_le_add_one [simp]: "real (ceiling r) \<le> r + 1"
-apply (insert real_of_int_ceiling_diff_one_le [of r])
-apply (simp del: real_of_int_ceiling_diff_one_le)
-done
+ unfolding real_of_int_def using ceiling_correct [of r] by simp
lemma ceiling_le: "x <= real a ==> ceiling x <= a"
- apply (unfold ceiling_def)
- apply (subgoal_tac "-a <= floor(- x)")
- apply simp
- apply (rule le_floor)
- apply simp
-done
+ unfolding real_of_int_def by (simp add: ceiling_le_iff)
lemma ceiling_le_real: "ceiling x <= a ==> x <= real a"
- apply (unfold ceiling_def)
- apply (subgoal_tac "real(- a) <= - x")
- apply simp
- apply (rule real_le_floor)
- apply simp
-done
+ unfolding real_of_int_def by (simp add: ceiling_le_iff)
lemma ceiling_le_eq: "(ceiling x <= a) = (x <= real a)"
- apply (rule iffI)
- apply (erule ceiling_le_real)
- apply (erule ceiling_le)
-done
+ unfolding real_of_int_def by (rule ceiling_le_iff)
-lemma ceiling_le_eq_number_of [simp]:
+lemma ceiling_le_eq_number_of:
"(ceiling x <= number_of n) = (x <= number_of n)"
-by (simp add: ceiling_le_eq)
+ by (rule ceiling_le_number_of) (* already declared [simp] *)
-lemma ceiling_le_zero_eq [simp]: "(ceiling x <= 0) = (x <= 0)"
-by (simp add: ceiling_le_eq)
+lemma ceiling_le_zero_eq: "(ceiling x <= 0) = (x <= 0)"
+ by (rule ceiling_le_zero) (* already declared [simp] *)
-lemma ceiling_le_eq_one [simp]: "(ceiling x <= 1) = (x <= 1)"
-by (simp add: ceiling_le_eq)
+lemma ceiling_le_eq_one: "(ceiling x <= 1) = (x <= 1)"
+ by (rule ceiling_le_one) (* already declared [simp] *)
lemma less_ceiling_eq: "(a < ceiling x) = (real a < x)"
- apply (subst linorder_not_le [THEN sym])+
- apply simp
- apply (rule ceiling_le_eq)
-done
+ unfolding real_of_int_def by (rule less_ceiling_iff)
-lemma less_ceiling_eq_number_of [simp]:
+lemma less_ceiling_eq_number_of:
"(number_of n < ceiling x) = (number_of n < x)"
-by (simp add: less_ceiling_eq)
+ by (rule number_of_less_ceiling) (* already declared [simp] *)
-lemma less_ceiling_eq_zero [simp]: "(0 < ceiling x) = (0 < x)"
-by (simp add: less_ceiling_eq)
+lemma less_ceiling_eq_zero: "(0 < ceiling x) = (0 < x)"
+ by (rule zero_less_ceiling) (* already declared [simp] *)
-lemma less_ceiling_eq_one [simp]: "(1 < ceiling x) = (1 < x)"
-by (simp add: less_ceiling_eq)
+lemma less_ceiling_eq_one: "(1 < ceiling x) = (1 < x)"
+ by (rule one_less_ceiling) (* already declared [simp] *)
lemma ceiling_less_eq: "(ceiling x < a) = (x <= real a - 1)"
- apply (insert ceiling_le_eq [of x "a - 1"])
- apply auto
-done
+ unfolding real_of_int_def by (rule ceiling_less_iff)
-lemma ceiling_less_eq_number_of [simp]:
+lemma ceiling_less_eq_number_of:
"(ceiling x < number_of n) = (x <= number_of n - 1)"
-by (simp add: ceiling_less_eq)
+ by (rule ceiling_less_number_of) (* already declared [simp] *)
-lemma ceiling_less_eq_zero [simp]: "(ceiling x < 0) = (x <= -1)"
-by (simp add: ceiling_less_eq)
+lemma ceiling_less_eq_zero: "(ceiling x < 0) = (x <= -1)"
+ by (rule ceiling_less_zero) (* already declared [simp] *)
-lemma ceiling_less_eq_one [simp]: "(ceiling x < 1) = (x <= 0)"
-by (simp add: ceiling_less_eq)
+lemma ceiling_less_eq_one: "(ceiling x < 1) = (x <= 0)"
+ by (rule ceiling_less_one) (* already declared [simp] *)
lemma le_ceiling_eq: "(a <= ceiling x) = (real a - 1 < x)"
- apply (insert less_ceiling_eq [of "a - 1" x])
- apply auto
-done
+ unfolding real_of_int_def by (rule le_ceiling_iff)
-lemma le_ceiling_eq_number_of [simp]:
+lemma le_ceiling_eq_number_of:
"(number_of n <= ceiling x) = (number_of n - 1 < x)"
-by (simp add: le_ceiling_eq)
+ by (rule number_of_le_ceiling) (* already declared [simp] *)
-lemma le_ceiling_eq_zero [simp]: "(0 <= ceiling x) = (-1 < x)"
-by (simp add: le_ceiling_eq)
+lemma le_ceiling_eq_zero: "(0 <= ceiling x) = (-1 < x)"
+ by (rule zero_le_ceiling) (* already declared [simp] *)
-lemma le_ceiling_eq_one [simp]: "(1 <= ceiling x) = (0 < x)"
-by (simp add: le_ceiling_eq)
+lemma le_ceiling_eq_one: "(1 <= ceiling x) = (0 < x)"
+ by (rule one_le_ceiling) (* already declared [simp] *)
lemma ceiling_add [simp]: "ceiling (x + real a) = ceiling x + a"
- apply (unfold ceiling_def, simp)
- apply (subst real_of_int_minus [THEN sym])
- apply (subst floor_add)
- apply simp
-done
-
-lemma ceiling_add_number_of [simp]: "ceiling (x + number_of n) =
- ceiling x + number_of n"
- apply (subst ceiling_add [THEN sym])
- apply simp
-done
-
-lemma ceiling_add_one [simp]: "ceiling (x + 1) = ceiling x + 1"
- apply (subst ceiling_add [THEN sym])
- apply simp
-done
+ unfolding real_of_int_def by (rule ceiling_add_of_int)
lemma ceiling_subtract [simp]: "ceiling (x - real a) = ceiling x - a"
- apply (subst diff_minus)+
- apply (subst real_of_int_minus [THEN sym])
- apply (rule ceiling_add)
-done
+ unfolding real_of_int_def by (rule ceiling_diff_of_int)
-lemma ceiling_subtract_number_of [simp]: "ceiling (x - number_of n) =
+lemma ceiling_subtract_number_of: "ceiling (x - number_of n) =
ceiling x - number_of n"
- apply (subst ceiling_subtract [THEN sym])
- apply simp
-done
+ by (rule ceiling_diff_number_of) (* already declared [simp] *)
-lemma ceiling_subtract_one [simp]: "ceiling (x - 1) = ceiling x - 1"
- apply (subst ceiling_subtract [THEN sym])
- apply simp
-done
+lemma ceiling_subtract_one: "ceiling (x - 1) = ceiling x - 1"
+ by (rule ceiling_diff_one) (* already declared [simp] *)
+
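Each of the renamed lemmas above now merely abbreviates its generic counterpart, and the (* already declared [simp] *) comments record that the simp status lives on the generic rule instead. So a goal such as the following should still be settled by the simplifier alone; a sketch, relying on the generic simp rules named above (here floor_diff_one):

    lemma "floor (x - 1 :: real) = floor x - 1"
      by simp
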
subsection {* Versions for the natural numbers *}
@@ -1015,7 +781,7 @@
apply (unfold natfloor_def)
apply (subgoal_tac "floor x <= floor 0")
apply simp
- apply (erule floor_mono2)
+ apply (erule floor_mono)
done
lemma natfloor_mono: "x <= y ==> natfloor x <= natfloor y"
@@ -1023,7 +789,7 @@
apply (subst natfloor_def)+
apply (subst nat_le_eq_zle)
apply force
- apply (erule floor_mono2)
+ apply (erule floor_mono)
apply (subst natfloor_neg)
apply simp
apply simp
@@ -1144,7 +910,7 @@
apply (subst real_nat_eq_real)
apply (subgoal_tac "ceiling 0 <= ceiling x")
apply simp
- apply (rule ceiling_mono2)
+ apply (rule ceiling_mono)
apply simp
apply simp
done
@@ -1165,7 +931,7 @@
apply simp
apply (erule order_trans)
apply simp
- apply (erule ceiling_mono2)
+ apply (erule ceiling_mono)
apply (subst natceiling_neg)
apply simp_all
done
@@ -1215,7 +981,7 @@
apply (subst eq_nat_nat_iff)
apply (subgoal_tac "ceiling 0 <= ceiling x")
apply simp
- apply (rule ceiling_mono2)
+ apply (rule ceiling_mono)
apply force
apply force
apply (rule ceiling_eq2)
@@ -1233,7 +999,7 @@
apply (subst nat_add_distrib)
apply (subgoal_tac "0 = ceiling 0")
apply (erule ssubst)
- apply (erule ceiling_mono2)
+ apply (erule ceiling_mono)
apply simp_all
done
--- a/src/HOL/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,7 +1,5 @@
(* Classical Higher-order Logic -- batteries included *)
-use_thy "Main";
-share_common_data ();
use_thy "Complex_Main";
val HOL_proofs = ! Proofterm.proofs;
--- a/src/HOL/Rational.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Rational.thy Fri Feb 27 18:50:35 2009 +0100
@@ -5,7 +5,7 @@
header {* Rational numbers *}
theory Rational
-imports GCD
+imports GCD Archimedean_Field
uses ("Tools/rat_arith.ML")
begin
@@ -255,7 +255,6 @@
with `b \<noteq> 0` have "a \<noteq> 0" by (simp add: Zero_rat_def eq_rat)
with Fract `q = Fract a b` `b \<noteq> 0` show C by auto
qed
-
subsubsection {* The field of rational numbers *}
@@ -532,8 +531,67 @@
qed
lemma zero_less_Fract_iff:
- "0 < b ==> (0 < Fract a b) = (0 < a)"
-by (simp add: Zero_rat_def order_less_imp_not_eq2 zero_less_mult_iff)
+ "0 < b \<Longrightarrow> 0 < Fract a b \<longleftrightarrow> 0 < a"
+ by (simp add: Zero_rat_def zero_less_mult_iff)
+
+lemma Fract_less_zero_iff:
+ "0 < b \<Longrightarrow> Fract a b < 0 \<longleftrightarrow> a < 0"
+ by (simp add: Zero_rat_def mult_less_0_iff)
+
+lemma zero_le_Fract_iff:
+ "0 < b \<Longrightarrow> 0 \<le> Fract a b \<longleftrightarrow> 0 \<le> a"
+ by (simp add: Zero_rat_def zero_le_mult_iff)
+
+lemma Fract_le_zero_iff:
+ "0 < b \<Longrightarrow> Fract a b \<le> 0 \<longleftrightarrow> a \<le> 0"
+ by (simp add: Zero_rat_def mult_le_0_iff)
+
+lemma one_less_Fract_iff:
+ "0 < b \<Longrightarrow> 1 < Fract a b \<longleftrightarrow> b < a"
+ by (simp add: One_rat_def mult_less_cancel_right_disj)
+
+lemma Fract_less_one_iff:
+ "0 < b \<Longrightarrow> Fract a b < 1 \<longleftrightarrow> a < b"
+ by (simp add: One_rat_def mult_less_cancel_right_disj)
+
+lemma one_le_Fract_iff:
+ "0 < b \<Longrightarrow> 1 \<le> Fract a b \<longleftrightarrow> b \<le> a"
+ by (simp add: One_rat_def mult_le_cancel_right)
+
+lemma Fract_le_one_iff:
+ "0 < b \<Longrightarrow> Fract a b \<le> 1 \<longleftrightarrow> a \<le> b"
+ by (simp add: One_rat_def mult_le_cancel_right)
+
+
+subsubsection {* Rationals are an Archimedean field *}
+
+lemma rat_floor_lemma:
+ assumes "0 < b"
+ shows "of_int (a div b) \<le> Fract a b \<and> Fract a b < of_int (a div b + 1)"
+proof -
+ have "Fract a b = of_int (a div b) + Fract (a mod b) b"
+ using `0 < b` by (simp add: of_int_rat)
+ moreover have "0 \<le> Fract (a mod b) b \<and> Fract (a mod b) b < 1"
+ using `0 < b` by (simp add: zero_le_Fract_iff Fract_less_one_iff)
+ ultimately show ?thesis by simp
+qed
+
+instance rat :: archimedean_field
+proof
+ fix r :: rat
+ show "\<exists>z. r \<le> of_int z"
+ proof (induct r)
+ case (Fract a b)
+ then have "Fract a b \<le> of_int (a div b + 1)"
+ using rat_floor_lemma [of b a] by simp
+ then show "\<exists>z. Fract a b \<le> of_int z" ..
+ qed
+qed
+
+lemma floor_Fract:
+ assumes "0 < b" shows "floor (Fract a b) = a div b"
+ using rat_floor_lemma [OF `0 < b`, of a]
+ by (simp add: floor_unique)
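As a concrete check of floor_Fract: floor (Fract 7 2) = 7 div 2 = 3, while floor (Fract (-7) 2) = (-7) div 2 = -4, since integer div rounds toward minus infinity.
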
subsection {* Arithmetic setup *}
--- a/src/HOL/RealDef.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/RealDef.thy Fri Feb 27 18:50:35 2009 +0100
@@ -705,6 +705,9 @@
lemma real_of_nat_zero [simp]: "real (0::nat) = 0"
by (simp add: real_of_nat_def)
+lemma real_of_nat_1 [simp]: "real (1::nat) = 1"
+by (simp add: real_of_nat_def)
+
lemma real_of_nat_one [simp]: "real (Suc 0) = (1::real)"
by (simp add: real_of_nat_def)
--- a/src/HOL/RealPow.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/RealPow.thy Fri Feb 27 18:50:35 2009 +0100
@@ -44,7 +44,8 @@
by (insert power_decreasing [of 1 "Suc n" r], simp)
lemma realpow_minus_mult [rule_format]:
- "0 < n --> (x::real) ^ (n - 1) * x = x ^ n"
+ "0 < n --> (x::real) ^ (n - 1) * x = x ^ n"
+unfolding One_nat_def
apply (simp split add: nat_diff_split)
done
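This is the first of many occurrences of the same repair: apparently 1::nat is no longer unfolded to Suc 0 automatically, so proofs that depend on that unfolding now perform it explicitly via One_nat_def; the Suc 0 substitutions in SEQ.thy and Series.thy below serve the same purpose. A standalone sketch of the idiom (the lemma itself is only illustrative):

    lemma "(x::real) ^ 1 = x * x ^ 0"
      unfolding One_nat_def by simp
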
--- a/src/HOL/SEQ.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/SEQ.thy Fri Feb 27 18:50:35 2009 +0100
@@ -338,10 +338,10 @@
done
lemma LIMSEQ_Suc: "f ----> l \<Longrightarrow> (\<lambda>n. f (Suc n)) ----> l"
-by (drule_tac k="1" in LIMSEQ_ignore_initial_segment, simp)
+by (drule_tac k="Suc 0" in LIMSEQ_ignore_initial_segment, simp)
lemma LIMSEQ_imp_Suc: "(\<lambda>n. f (Suc n)) ----> l \<Longrightarrow> f ----> l"
-by (rule_tac k="1" in LIMSEQ_offset, simp)
+by (rule_tac k="Suc 0" in LIMSEQ_offset, simp)
lemma LIMSEQ_Suc_iff: "(\<lambda>n. f (Suc n)) ----> l = f ----> l"
by (blast intro: LIMSEQ_imp_Suc LIMSEQ_Suc)
--- a/src/HOL/Series.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Series.thy Fri Feb 27 18:50:35 2009 +0100
@@ -312,6 +312,7 @@
shows "\<lbrakk>summable f;
\<forall>d. 0 < f (k + (Suc(Suc 0) * d)) + f (k + ((Suc(Suc 0) * d) + 1))\<rbrakk>
\<Longrightarrow> setsum f {0..<k} < suminf f"
+unfolding One_nat_def
apply (subst suminf_split_initial_segment [where k="k"])
apply assumption
apply simp
@@ -537,7 +538,7 @@
apply (safe, subgoal_tac "\<forall>n. N < n --> f (n) = 0")
prefer 2
apply clarify
- apply(erule_tac x = "n - 1" in allE)
+ apply(erule_tac x = "n - Suc 0" in allE)
apply (simp add:diff_Suc split:nat.splits)
apply (blast intro: norm_ratiotest_lemma)
apply (rule_tac x = "Suc N" in exI, clarify)
--- a/src/HOL/Tools/Qelim/langford.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Tools/Qelim/langford.ML Fri Feb 27 18:50:35 2009 +0100
@@ -113,11 +113,6 @@
val eqI = instantiate' [] [SOME ll, SOME rr] @{thm iffI}
in implies_elim (implies_elim eqI thl) thr |> mk_meta_eq end;
-fun partition f [] = ([],[])
- | partition f (x::xs) =
- let val (yes,no) = partition f xs
- in if f x then (x::yes,no) else (yes, x::no) end;
-
fun contains x ct = member (op aconv) (OldTerm.term_frees (term_of ct)) (term_of x);
fun is_eqx x eq = case term_of eq of
@@ -132,11 +127,11 @@
val e = Thm.dest_fun ct
val (x,p) = Thm.dest_abs (SOME xn) (Thm.dest_arg ct)
val Pp = Thm.capply @{cterm "Trueprop"} p
- val (eqs,neqs) = partition (is_eqx x) (all_conjuncts p)
+ val (eqs,neqs) = List.partition (is_eqx x) (all_conjuncts p)
in case eqs of
[] =>
let
- val (dx,ndx) = partition (contains x) neqs
+ val (dx,ndx) = List.partition (contains x) neqs
in case ndx of [] => NONE
| _ =>
conj_aci_rule (Thm.mk_binop @{cterm "op == :: prop => _"} Pp
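The deleted partition helper duplicated the SML Basis Library's List.partition, which has the same signature ('a -> bool) -> 'a list -> 'a list * 'a list and likewise keeps both output lists in the original order; for instance, List.partition (fn x => x > 0) [1, ~2, 3] evaluates to ([1, 3], [~2]).
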
--- a/src/HOL/Tools/inductive_package.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Tools/inductive_package.ML Fri Feb 27 18:50:35 2009 +0100
@@ -738,7 +738,7 @@
val _ = message (quiet_mode andalso not verbose)
("Proofs for " ^ coind_prefix coind ^ "inductive predicate(s) " ^ commas_quote names);
- val cnames = map (Sign.full_name (ProofContext.theory_of ctxt) o #1) cnames_syn; (* FIXME *)
+ val cnames = map (LocalTheory.full_name ctxt o #1) cnames_syn; (* FIXME *)
val ((intr_names, intr_atts), intr_ts) =
apfst split_list (split_list (map (check_rule ctxt cs params) intros));
--- a/src/HOL/Tools/inductive_set_package.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Tools/inductive_set_package.ML Fri Feb 27 18:50:35 2009 +0100
@@ -503,7 +503,7 @@
if Binding.is_empty alt_name then
Binding.name (space_implode "_" (map (Binding.base_name o fst) cnames_syn))
else alt_name;
- val cnames = map (Sign.full_name (ProofContext.theory_of ctxt3) o #1) cnames_syn; (* FIXME *)
+ val cnames = map (LocalTheory.full_name ctxt3 o #1) cnames_syn; (* FIXME *)
val (intr_names, intr_atts) = split_list (map fst intros);
val raw_induct' = to_set [] (Context.Proof ctxt3) raw_induct;
val (intrs', elims', induct, ctxt4) =
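In both inductive packages the FIXME'd name computation now goes through LocalTheory.full_name, that is, through the naming of the local context rather than Sign.full_name on the background theory; presumably this makes the computed cnames respect whatever name prefixes the current target imposes.
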
--- a/src/HOL/Transcendental.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Transcendental.thy Fri Feb 27 18:50:35 2009 +0100
@@ -120,7 +120,7 @@
case (Suc n)
have "(\<Sum> i = 0 ..< 2 * Suc n. if even i then f i else g i) =
(\<Sum> i = 0 ..< n. f (2 * i)) + (\<Sum> i = 0 ..< n. g (2 * i + 1)) + (f (2 * n) + g (2 * n + 1))"
- using Suc.hyps by auto
+ using Suc.hyps unfolding One_nat_def by auto
also have "\<dots> = (\<Sum> i = 0 ..< Suc n. f (2 * i)) + (\<Sum> i = 0 ..< Suc n. g (2 * i + 1))" by auto
finally show ?case .
qed auto
@@ -187,16 +187,18 @@
((\<forall>n. l \<le> (\<Sum>i=0..<2*n + 1. -1^i*a i)) \<and> (\<lambda> n. \<Sum>i=0..<2*n + 1. -1^i*a i) ----> l)"
(is "\<exists>l. ((\<forall>n. ?f n \<le> l) \<and> _) \<and> ((\<forall>n. l \<le> ?g n) \<and> _)")
proof -
- have fg_diff: "\<And>n. ?f n - ?g n = - a (2 * n)" by auto
+ have fg_diff: "\<And>n. ?f n - ?g n = - a (2 * n)" unfolding One_nat_def by auto
have "\<forall> n. ?f n \<le> ?f (Suc n)"
proof fix n show "?f n \<le> ?f (Suc n)" using mono[of "2*n"] by auto qed
moreover
have "\<forall> n. ?g (Suc n) \<le> ?g n"
- proof fix n show "?g (Suc n) \<le> ?g n" using mono[of "Suc (2*n)"] by auto qed
+ proof fix n show "?g (Suc n) \<le> ?g n" using mono[of "Suc (2*n)"]
+ unfolding One_nat_def by auto qed
moreover
have "\<forall> n. ?f n \<le> ?g n"
- proof fix n show "?f n \<le> ?g n" using fg_diff a_pos by auto qed
+ proof fix n show "?f n \<le> ?g n" using fg_diff a_pos
+ unfolding One_nat_def by auto qed
moreover
have "(\<lambda> n. ?f n - ?g n) ----> 0" unfolding fg_diff
proof (rule LIMSEQ_I)
@@ -904,7 +906,7 @@
proof -
have "(\<Sum>n = 0..<1. f n * 0 ^ n) = (\<Sum>n. f n * 0 ^ n)"
by (rule sums_unique [OF series_zero], simp add: power_0_left)
- thus ?thesis by simp
+ thus ?thesis unfolding One_nat_def by simp
qed
lemma exp_zero [simp]: "exp 0 = 1"
@@ -1234,10 +1236,11 @@
show "x - 1 \<in> {- 1<..<1}" and "(0 :: real) < 1" using `0 < x` `x < 2` by auto
{ fix x :: real assume "x \<in> {- 1<..<1}" hence "norm (-x) < 1" by auto
show "summable (\<lambda>n. -1 ^ n * (1 / real (n + 1)) * real (Suc n) * x ^ n)"
+ unfolding One_nat_def
by (auto simp del: power_mult_distrib simp add: power_mult_distrib[symmetric] summable_geometric[OF `norm (-x) < 1`])
}
qed
- hence "DERIV (\<lambda>x. suminf (?f x)) (x - 1) :> suminf (?f' x)" by auto
+ hence "DERIV (\<lambda>x. suminf (?f x)) (x - 1) :> suminf (?f' x)" unfolding One_nat_def by auto
hence "DERIV (\<lambda>x. suminf (?f (x - 1))) x :> suminf (?f' x)" unfolding DERIV_iff repos .
ultimately have "DERIV (\<lambda>x. ln x - suminf (?f (x - 1))) x :> (suminf (?f' x) - suminf (?f' x))"
by (rule DERIV_diff)
@@ -1514,6 +1517,7 @@
lemma DERIV_fun_pow: "DERIV g x :> m ==>
DERIV (%x. (g x) ^ n) x :> real n * (g x) ^ (n - 1) * m"
+unfolding One_nat_def
apply (rule lemma_DERIV_subst)
apply (rule_tac f = "(%x. x ^ n)" in DERIV_chain2)
apply (rule DERIV_pow, auto)
@@ -1635,7 +1639,7 @@
sums sin x"
unfolding sin_def
by (rule sin_converges [THEN sums_summable, THEN sums_group], simp)
- thus ?thesis by (simp add: mult_ac)
+ thus ?thesis unfolding One_nat_def by (simp add: mult_ac)
qed
lemma sin_gt_zero: "[|0 < x; x < 2 |] ==> 0 < sin x"
@@ -1647,6 +1651,7 @@
apply (rule sin_paired [THEN sums_summable, THEN sums_group], simp)
apply (rotate_tac 2)
apply (drule sin_paired [THEN sums_unique, THEN ssubst])
+unfolding One_nat_def
apply (auto simp del: fact_Suc realpow_Suc)
apply (frule sums_unique)
apply (auto simp del: fact_Suc realpow_Suc)
@@ -1720,6 +1725,7 @@
apply (simp (no_asm) add: mult_assoc del: setsum_op_ivl_Suc)
apply (rule sumr_pos_lt_pair)
apply (erule sums_summable, safe)
+unfolding One_nat_def
apply (simp (no_asm) add: divide_inverse real_0_less_add_iff mult_assoc [symmetric]
del: fact_Suc)
apply (rule real_mult_inverse_cancel2)
@@ -2792,7 +2798,7 @@
lemma monoseq_arctan_series: fixes x :: real
assumes "\<bar>x\<bar> \<le> 1" shows "monoseq (\<lambda> n. 1 / real (n*2+1) * x^(n*2+1))" (is "monoseq ?a")
-proof (cases "x = 0") case True thus ?thesis unfolding monoseq_def by auto
+proof (cases "x = 0") case True thus ?thesis unfolding monoseq_def One_nat_def by auto
next
case False
have "norm x \<le> 1" and "x \<le> 1" and "-1 \<le> x" using assms by auto
@@ -2823,7 +2829,7 @@
lemma zeroseq_arctan_series: fixes x :: real
assumes "\<bar>x\<bar> \<le> 1" shows "(\<lambda> n. 1 / real (n*2+1) * x^(n*2+1)) ----> 0" (is "?a ----> 0")
-proof (cases "x = 0") case True thus ?thesis by (auto simp add: LIMSEQ_const)
+proof (cases "x = 0") case True thus ?thesis unfolding One_nat_def by (auto simp add: LIMSEQ_const)
next
case False
have "norm x \<le> 1" and "x \<le> 1" and "-1 \<le> x" using assms by auto
@@ -2831,12 +2837,14 @@
proof (cases "\<bar>x\<bar> < 1")
case True hence "norm x < 1" by auto
from LIMSEQ_mult[OF LIMSEQ_inverse_real_of_nat LIMSEQ_power_zero[OF `norm x < 1`, THEN LIMSEQ_Suc]]
- show ?thesis unfolding inverse_eq_divide Suc_plus1 using LIMSEQ_linear[OF _ pos2] by auto
+ have "(\<lambda>n. 1 / real (n + 1) * x ^ (n + 1)) ----> 0"
+ unfolding inverse_eq_divide Suc_plus1 by simp
+ then show ?thesis using pos2 by (rule LIMSEQ_linear)
next
case False hence "x = -1 \<or> x = 1" using `\<bar>x\<bar> \<le> 1` by auto
- hence n_eq: "\<And> n. x ^ (n * 2 + 1) = x" by auto
+ hence n_eq: "\<And> n. x ^ (n * 2 + 1) = x" unfolding One_nat_def by auto
from LIMSEQ_mult[OF LIMSEQ_inverse_real_of_nat[THEN LIMSEQ_linear, OF pos2, unfolded inverse_eq_divide] LIMSEQ_const[of x]]
- show ?thesis unfolding n_eq by auto
+ show ?thesis unfolding n_eq Suc_plus1 by auto
qed
qed
@@ -2989,7 +2997,7 @@
from `even n` obtain m where "2 * m = n" unfolding even_mult_two_ex by auto
from bounds[of m, unfolded this atLeastAtMost_iff]
have "\<bar>arctan x - (\<Sum>i = 0..<n. (?c x i))\<bar> \<le> (\<Sum>i = 0..<n + 1. (?c x i)) - (\<Sum>i = 0..<n. (?c x i))" by auto
- also have "\<dots> = ?c x n" by auto
+ also have "\<dots> = ?c x n" unfolding One_nat_def by auto
also have "\<dots> = ?a x n" unfolding sgn_pos a_pos by auto
finally show ?thesis .
next
@@ -2998,7 +3006,7 @@
hence m_plus: "2 * (m + 1) = n + 1" by auto
from bounds[of "m + 1", unfolded this atLeastAtMost_iff, THEN conjunct1] bounds[of m, unfolded m_def atLeastAtMost_iff, THEN conjunct2]
have "\<bar>arctan x - (\<Sum>i = 0..<n. (?c x i))\<bar> \<le> (\<Sum>i = 0..<n. (?c x i)) - (\<Sum>i = 0..<n+1. (?c x i))" by auto
- also have "\<dots> = - ?c x n" by auto
+ also have "\<dots> = - ?c x n" unfolding One_nat_def by auto
also have "\<dots> = ?a x n" unfolding sgn_neg a_pos by auto
finally show ?thesis .
qed
@@ -3011,7 +3019,9 @@
ultimately have "0 \<le> ?a 1 n - ?diff 1 n" by (rule LIM_less_bound)
hence "?diff 1 n \<le> ?a 1 n" by auto
}
- have "?a 1 ----> 0" unfolding LIMSEQ_rabs_zero power_one divide_inverse by (auto intro!: LIMSEQ_mult LIMSEQ_linear LIMSEQ_inverse_real_of_nat)
+ have "?a 1 ----> 0"
+ unfolding LIMSEQ_rabs_zero power_one divide_inverse One_nat_def
+ by (auto intro!: LIMSEQ_mult LIMSEQ_linear LIMSEQ_inverse_real_of_nat)
have "?diff 1 ----> 0"
proof (rule LIMSEQ_I)
fix r :: real assume "0 < r"
@@ -3031,7 +3041,7 @@
have "- (pi / 2) < 0" using pi_gt_zero by auto
have "- (2 * pi) < 0" using pi_gt_zero by auto
- have c_minus_minus: "\<And> i. ?c (- 1) i = - ?c 1 i" by auto
+ have c_minus_minus: "\<And> i. ?c (- 1) i = - ?c 1 i" unfolding One_nat_def by auto
have "arctan (- 1) = arctan (tan (-(pi / 4)))" unfolding tan_45 tan_minus ..
also have "\<dots> = - (pi / 4)" by (rule arctan_tan, auto simp add: order_less_trans[OF `- (pi / 2) < 0` pi_gt_zero])
@@ -3179,4 +3189,4 @@
apply (erule polar_ex2)
done
-end
+end
--- a/src/HOL/Transitive_Closure.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/Transitive_Closure.thy Fri Feb 27 18:50:35 2009 +0100
@@ -646,7 +646,7 @@
val trancl_rtrancl_trancl = @{thm trancl_rtrancl_trancl};
val rtrancl_trans = @{thm rtrancl_trans};
- fun decomp (Trueprop $ t) =
+ fun decomp (@{const Trueprop} $ t) =
let fun dec (Const ("op :", _) $ (Const ("Pair", _) $ a $ b) $ rel ) =
let fun decr (Const ("Transitive_Closure.rtrancl", _ ) $ r) = (r,"r*")
| decr (Const ("Transitive_Closure.trancl", _ ) $ r) = (r,"r+")
@@ -654,7 +654,8 @@
val (rel,r) = decr (Envir.beta_eta_contract rel);
in SOME (a,b,rel,r) end
| dec _ = NONE
- in dec t end;
+ in dec t end
+ | decomp _ = NONE;
end);
@@ -669,7 +670,7 @@
val trancl_rtrancl_trancl = @{thm tranclp_rtranclp_tranclp};
val rtrancl_trans = @{thm rtranclp_trans};
- fun decomp (Trueprop $ t) =
+ fun decomp (@{const Trueprop} $ t) =
let fun dec (rel $ a $ b) =
let fun decr (Const ("Transitive_Closure.rtranclp", _ ) $ r) = (r,"r*")
| decr (Const ("Transitive_Closure.tranclp", _ ) $ r) = (r,"r+")
@@ -677,7 +678,8 @@
val (rel,r) = decr rel;
in SOME (a, b, rel, r) end
| dec _ = NONE
- in dec t end;
+ in dec t end
+ | decomp _ = NONE;
end);
*}
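The added catch-all clauses make both decomp functions total: on goals whose conclusion is not of the form Trueprop $ t the solver now answers NONE and backs off, rather than raising a Match exception. The same idiom in isolation, with a hypothetical destructor that is not from the changeset:

    ML {*
      fun dest_eq (Const ("op =", _) $ t $ u) = SOME (t, u)
        | dest_eq _ = NONE;  (* total: no Match exception on other terms *)
    *}
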
--- a/src/HOL/ex/ApproximationEx.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOL/ex/ApproximationEx.thy Fri Feb 27 18:50:35 2009 +0100
@@ -1,6 +1,7 @@
-(* Title: HOL/ex/ApproximationEx.thy
- Author: Johannes Hoelzl <hoelzl@in.tum.de> 2009
+(* Title: HOL/ex/ApproximationEx.thy
+ Author: Johannes Hoelzl <hoelzl@in.tum.de> 2009
*)
+
theory ApproximationEx
imports "~~/src/HOL/Reflection/Approximation"
begin
--- a/src/HOLCF/Fixrec.thy Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOLCF/Fixrec.thy Fri Feb 27 18:50:35 2009 +0100
@@ -583,6 +583,20 @@
use "Tools/fixrec_package.ML"
+setup {* FixrecPackage.setup *}
+
+setup {*
+ FixrecPackage.add_matchers
+ [ (@{const_name up}, @{const_name match_up}),
+ (@{const_name sinl}, @{const_name match_sinl}),
+ (@{const_name sinr}, @{const_name match_sinr}),
+ (@{const_name spair}, @{const_name match_spair}),
+ (@{const_name cpair}, @{const_name match_cpair}),
+ (@{const_name ONE}, @{const_name match_ONE}),
+ (@{const_name TT}, @{const_name match_TT}),
+ (@{const_name FF}, @{const_name match_FF}) ]
+*}
+
hide (open) const return bind fail run cases
end
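Descendant theories can extend the matcher table in the same way as the setup block above; a sketch with a hypothetical constructor myCons and its hypothetical match combinator match_myCons:

    setup {*
      FixrecPackage.add_matchers
        [ (@{const_name myCons}, @{const_name match_myCons}) ]
    *}
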
--- a/src/HOLCF/Tools/domain/domain_axioms.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOLCF/Tools/domain/domain_axioms.ML Fri Feb 27 18:50:35 2009 +0100
@@ -39,7 +39,7 @@
fun one_con (con,args) =
foldr /\# (list_ccomb (%%:con, mapn (idxs (length args)) 1 args)) args;
in ("copy_def", %%:(dname^"_copy") ==
- /\"f" (list_ccomb (%%:(dname^"_when"), map one_con cons))) end;
+ /\ "f" (list_ccomb (%%:(dname^"_when"), map one_con cons))) end;
(* -- definitions concerning the constructors, discriminators and selectors - *)
@@ -107,7 +107,7 @@
[when_def, copy_def] @
con_defs @ dis_defs @ mat_defs @ pat_defs @ sel_defs @
[take_def, finite_def])
-end; (* let *)
+end; (* let (calc_axioms) *)
fun infer_props thy = map (apsnd (FixrecPackage.legacy_infer_prop thy));
@@ -117,6 +117,14 @@
fun add_defs_i x = snd o (PureThy.add_defs false) (map (Thm.no_attributes o apfst Binding.name) x);
fun add_defs_infer defs thy = add_defs_i (infer_props thy defs) thy;
+fun add_matchers (((dname,_),cons) : eq) thy =
+ let
+ val con_names = map fst cons;
+ val mat_names = map mat_name con_names;
+ fun qualify n = Sign.full_name thy (Binding.name n);
+ val ms = map qualify con_names ~~ map qualify mat_names;
+ in FixrecPackage.add_matchers ms thy end;
+
in (* local *)
fun add_axioms (comp_dnam, eqs : eq list) thy' = let
@@ -125,7 +133,7 @@
val x_name = idx_name dnames "x";
fun copy_app dname = %%:(dname^"_copy")`Bound 0;
val copy_def = ("copy_def" , %%:(comp_dname^"_copy") ==
- /\"f"(mk_ctuple (map copy_app dnames)));
+ /\ "f"(mk_ctuple (map copy_app dnames)));
val bisim_def = ("bisim_def",%%:(comp_dname^"_bisim")==mk_lam("R",
let
fun one_con (con,args) = let
@@ -164,7 +172,8 @@
in thy |> Sign.add_path comp_dnam
|> add_defs_infer (bisim_def::(if length eqs>1 then [copy_def] else []))
|> Sign.parent_path
-end;
+ |> fold add_matchers eqs
+end; (* let (add_axioms) *)
end; (* local *)
end; (* struct *)
--- a/src/HOLCF/Tools/fixrec_package.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/HOLCF/Tools/fixrec_package.ML Fri Feb 27 18:50:35 2009 +0100
@@ -8,17 +8,20 @@
sig
val legacy_infer_term: theory -> term -> term
val legacy_infer_prop: theory -> term -> term
+
val add_fixrec: bool -> (Attrib.binding * string) list list -> theory -> theory
val add_fixrec_i: bool -> ((binding * attribute list) * term) list list -> theory -> theory
val add_fixpat: Attrib.binding * string list -> theory -> theory
val add_fixpat_i: (binding * attribute list) * term list -> theory -> theory
+ val add_matchers: (string * string) list -> theory -> theory
+ val setup: theory -> theory
end;
structure FixrecPackage: FIXREC_PACKAGE =
struct
(* legacy type inference *)
-
+(* used by the domain package *)
fun legacy_infer_term thy t =
singleton (Syntax.check_terms (ProofContext.init thy)) (Sign.intern_term thy t);
@@ -33,15 +36,41 @@
fun fixrec_eq_err thy s eq =
fixrec_err (s ^ "\nin\n" ^ quote (Syntax.string_of_term_global thy eq));
+(*************************************************************************)
+(***************************** building types ****************************)
+(*************************************************************************)
+
(* ->> is taken from holcf_logic.ML *)
-(* TODO: fix dependencies so we can import HOLCFLogic here *)
-infixr 6 ->>;
-fun S ->> T = Type (@{type_name "->"},[S,T]);
+fun cfunT (T, U) = Type(@{type_name "->"}, [T, U]);
+
+infixr 6 ->>; val (op ->>) = cfunT;
+
+fun dest_cfunT (Type(@{type_name "->"}, [T, U])) = (T, U)
+ | dest_cfunT T = raise TYPE ("dest_cfunT", [T], []);
+
+fun binder_cfun (Type(@{type_name "->"},[T, U])) = T :: binder_cfun U
+ | binder_cfun _ = [];
+
+fun body_cfun (Type(@{type_name "->"},[T, U])) = body_cfun U
+ | body_cfun T = T;
-(* extern_name is taken from domain/library.ML *)
-fun extern_name con = case Symbol.explode con of
- ("o"::"p"::" "::rest) => implode rest
- | _ => con;
+fun strip_cfun T : typ list * typ =
+ (binder_cfun T, body_cfun T);
+
+fun maybeT T = Type(@{type_name "maybe"}, [T]);
+
+fun dest_maybeT (Type(@{type_name "maybe"}, [T])) = T
+ | dest_maybeT T = raise TYPE ("dest_maybeT", [T], []);
+
+fun tupleT [] = @{typ "unit"}
+ | tupleT [T] = T
+ | tupleT (T :: Ts) = HOLogic.mk_prodT (T, tupleT Ts);
+
+fun matchT T = body_cfun T ->> maybeT (tupleT (binder_cfun T));
+
+(*************************************************************************)
+(***************************** building terms ****************************)
+(*************************************************************************)
val mk_trp = HOLogic.mk_Trueprop;
@@ -52,30 +81,86 @@
fun chead_of (Const(@{const_name Rep_CFun},_)$f$t) = chead_of f
| chead_of u = u;
-(* these are helpful functions copied from HOLCF/domain/library.ML *)
-fun %: s = Free(s,dummyT);
-fun %%: s = Const(s,dummyT);
-infix 0 ==; fun S == T = %%:"==" $ S $ T;
-infix 1 ===; fun S === T = %%:"op =" $ S $ T;
-infix 9 ` ; fun f ` x = %%:@{const_name Rep_CFun} $ f $ x;
+fun capply_const (S, T) =
+ Const(@{const_name Rep_CFun}, (S ->> T) --> (S --> T));
+
+fun cabs_const (S, T) =
+ Const(@{const_name Abs_CFun}, (S --> T) --> (S ->> T));
+
+fun mk_capply (t, u) =
+ let val (S, T) =
+ case Term.fastype_of t of
+ Type(@{type_name "->"}, [S, T]) => (S, T)
+ | _ => raise TERM ("mk_capply " ^ ML_Syntax.print_list ML_Syntax.print_term [t, u], [t, u]);
+ in capply_const (S, T) $ t $ u end;
+
+infix 0 ==; val (op ==) = Logic.mk_equals;
+infix 1 ===; val (op ===) = HOLogic.mk_eq;
+infix 9 ` ; val (op `) = mk_capply;
+
+
+fun mk_cpair (t, u) =
+ let val T = Term.fastype_of t
+ val U = Term.fastype_of u
+ val cpairT = T ->> U ->> HOLogic.mk_prodT (T, U)
+ in Const(@{const_name cpair}, cpairT) ` t ` u end;
+
+fun mk_cfst t =
+ let val T = Term.fastype_of t;
+ val (U, _) = HOLogic.dest_prodT T;
+ in Const(@{const_name cfst}, T ->> U) ` t end;
+
+fun mk_csnd t =
+ let val T = Term.fastype_of t;
+ val (_, U) = HOLogic.dest_prodT T;
+ in Const(@{const_name csnd}, T ->> U) ` t end;
+
+fun mk_csplit t =
+ let val (S, TU) = dest_cfunT (Term.fastype_of t);
+ val (T, U) = dest_cfunT TU;
+ val csplitT = (S ->> T ->> U) ->> HOLogic.mk_prodT (S, T) ->> U;
+ in Const(@{const_name csplit}, csplitT) ` t end;
(* builds the expression (LAM v. rhs) *)
-fun big_lambda v rhs = %%:@{const_name Abs_CFun}$(Term.lambda v rhs);
+fun big_lambda v rhs =
+ cabs_const (Term.fastype_of v, Term.fastype_of rhs) $ Term.lambda v rhs;
(* builds the expression (LAM v1 v2 .. vn. rhs) *)
fun big_lambdas [] rhs = rhs
| big_lambdas (v::vs) rhs = big_lambda v (big_lambdas vs rhs);
(* builds the expression (LAM <v1,v2,..,vn>. rhs) *)
-fun lambda_ctuple [] rhs = big_lambda (%:"unit") rhs
+fun lambda_ctuple [] rhs = big_lambda (Free("unit", HOLogic.unitT)) rhs
| lambda_ctuple (v::[]) rhs = big_lambda v rhs
| lambda_ctuple (v::vs) rhs =
- %%:@{const_name csplit}`(big_lambda v (lambda_ctuple vs rhs));
+ mk_csplit (big_lambda v (lambda_ctuple vs rhs));
(* builds the expression <v1,v2,..,vn> *)
-fun mk_ctuple [] = %%:"UU"
+fun mk_ctuple [] = @{term "UU::unit"}
| mk_ctuple (t::[]) = t
-| mk_ctuple (t::ts) = %%:@{const_name cpair}`t`(mk_ctuple ts);
+| mk_ctuple (t::ts) = mk_cpair (t, mk_ctuple ts);
+
+fun mk_return t =
+ let val T = Term.fastype_of t
+ in Const(@{const_name Fixrec.return}, T ->> maybeT T) ` t end;
+
+fun mk_bind (t, u) =
+ let val (T, mU) = dest_cfunT (Term.fastype_of u);
+ val bindT = maybeT T ->> (T ->> mU) ->> mU;
+ in Const(@{const_name Fixrec.bind}, bindT) ` t ` u end;
+
+fun mk_mplus (t, u) =
+ let val mT = Term.fastype_of t
+ in Const(@{const_name Fixrec.mplus}, mT ->> mT ->> mT) ` t ` u end;
+
+fun mk_run t =
+ let val mT = Term.fastype_of t
+ val T = dest_maybeT mT
+ in Const(@{const_name Fixrec.run}, mT ->> T) ` t end;
+
+fun mk_fix t =
+ let val (T, _) = dest_cfunT (Term.fastype_of t)
+ in Const(@{const_name fix}, (T ->> T) ->> T) ` t end;
(*************************************************************************)
(************* fixed-point definitions and unfolding theorems ************)
@@ -84,22 +169,21 @@
fun add_fixdefs eqs thy =
let
val (lhss,rhss) = ListPair.unzip (map dest_eqs eqs);
- val fixpoint = %%:@{const_name fix}`lambda_ctuple lhss (mk_ctuple rhss);
+ val fixpoint = mk_fix (lambda_ctuple lhss (mk_ctuple rhss));
fun one_def (l as Const(n,T)) r =
let val b = Sign.base_name n in (b, (b^"_def", l == r)) end
| one_def _ _ = fixrec_err "fixdefs: lhs not of correct form";
fun defs [] _ = []
| defs (l::[]) r = [one_def l r]
- | defs (l::ls) r = one_def l (%%:@{const_name cfst}`r) :: defs ls (%%:@{const_name csnd}`r);
- val (names, pre_fixdefs) = ListPair.unzip (defs lhss fixpoint);
+ | defs (l::ls) r = one_def l (mk_cfst r) :: defs ls (mk_csnd r);
+ val (names, fixdefs) = ListPair.unzip (defs lhss fixpoint);
- val fixdefs = map (apsnd (legacy_infer_prop thy)) pre_fixdefs;
val (fixdef_thms, thy') =
PureThy.add_defs false (map (Thm.no_attributes o apfst Binding.name) fixdefs) thy;
val ctuple_fixdef_thm = foldr1 (fn (x,y) => @{thm cpair_equalI} OF [x,y]) fixdef_thms;
- val ctuple_unfold = legacy_infer_term thy' (mk_trp (mk_ctuple lhss === mk_ctuple rhss));
+ val ctuple_unfold = mk_trp (mk_ctuple lhss === mk_ctuple rhss);
val ctuple_unfold_thm = Goal.prove_global thy' [] [] ctuple_unfold
(fn _ => EVERY [rtac (ctuple_fixdef_thm RS fix_eq2 RS trans) 1,
simp_tac (simpset_of thy') 1]);
@@ -123,6 +207,17 @@
(*********** monadic notation and pattern matching compilation ***********)
(*************************************************************************)
+structure FixrecMatchData = TheoryDataFun (
+ type T = string Symtab.table;
+ val empty = Symtab.empty;
+ val copy = I;
+ val extend = I;
+ fun merge _ tabs : T = Symtab.merge (K true) tabs;
+);
+
+(* associate match functions with pattern constants *)
+fun add_matchers ms = FixrecMatchData.map (fold Symtab.update ms);
+
fun add_names (Const(a,_), bs) = insert (op =) (Sign.base_name a) bs
| add_names (Free(a,_) , bs) = insert (op =) a bs
| add_names (f $ u , bs) = add_names (f, add_names(u, bs))
@@ -132,56 +227,63 @@
fun add_terms ts xs = foldr add_names xs ts;
(* builds a monadic term for matching a constructor pattern *)
-fun pre_build pat rhs vs taken =
+fun pre_build match_name pat rhs vs taken =
case pat of
Const(@{const_name Rep_CFun},_)$f$(v as Free(n,T)) =>
- pre_build f rhs (v::vs) taken
+ pre_build match_name f rhs (v::vs) taken
| Const(@{const_name Rep_CFun},_)$f$x =>
- let val (rhs', v, taken') = pre_build x rhs [] taken;
- in pre_build f rhs' (v::vs) taken' end
+ let val (rhs', v, taken') = pre_build match_name x rhs [] taken;
+ in pre_build match_name f rhs' (v::vs) taken' end
| Const(c,T) =>
let
val n = Name.variant taken "v";
fun result_type (Type(@{type_name "->"},[_,T])) (x::xs) = result_type T xs
| result_type T _ = T;
val v = Free(n, result_type T vs);
- val m = "match_"^(extern_name(Sign.base_name c));
+ val m = Const(match_name c, matchT T);
val k = lambda_ctuple vs rhs;
in
- (%%:@{const_name Fixrec.bind}`(%%:m`v)`k, v, n::taken)
+ (mk_bind (m`v, k), v, n::taken)
end
| Free(n,_) => fixrec_err ("expected constructor, found free variable " ^ quote n)
| _ => fixrec_err "pre_build: invalid pattern";
(* builds a monadic term for matching a function definition pattern *)
(* returns (name, arity, matcher) *)
-fun building pat rhs vs taken =
+fun building match_name pat rhs vs taken =
case pat of
Const(@{const_name Rep_CFun}, _)$f$(v as Free(n,T)) =>
- building f rhs (v::vs) taken
+ building match_name f rhs (v::vs) taken
| Const(@{const_name Rep_CFun}, _)$f$x =>
- let val (rhs', v, taken') = pre_build x rhs [] taken;
- in building f rhs' (v::vs) taken' end
- | Const(name,_) => (name, length vs, big_lambdas vs rhs)
+ let val (rhs', v, taken') = pre_build match_name x rhs [] taken;
+ in building match_name f rhs' (v::vs) taken' end
+ | Const(name,_) => (pat, length vs, big_lambdas vs rhs)
| _ => fixrec_err "function is not declared as constant in theory";
-fun match_eq eq =
+fun match_eq match_name eq =
let val (lhs,rhs) = dest_eqs eq;
- in building lhs (%%:@{const_name Fixrec.return}`rhs) [] (add_terms [eq] []) end;
+ in
+ building match_name lhs (mk_return rhs) []
+ (add_terms [eq] [])
+ end;
(* returns the sum (using +++) of the terms in ms *)
(* also applies "run" to the result! *)
fun fatbar arity ms =
let
+ fun LAM_Ts 0 t = ([], Term.fastype_of t)
+ | LAM_Ts n (_ $ Abs(_,T,t)) =
+ let val (Ts, U) = LAM_Ts (n-1) t in (T::Ts, U) end
+ | LAM_Ts _ _ = fixrec_err "fatbar: internal error, not enough LAMs";
fun unLAM 0 t = t
| unLAM n (_$Abs(_,_,t)) = unLAM (n-1) t
| unLAM _ _ = fixrec_err "fatbar: internal error, not enough LAMs";
- fun reLAM 0 t = t
- | reLAM n t = reLAM (n-1) (%%:@{const_name Abs_CFun} $ Abs("",dummyT,t));
- fun mplus (x,y) = %%:@{const_name Fixrec.mplus}`x`y;
- val msum = foldr1 mplus (map (unLAM arity) ms);
+ fun reLAM ([], U) t = t
+ | reLAM (T::Ts, U) t = reLAM (Ts, T ->> U) (cabs_const(T,U)$Abs("",T,t));
+ val msum = foldr1 mk_mplus (map (unLAM arity) ms);
+ val (Ts, U) = LAM_Ts arity (hd ms)
in
- reLAM arity (%%:@{const_name Fixrec.run}`msum)
+ reLAM (rev Ts, dest_maybeT U) (mk_run msum)
end;
fun unzip3 [] = ([],[],[])
@@ -190,16 +292,16 @@
in (x::xs, y::ys, z::zs) end;
(* this is the pattern-matching compiler function *)
-fun compile_pats eqs =
+fun compile_pats match_name eqs =
let
- val ((n::names),(a::arities),mats) = unzip3 (map match_eq eqs);
+ val ((n::names),(a::arities),mats) = unzip3 (map (match_eq match_name) eqs);
val cname = if forall (fn x => n=x) names then n
else fixrec_err "all equations in block must define the same function";
val arity = if forall (fn x => a=x) arities then a
else fixrec_err "all equations in block must have the same arity";
val rhs = fatbar arity mats;
in
- mk_trp (%%:cname === rhs)
+ mk_trp (cname === rhs)
end;
(*************************************************************************)
@@ -235,8 +337,14 @@
fun unconcat [] _ = []
| unconcat (n::ns) xs = List.take (xs,n) :: unconcat ns (List.drop (xs,n));
+ val matcher_tab = FixrecMatchData.get thy;
+ fun match_name c =
+ case Symtab.lookup matcher_tab c of SOME m => m
+ | NONE => fixrec_err ("unknown pattern constructor: " ^ c);
+
val pattern_blocks = unconcat lengths (map Logic.strip_imp_concl eqn_ts');
- val compiled_ts = map (legacy_infer_term thy o compile_pats) pattern_blocks;
+ val compiled_ts =
+ map (compile_pats match_name) pattern_blocks;
val (thy', cnames, fixdef_thms, unfold_thms) = add_fixdefs compiled_ts thy;
in
if strict then let (* only prove simp rules if strict = true *)
@@ -312,4 +420,6 @@
end; (* local structure *)
+val setup = FixrecMatchData.init;
+
end;
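The overall effect of the new combinators (cfunT, mk_capply, mk_cpair, ...) is that fixrec now builds fully typed terms directly from the operand types, so the former round trips through legacy_infer_term are unnecessary when compiling pattern blocks. The core idiom as a self-contained sketch, where typed_capply is a hypothetical stand-in for the package-internal mk_capply:

    ML {*
      (* compute the result type from the operand via fastype_of,
         instead of using dummyT and inferring types afterwards *)
      fun typed_capply f x =
        let val fT as Type (@{type_name "->"}, [S, T]) = Term.fastype_of f
        in Const (@{const_name Rep_CFun}, fT --> S --> T) $ f $ x end;
    *}
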
--- a/src/Pure/General/swing.scala Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/General/swing.scala Fri Feb 27 18:50:35 2009 +0100
@@ -10,9 +10,11 @@
object Swing
{
- def now(body: => Unit) {
- if (SwingUtilities.isEventDispatchThread) body
- else SwingUtilities.invokeAndWait(new Runnable { def run = body })
+ def now[A](body: => A): A = {
+ var result: Option[A] = None
+ if (SwingUtilities.isEventDispatchThread) { result = Some(body) }
+ else SwingUtilities.invokeAndWait(new Runnable { def run = { result = Some(body) } })
+ result.get
}
def later(body: => Unit) {
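Swing.now thus becomes polymorphic: it still evaluates body on the event-dispatch thread, but now hands the result (of type A) back to the caller, so the state of Swing components can be queried synchronously from other threads.
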
--- a/src/Pure/IsaMakefile Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/IsaMakefile Fri Feb 27 18:50:35 2009 +0100
@@ -19,9 +19,30 @@
## Pure
+BOOTSTRAP_FILES = ML-Systems/alice.ML ML-Systems/exn.ML \
+ ML-Systems/install_pp_polyml.ML ML-Systems/ml_name_space.ML \
+ ML-Systems/mosml.ML ML-Systems/multithreading.ML \
+ ML-Systems/multithreading_polyml.ML ML-Systems/overloading_smlnj.ML \
+ ML-Systems/polyml-4.1.3.ML ML-Systems/polyml-4.1.4.ML \
+ ML-Systems/polyml-4.2.0.ML ML-Systems/polyml-5.0.ML \
+ ML-Systems/polyml-5.1.ML ML-Systems/polyml-experimental.ML \
+ ML-Systems/polyml.ML ML-Systems/polyml_common.ML \
+ ML-Systems/polyml_old_compiler4.ML \
+ ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
+ ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
+ ML-Systems/thread_dummy.ML ML-Systems/time_limit.ML \
+ ML-Systems/universal.ML
+
+RAW: $(OUT)/RAW
+
+$(OUT)/RAW: $(BOOTSTRAP_FILES)
+ @./mk -r
+
+
Pure: $(OUT)/Pure
-$(OUT)/Pure: Concurrent/ROOT.ML Concurrent/future.ML \
+$(OUT)/Pure: $(BOOTSTRAP_FILES) ../Tools/auto_solve.ML \
+ ../Tools/quickcheck.ML Concurrent/ROOT.ML Concurrent/future.ML \
Concurrent/mailbox.ML Concurrent/par_list.ML \
Concurrent/par_list_dummy.ML Concurrent/simple_thread.ML \
Concurrent/synchronized.ML Concurrent/task_queue.ML General/ROOT.ML \
@@ -38,33 +59,21 @@
Isar/attrib.ML Isar/auto_bind.ML Isar/calculation.ML Isar/class.ML \
Isar/class_target.ML Isar/code.ML Isar/code_unit.ML \
Isar/constdefs.ML Isar/context_rules.ML Isar/element.ML \
- Isar/expression.ML Isar/find_theorems.ML Isar/find_consts.ML \
- Isar/isar.ML Isar/isar_document.ML Isar/isar_cmd.ML Isar/isar_syn.ML \
- Isar/local_defs.ML Isar/local_syntax.ML Isar/local_theory.ML \
- Isar/locale.ML Isar/method.ML Isar/net_rules.ML \
- Isar/object_logic.ML Isar/obtain.ML Isar/outer_keyword.ML \
- Isar/outer_lex.ML Isar/outer_parse.ML Isar/outer_syntax.ML \
- Isar/overloading.ML Isar/proof.ML Isar/proof_context.ML \
- Isar/proof_display.ML Isar/proof_node.ML Isar/rule_cases.ML \
- Isar/rule_insts.ML Isar/session.ML Isar/skip_proof.ML \
- Isar/spec_parse.ML Isar/specification.ML Isar/theory_target.ML \
- Isar/toplevel.ML Isar/value_parse.ML ML-Systems/alice.ML \
- ML-Systems/exn.ML ML-Systems/install_pp_polyml.ML \
- ML-Systems/ml_name_space.ML ML-Systems/multithreading.ML \
- ML-Systems/mosml.ML ML-Systems/multithreading_polyml.ML \
- ML-Systems/overloading_smlnj.ML ML-Systems/polyml-4.1.3.ML \
- ML-Systems/polyml-4.1.4.ML ML-Systems/polyml-4.2.0.ML \
- ML-Systems/polyml-5.0.ML ML-Systems/polyml-5.1.ML \
- ML-Systems/polyml_common.ML ML-Systems/polyml.ML \
- ML-Systems/polyml_old_compiler4.ML \
- ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
- ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
- ML-Systems/time_limit.ML ML-Systems/thread_dummy.ML \
- ML-Systems/universal.ML ML/ml_context.ML ML/ml_antiquote.ML \
- ML/ml_lex.ML ML/ml_parse.ML ML/ml_syntax.ML ML/ml_thms.ML \
- Proof/extraction.ML Proof/proof_rewrite_rules.ML \
- Proof/proof_syntax.ML Proof/proofchecker.ML Proof/reconstruct.ML \
- ProofGeneral/ROOT.ML ProofGeneral/pgip.ML ProofGeneral/pgip_input.ML \
+ Isar/expression.ML Isar/isar.ML Isar/isar_cmd.ML \
+ Isar/isar_document.ML Isar/isar_syn.ML Isar/local_defs.ML \
+ Isar/local_syntax.ML Isar/local_theory.ML Isar/locale.ML \
+ Isar/method.ML Isar/net_rules.ML Isar/object_logic.ML Isar/obtain.ML \
+ Isar/outer_keyword.ML Isar/outer_lex.ML Isar/outer_parse.ML \
+ Isar/outer_syntax.ML Isar/overloading.ML Isar/proof.ML \
+ Isar/proof_context.ML Isar/proof_display.ML Isar/proof_node.ML \
+ Isar/rule_cases.ML Isar/rule_insts.ML Isar/session.ML \
+ Isar/skip_proof.ML Isar/spec_parse.ML Isar/specification.ML \
+ Isar/theory_target.ML Isar/toplevel.ML Isar/value_parse.ML \
+ ML/ml_antiquote.ML ML/ml_context.ML ML/ml_lex.ML ML/ml_parse.ML \
+ ML/ml_syntax.ML ML/ml_thms.ML Proof/extraction.ML \
+ Proof/proof_rewrite_rules.ML Proof/proof_syntax.ML \
+ Proof/proofchecker.ML Proof/reconstruct.ML ProofGeneral/ROOT.ML \
+ ProofGeneral/pgip.ML ProofGeneral/pgip_input.ML \
ProofGeneral/pgip_isabelle.ML ProofGeneral/pgip_markup.ML \
ProofGeneral/pgip_output.ML ProofGeneral/pgip_parser.ML \
ProofGeneral/pgip_tests.ML ProofGeneral/pgip_types.ML \
@@ -75,21 +84,21 @@
Syntax/syn_trans.ML Syntax/syntax.ML Syntax/type_ext.ML Thy/html.ML \
Thy/latex.ML Thy/present.ML Thy/term_style.ML Thy/thm_deps.ML \
Thy/thy_header.ML Thy/thy_info.ML Thy/thy_load.ML Thy/thy_output.ML \
- Thy/thy_syntax.ML Tools/ROOT.ML \
- Tools/isabelle_process.ML Tools/named_thms.ML Tools/xml_syntax.ML \
- assumption.ML axclass.ML codegen.ML config.ML conjunction.ML \
- consts.ML context.ML context_position.ML conv.ML defs.ML display.ML \
- drule.ML envir.ML facts.ML goal.ML interpretation.ML library.ML \
- logic.ML meta_simplifier.ML more_thm.ML morphism.ML name.ML net.ML \
- old_goals.ML old_term.ML pattern.ML primitive_defs.ML proofterm.ML \
- pure_setup.ML pure_thy.ML search.ML sign.ML simplifier.ML sorts.ML \
- subgoal.ML tactic.ML tctical.ML term.ML term_ord.ML term_subst.ML \
- theory.ML thm.ML type.ML type_infer.ML unify.ML variable.ML \
- ../Tools/quickcheck.ML ../Tools/auto_solve.ML
+ Thy/thy_syntax.ML Tools/ROOT.ML Tools/find_consts.ML \
+ Tools/find_theorems.ML Tools/isabelle_process.ML Tools/named_thms.ML \
+ Tools/xml_syntax.ML assumption.ML axclass.ML codegen.ML config.ML \
+ conjunction.ML consts.ML context.ML context_position.ML conv.ML \
+ defs.ML display.ML drule.ML envir.ML facts.ML goal.ML \
+ interpretation.ML library.ML logic.ML meta_simplifier.ML more_thm.ML \
+ morphism.ML name.ML net.ML old_goals.ML old_term.ML pattern.ML \
+ primitive_defs.ML proofterm.ML pure_setup.ML pure_thy.ML search.ML \
+ sign.ML simplifier.ML sorts.ML subgoal.ML tactic.ML tctical.ML \
+ term.ML term_ord.ML term_subst.ML theory.ML thm.ML type.ML \
+ type_infer.ML unify.ML variable.ML
@./mk
-## special targets
+## Proof General keywords
Pure-ProofGeneral: Pure $(LOG)/Pure-ProofGeneral.gz
@@ -97,28 +106,11 @@
@$(ISABELLE_TOOL) usedir -f proof_general_keywords.ML $(OUT)/Pure ProofGeneral
-RAW: $(OUT)/RAW
-
-$(OUT)/RAW: ML-Systems/alice.ML ML-Systems/exn.ML \
- ML-Systems/ml_name_space.ML ML-Systems/multithreading.ML \
- ML-Systems/mosml.ML ML-Systems/multithreading_polyml.ML \
- ML-Systems/overloading_smlnj.ML ML-Systems/polyml-4.1.3.ML \
- ML-Systems/polyml-4.1.4.ML ML-Systems/polyml-4.2.0.ML \
- ML-Systems/polyml-5.0.ML ML-Systems/polyml-5.1.ML \
- ML-Systems/polyml_common.ML ML-Systems/polyml.ML \
- ML-Systems/polyml_old_compiler4.ML \
- ML-Systems/polyml_old_compiler5.ML ML-Systems/proper_int.ML \
- ML-Systems/smlnj.ML ML-Systems/system_shell.ML \
- ML-Systems/time_limit.ML ML-Systems/thread_dummy.ML \
- ML-Systems/universal.ML
- @./mk -r
-
-
## clean
clean:
- @rm -f $(OUT)/Pure $(LOG)/Pure.gz $(LOG)/Pure-ProofGeneral.gz \
- $(OUT)/RAW $(LOG)/RAW.gz
+ @rm -f $(OUT)/Pure $(LOG)/Pure.gz $(OUT)/RAW $(LOG)/RAW.gz \
+ $(LOG)/Pure-ProofGeneral.gz
## Scala material
--- a/src/Pure/Isar/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Isar/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -89,7 +89,5 @@
(*theory and proof operations*)
use "rule_insts.ML";
use "../Thy/thm_deps.ML";
-use "find_theorems.ML";
-use "find_consts.ML";
use "isar_cmd.ML";
use "isar_syn.ML";
--- a/src/Pure/Isar/find_consts.ML Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-(* Title: find_consts.ML
- Author: Timothy Bourke and Gerwin Klein, NICTA
-
- Hoogle-like (http://www-users.cs.york.ac.uk/~ndm/hoogle) searching by type
- over constants, but matching is not fuzzy
-*)
-
-signature FIND_CONSTS =
-sig
- datatype criterion = Strict of string
- | Loose of string
- | Name of string
-
- val default_criteria : (bool * criterion) list ref
-
- val find_consts : Proof.context -> (bool * criterion) list -> unit
-end;
-
-structure FindConsts : FIND_CONSTS =
-struct
-
-datatype criterion = Strict of string
- | Loose of string
- | Name of string;
-
-val default_criteria = ref [(false, Name ".sko_")];
-
-fun add_tye (_, (_, t)) n = size_of_typ t + n;
-
-fun matches_subtype thy typat = let
- val p = can (fn ty => Sign.typ_match thy (typat, ty) Vartab.empty);
-
- fun fs [] = false
- | fs (t::ts) = f t orelse fs ts
-
- and f (t as Type (_, ars)) = p t orelse fs ars
- | f t = p t;
- in f end;
-
-fun check_const p (nm, (ty, _)) = if p (nm, ty)
- then SOME (size_of_typ ty)
- else NONE;
-
-fun opt_not f (c as (_, (ty, _))) = if is_some (f c)
- then NONE else SOME (size_of_typ ty);
-
-fun filter_const (_, NONE) = NONE
- | filter_const (f, (SOME (c, r))) = Option.map
- (pair c o ((curry Int.min) r)) (f c);
-
-fun pretty_criterion (b, c) =
- let
- fun prfx s = if b then s else "-" ^ s;
- in
- (case c of
- Strict pat => Pretty.str (prfx "strict: " ^ quote pat)
- | Loose pat => Pretty.str (prfx (quote pat))
- | Name name => Pretty.str (prfx "name: " ^ quote name))
- end;
-
-fun pretty_const ctxt (nm, ty) = let
- val ty' = Logic.unvarifyT ty;
- in
- Pretty.block [Pretty.quote (Pretty.str nm), Pretty.fbrk,
- Pretty.str "::", Pretty.brk 1,
- Pretty.quote (Syntax.pretty_typ ctxt ty')]
- end;
-
-fun find_consts ctxt raw_criteria = let
- val start = start_timing ();
-
- val thy = ProofContext.theory_of ctxt;
- val low_ranking = 10000;
-
- fun make_pattern crit = ProofContext.read_term_pattern ctxt ("_::" ^ crit)
- |> type_of;
-
- fun make_match (Strict arg) =
- let val qty = make_pattern arg; in
- fn (_, (ty, _)) => let
- val tye = Sign.typ_match thy (qty, ty) Vartab.empty;
- val sub_size = Vartab.fold add_tye tye 0;
- in SOME sub_size end handle MATCH => NONE
- end
-
- | make_match (Loose arg) =
- check_const (matches_subtype thy (make_pattern arg) o snd)
-
- | make_match (Name arg) = check_const (match_string arg o fst);
-
- fun make_criterion (b, crit) = (if b then I else opt_not) (make_match crit);
- val criteria = map make_criterion ((!default_criteria) @ raw_criteria);
-
- val (_, consts) = (#constants o Consts.dest o Sign.consts_of) thy;
- fun eval_entry c = foldl filter_const (SOME (c, low_ranking)) criteria;
-
- val matches = Symtab.fold (cons o eval_entry) consts []
- |> map_filter I
- |> sort (rev_order o int_ord o pairself snd)
- |> map ((apsnd fst) o fst);
-
- val end_msg = " in " ^
- (List.nth (String.tokens Char.isSpace (end_timing start), 3))
- ^ " secs"
- in
- Pretty.big_list "searched for:" (map pretty_criterion raw_criteria)
- :: Pretty.str ""
- :: (Pretty.str o concat)
- (if null matches
- then ["nothing found", end_msg]
- else ["found ", (string_of_int o length) matches,
- " constants", end_msg, ":"])
- :: Pretty.str ""
- :: map (pretty_const ctxt) matches
- |> Pretty.chunks
- |> Pretty.writeln
- end handle ERROR s => Output.error_msg s
-
-end;
-
--- a/src/Pure/Isar/find_theorems.ML Thu Feb 26 10:13:43 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,378 +0,0 @@
-(* Title: Pure/Isar/find_theorems.ML
- Author: Rafal Kolanski and Gerwin Klein, NICTA
-
-Retrieve theorems from proof context.
-*)
-
-signature FIND_THEOREMS =
-sig
- val limit: int ref
- val tac_limit: int ref
-
- datatype 'term criterion =
- Name of string | Intro | Elim | Dest | Solves | Simp of 'term |
- Pattern of 'term
-
- val find_theorems: Proof.context -> thm option -> bool ->
- (bool * string criterion) list -> (Facts.ref * thm) list
-
- val print_theorems: Proof.context -> thm option -> int option -> bool ->
- (bool * string criterion) list -> unit
-end;
-
-structure FindTheorems: FIND_THEOREMS =
-struct
-
-(** search criteria **)
-
-datatype 'term criterion =
- Name of string | Intro | Elim | Dest | Solves | Simp of 'term |
- Pattern of 'term;
-
-fun read_criterion _ (Name name) = Name name
- | read_criterion _ Intro = Intro
- | read_criterion _ Elim = Elim
- | read_criterion _ Dest = Dest
- | read_criterion _ Solves = Solves
- | read_criterion ctxt (Simp str) = Simp (ProofContext.read_term_pattern ctxt str)
- | read_criterion ctxt (Pattern str) = Pattern (ProofContext.read_term_pattern ctxt str);
-
-fun pretty_criterion ctxt (b, c) =
- let
- fun prfx s = if b then s else "-" ^ s;
- in
- (case c of
- Name name => Pretty.str (prfx "name: " ^ quote name)
- | Intro => Pretty.str (prfx "intro")
- | Elim => Pretty.str (prfx "elim")
- | Dest => Pretty.str (prfx "dest")
- | Solves => Pretty.str (prfx "solves")
- | Simp pat => Pretty.block [Pretty.str (prfx "simp:"), Pretty.brk 1,
- Pretty.quote (Syntax.pretty_term ctxt (Term.show_dummy_patterns pat))]
- | Pattern pat => Pretty.enclose (prfx " \"") "\""
- [Syntax.pretty_term ctxt (Term.show_dummy_patterns pat)])
- end;
-
-(** search criterion filters **)
-
-(*generated filters are to be of the form
- input: (Facts.ref * thm)
- output: (p:int, s:int) option, where
- NONE indicates no match
- p is the primary sorting criterion
- (eg. number of assumptions in the theorem)
- s is the secondary sorting criterion
- (eg. size of the substitution for intro, elim and dest)
- when applying a set of filters to a thm, fold results in:
- (biggest p, sum of all s)
- currently p and s only matter for intro, elim, dest and simp filters,
- otherwise the default ordering is used.
-*)
-
-
-(* matching theorems *)
-
-fun is_nontrivial thy = Term.is_Const o Term.head_of o ObjectLogic.drop_judgment thy;
-
-(*extract terms from term_src, refine them to the parts that concern us,
- if po try match them against obj else vice versa.
- trivial matches are ignored.
- returns: smallest substitution size*)
-fun is_matching_thm (extract_terms, refine_term) ctxt po obj term_src =
- let
- val thy = ProofContext.theory_of ctxt;
-
- fun matches pat =
- is_nontrivial thy pat andalso
- Pattern.matches thy (if po then (pat, obj) else (obj, pat));
-
- fun substsize pat =
- let val (_, subst) =
- Pattern.match thy (if po then (pat, obj) else (obj, pat)) (Vartab.empty, Vartab.empty)
- in Vartab.fold (fn (_, (_, t)) => fn n => size_of_term t + n) subst 0 end;
-
- fun bestmatch [] = NONE
- | bestmatch xs = SOME (foldr1 Int.min xs);
-
- val match_thm = matches o refine_term;
- in
- map (substsize o refine_term) (filter match_thm (extract_terms term_src))
- |> bestmatch
- end;
-
-
-(* filter_name *)
-
-fun filter_name str_pat (thmref, _) =
- if match_string str_pat (Facts.name_of_ref thmref)
- then SOME (0, 0) else NONE;
-
-(* filter intro/elim/dest/solves rules *)
-
-fun filter_dest ctxt goal (_, thm) =
- let
- val extract_dest =
- (fn thm => if Thm.no_prems thm then [] else [Thm.full_prop_of thm],
- hd o Logic.strip_imp_prems);
- val prems = Logic.prems_of_goal goal 1;
-
- fun try_subst prem = is_matching_thm extract_dest ctxt true prem thm;
- val successful = prems |> map_filter try_subst;
- in
- (*if possible, keep best substitution (one with smallest size)*)
- (*dest rules always have assumptions, so a dest with one
- assumption is as good as an intro rule with none*)
- if not (null successful)
- then SOME (Thm.nprems_of thm - 1, foldr1 Int.min successful) else NONE
- end;
-
-fun filter_intro ctxt goal (_, thm) =
- let
- val extract_intro = (single o Thm.full_prop_of, Logic.strip_imp_concl);
- val concl = Logic.concl_of_goal goal 1;
- val ss = is_matching_thm extract_intro ctxt true concl thm;
- in
- if is_some ss then SOME (Thm.nprems_of thm, the ss) else NONE
- end;
-
-fun filter_elim ctxt goal (_, thm) =
- if not (Thm.no_prems thm) then
- let
- val rule = Thm.full_prop_of thm;
- val prems = Logic.prems_of_goal goal 1;
- val goal_concl = Logic.concl_of_goal goal 1;
- val rule_mp = hd (Logic.strip_imp_prems rule);
- val rule_concl = Logic.strip_imp_concl rule;
- fun combine t1 t2 = Const ("*combine*", dummyT --> dummyT) $ (t1 $ t2);
- val rule_tree = combine rule_mp rule_concl;
- fun goal_tree prem = combine prem goal_concl;
- fun try_subst prem =
- is_matching_thm (single, I) ctxt true (goal_tree prem) rule_tree;
- val successful = prems |> map_filter try_subst;
- in
- (*elim rules always have assumptions, so an elim with one
- assumption is as good as an intro rule with none*)
- if is_nontrivial (ProofContext.theory_of ctxt) (Thm.major_prem_of thm)
- andalso not (null successful)
- then SOME (Thm.nprems_of thm - 1, foldr1 Int.min successful) else NONE
- end
- else NONE
-
-val tac_limit = ref 5;
-
-fun filter_solves ctxt goal = let
- val baregoal = Logic.get_goal (prop_of goal) 1;
-
- fun etacn thm i = Seq.take (!tac_limit) o etac thm i;
- fun try_thm thm = if Thm.no_prems thm then rtac thm 1 goal
- else (etacn thm THEN_ALL_NEW
- (Goal.norm_hhf_tac THEN'
- Method.assumption_tac ctxt)) 1 goal;
- in
- fn (_, thm) => if (is_some o Seq.pull o try_thm) thm
- then SOME (Thm.nprems_of thm, 0) else NONE
- end;
-
-(* filter_simp *)
-
-fun filter_simp ctxt t (_, thm) =
- let
- val (_, {mk_rews = {mk, ...}, ...}) =
- Simplifier.rep_ss (Simplifier.local_simpset_of ctxt);
- val extract_simp =
- (map Thm.full_prop_of o mk, #1 o Logic.dest_equals o Logic.strip_imp_concl);
- val ss = is_matching_thm extract_simp ctxt false t thm
- in
- if is_some ss then SOME (Thm.nprems_of thm, the ss) else NONE
- end;
-
-
-(* filter_pattern *)
-
-fun get_names t = (Term.add_const_names t []) union (Term.add_free_names t []);
-fun get_thm_names (_, thm) = get_names (Thm.full_prop_of thm);
- (* Including all constants and frees is only sound because
- matching uses higher-order patterns. If full matching
- were used, then constants that may be subject to
- beta-reduction after substitution of frees should
- not be included for LHS set because they could be
- thrown away by the substituted function.
- e.g. for (?F 1 2) do not include 1 or 2, if it were
- possible for ?F to be (% x y. 3)
- The largest possible set should always be included on
- the RHS. *)
-
-fun filter_pattern ctxt pat = let
- val pat_consts = get_names pat;
-
- fun check (t, NONE) = check (t, SOME (get_thm_names t))
- | check ((_, thm), c as SOME thm_consts) =
- (if pat_consts subset_string thm_consts
- andalso (Pattern.matches_subterm (ProofContext.theory_of ctxt)
- (pat, Thm.full_prop_of thm))
- then SOME (0, 0) else NONE, c);
- in check end;
-
-(* interpret criteria as filters *)
-
-local
-
-fun err_no_goal c =
- error ("Current goal required for " ^ c ^ " search criterion");
-
-val fix_goal = Thm.prop_of;
-val fix_goalo = Option.map fix_goal;
-
-fun filter_crit _ _ (Name name) = apfst (filter_name name)
- | filter_crit _ NONE Intro = err_no_goal "intro"
- | filter_crit _ NONE Elim = err_no_goal "elim"
- | filter_crit _ NONE Dest = err_no_goal "dest"
- | filter_crit _ NONE Solves = err_no_goal "solves"
- | filter_crit ctxt (SOME goal) Intro = apfst (filter_intro ctxt
- (fix_goal goal))
- | filter_crit ctxt (SOME goal) Elim = apfst (filter_elim ctxt
- (fix_goal goal))
- | filter_crit ctxt (SOME goal) Dest = apfst (filter_dest ctxt
- (fix_goal goal))
- | filter_crit ctxt (SOME goal) Solves = apfst (filter_solves ctxt goal)
- | filter_crit ctxt _ (Simp pat) = apfst (filter_simp ctxt pat)
- | filter_crit ctxt _ (Pattern pat) = filter_pattern ctxt pat;
-
-fun opt_not x = if is_some x then NONE else SOME (0, 0);
-
-fun opt_add (SOME (a, x)) (SOME (b, y)) = SOME (Int.max (a, b), x + y : int)
- | opt_add _ _ = NONE;
-
-fun app_filters thm = let
- fun app (NONE, _, _) = NONE
- | app (SOME v, consts, []) = SOME (v, thm)
- | app (r, consts, f::fs) = let val (r', consts') = f (thm, consts)
- in app (opt_add r r', consts', fs) end;
- in app end;
-
-in
-
-fun filter_criterion ctxt opt_goal (b, c) =
- (if b then I else (apfst opt_not)) o filter_crit ctxt opt_goal c;
-
-fun all_filters filters thms =
- let
- fun eval_filters thm = app_filters thm (SOME (0, 0), NONE, filters);
-
- (*filters return: (number of assumptions, substitution size) option, so
- sort (desc. in both cases) according to number of assumptions first,
- then by the substitution size*)
- fun thm_ord (((p0, s0), _), ((p1, s1), _)) =
- prod_ord int_ord int_ord ((p1, s1), (p0, s0));
- in map_filter eval_filters thms |> sort thm_ord |> map #2 end;
-
-end;
-
-
-(* removing duplicates, preferring nicer names, roughly n log n *)
-
-local
-
-val index_ord = option_ord (K EQUAL);
-val hidden_ord = bool_ord o pairself NameSpace.is_hidden;
-val qual_ord = int_ord o pairself (length o NameSpace.explode);
-val txt_ord = int_ord o pairself size;
-
-fun nicer_name (x, i) (y, j) =
- (case hidden_ord (x, y) of EQUAL =>
- (case index_ord (i, j) of EQUAL =>
- (case qual_ord (x, y) of EQUAL => txt_ord (x, y) | ord => ord)
- | ord => ord)
- | ord => ord) <> GREATER;
-
-fun rem_cdups nicer xs =
- let
- fun rem_c rev_seen [] = rev rev_seen
- | rem_c rev_seen [x] = rem_c (x :: rev_seen) []
- | rem_c rev_seen ((x as ((n, t), _)) :: (y as ((n', t'), _)) :: xs) =
- if Thm.eq_thm_prop (t, t')
- then rem_c rev_seen ((if nicer n n' then x else y) :: xs)
- else rem_c (x :: rev_seen) (y :: xs)
- in rem_c [] xs end;
-
-in
-
-fun nicer_shortest ctxt = let
- val ns = ProofContext.theory_of ctxt
- |> PureThy.facts_of
- |> Facts.space_of;
-
- val len_sort = sort (int_ord o (pairself size));
- fun shorten s = (case len_sort (NameSpace.get_accesses ns s) of
- [] => s
- | s'::_ => s');
-
- fun nicer (Facts.Named ((x, _), i)) (Facts.Named ((y, _), j)) =
- nicer_name (shorten x, i) (shorten y, j)
- | nicer (Facts.Fact _) (Facts.Named _) = true
- | nicer (Facts.Named _) (Facts.Fact _) = false;
- in nicer end;
-
-fun rem_thm_dups nicer xs =
- xs ~~ (1 upto length xs)
- |> sort (TermOrd.fast_term_ord o pairself (Thm.prop_of o #2 o #1))
- |> rem_cdups nicer
- |> sort (int_ord o pairself #2)
- |> map #1;
-
-end;
-
-
-(* print_theorems *)
-
-fun all_facts_of ctxt =
- maps Facts.selections
- (Facts.dest_static [] (PureThy.facts_of (ProofContext.theory_of ctxt)) @
- Facts.dest_static [] (ProofContext.facts_of ctxt));
-
-val limit = ref 40;
-
-fun find_theorems ctxt opt_goal rem_dups raw_criteria =
- let
- val add_prems = Seq.hd o (TRY (Method.insert_tac
- (Assumption.prems_of ctxt) 1));
- val opt_goal' = Option.map add_prems opt_goal;
-
- val criteria = map (apsnd (read_criterion ctxt)) raw_criteria;
- val filters = map (filter_criterion ctxt opt_goal') criteria;
-
- val raw_matches = all_filters filters (all_facts_of ctxt);
-
- val matches =
- if rem_dups
- then rem_thm_dups (nicer_shortest ctxt) raw_matches
- else raw_matches;
- in matches end;
-
-fun print_theorems ctxt opt_goal opt_limit rem_dups raw_criteria = let
- val start = start_timing ();
-
- val criteria = map (apsnd (read_criterion ctxt)) raw_criteria;
- val matches = find_theorems ctxt opt_goal rem_dups raw_criteria;
-
- val len = length matches;
- val lim = the_default (! limit) opt_limit;
- val thms = Library.drop (len - lim, matches);
-
- val end_msg = " in " ^
- (List.nth (String.tokens Char.isSpace (end_timing start), 3))
- ^ " secs"
- in
- Pretty.big_list "searched for:" (map (pretty_criterion ctxt) criteria)
- :: Pretty.str "" ::
- (if null thms then [Pretty.str ("nothing found" ^ end_msg)]
- else
- [Pretty.str ("found " ^ string_of_int len ^ " theorems" ^
- (if len <= lim then ""
- else " (" ^ string_of_int lim ^ " displayed)")
- ^ end_msg ^ ":"), Pretty.str ""] @
- map Display.pretty_fact thms)
- |> Pretty.chunks |> Pretty.writeln
- end
-
-end;
--- a/src/Pure/Isar/isar_cmd.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Isar/isar_cmd.ML Fri Feb 27 18:50:35 2009 +0100
@@ -62,10 +62,6 @@
val class_deps: Toplevel.transition -> Toplevel.transition
val thy_deps: Toplevel.transition -> Toplevel.transition
val thm_deps: (Facts.ref * Attrib.src list) list -> Toplevel.transition -> Toplevel.transition
- val find_theorems: (int option * bool) * (bool * string FindTheorems.criterion) list
- -> Toplevel.transition -> Toplevel.transition
- val find_consts: (bool * FindConsts.criterion) list ->
- Toplevel.transition -> Toplevel.transition
val unused_thms: (string list * string list option) option ->
Toplevel.transition -> Toplevel.transition
val print_binds: Toplevel.transition -> Toplevel.transition
@@ -403,20 +399,9 @@
|> sort (int_ord o pairself #1) |> map #2;
in Present.display_graph gr end);
-
-(* retrieve theorems *)
-
fun thm_deps args = Toplevel.unknown_theory o Toplevel.keep (fn state =>
ThmDeps.thm_deps (Proof.get_thmss (Toplevel.enter_proof_body state) args));
-fun find_theorems ((opt_lim, rem_dups), spec) =
- Toplevel.unknown_theory o Toplevel.keep (fn state =>
- let
- val proof_state = Toplevel.enter_proof_body state;
- val ctxt = Proof.context_of proof_state;
- val opt_goal = try Proof.get_goal proof_state |> Option.map (#2 o #2);
- in FindTheorems.print_theorems ctxt opt_goal opt_lim rem_dups spec end);
-
(* find unused theorems *)
@@ -434,12 +419,6 @@
|> map pretty_thm |> Pretty.chunks |> Pretty.writeln
end);
-(* retrieve constants *)
-
-fun find_consts spec =
- Toplevel.unknown_theory o Toplevel.keep (fn state =>
- let val ctxt = (Proof.context_of o Toplevel.enter_proof_body) state
- in FindConsts.find_consts ctxt spec end);
(* print proof context contents *)
--- a/src/Pure/Isar/isar_syn.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Isar/isar_syn.ML Fri Feb 27 18:50:35 2009 +0100
@@ -37,6 +37,7 @@
(Scan.succeed (Toplevel.exit o Toplevel.end_local_theory));
+
(** markup commands **)
val _ = OuterSyntax.markup_command ThyOutput.Markup "header" "theory header" K.diag
@@ -79,7 +80,7 @@
-(** theory sections **)
+(** theory commands **)
(* classes and sorts *)
@@ -853,47 +854,6 @@
OuterSyntax.improper_command "thm_deps" "visualize theorem dependencies"
K.diag (SpecParse.xthms1 >> (Toplevel.no_timing oo IsarCmd.thm_deps));
-local
-
-val criterion =
- P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindTheorems.Name ||
- P.reserved "intro" >> K FindTheorems.Intro ||
- P.reserved "elim" >> K FindTheorems.Elim ||
- P.reserved "dest" >> K FindTheorems.Dest ||
- P.reserved "solves" >> K FindTheorems.Solves ||
- P.reserved "simp" |-- P.!!! (P.$$$ ":" |-- P.term) >> FindTheorems.Simp ||
- P.term >> FindTheorems.Pattern;
-
-val options =
- Scan.optional
- (P.$$$ "(" |--
- P.!!! (Scan.option P.nat -- Scan.optional (P.reserved "with_dups" >> K false) true
- --| P.$$$ ")")) (NONE, true);
-in
-
-val _ =
- OuterSyntax.improper_command "find_theorems" "print theorems meeting specified criteria" K.diag
- (options -- Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
- >> (Toplevel.no_timing oo IsarCmd.find_theorems));
-
-end;
-
-local
-
-val criterion =
- P.reserved "strict" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindConsts.Strict ||
- P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> FindConsts.Name ||
- P.xname >> FindConsts.Loose;
-
-in
-
-val _ =
- OuterSyntax.improper_command "find_consts" "search constants by type pattern"
- K.diag (Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
- >> (Toplevel.no_timing oo IsarCmd.find_consts));
-
-end;
-
val _ =
OuterSyntax.improper_command "print_binds" "print term bindings of proof context" K.diag
(Scan.succeed (Toplevel.no_timing o IsarCmd.print_binds));
@@ -948,6 +908,7 @@
(Toplevel.no_timing oo IsarCmd.unused_thms));
+
(** system commands (for interactive mode only) **)
val _ =
--- a/src/Pure/Proof/proofchecker.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Proof/proofchecker.ML Fri Feb 27 18:50:35 2009 +0100
@@ -56,7 +56,7 @@
| thm_of _ _ (PAxm (name, _, SOME Ts)) =
thm_of_atom (Thm.axiom thy name) Ts
- | thm_of _ Hs (PBound i) = List.nth (Hs, i)
+ | thm_of _ Hs (PBound i) = nth Hs i
| thm_of (vs, names) Hs (Abst (s, SOME T, prf)) =
let
--- a/src/Pure/Proof/reconstruct.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Proof/reconstruct.ML Fri Feb 27 18:50:35 2009 +0100
@@ -98,7 +98,7 @@
let val (env3, V) = mk_tvar (env2, [])
in (t' $ u', V, vTs2, unifyT thy env3 T (U --> V)) end)
end
- | infer_type thy env Ts vTs (t as Bound i) = ((t, List.nth (Ts, i), vTs, env)
+ | infer_type thy env Ts vTs (t as Bound i) = ((t, nth Ts i, vTs, env)
handle Subscript => error ("infer_type: bad variable index " ^ string_of_int i));
fun cantunify thy (t, u) = error ("Non-unifiable terms:\n" ^
@@ -152,7 +152,7 @@
fun head_norm (prop, prf, cnstrts, env, vTs) =
(Envir.head_norm env prop, prf, cnstrts, env, vTs);
- fun mk_cnstrts env _ Hs vTs (PBound i) = ((List.nth (Hs, i), PBound i, [], env, vTs)
+ fun mk_cnstrts env _ Hs vTs (PBound i) = ((nth Hs i, PBound i, [], env, vTs)
handle Subscript => error ("mk_cnstrts: bad variable index " ^ string_of_int i))
| mk_cnstrts env Ts Hs vTs (Abst (s, opT, cprf)) =
let
@@ -304,7 +304,7 @@
val head_norm = Envir.head_norm (Envir.empty 0);
-fun prop_of0 Hs (PBound i) = List.nth (Hs, i)
+fun prop_of0 Hs (PBound i) = nth Hs i
| prop_of0 Hs (Abst (s, SOME T, prf)) =
Term.all T $ (Abs (s, T, prop_of0 Hs prf))
| prop_of0 Hs (AbsP (s, SOME t, prf)) =
--- a/src/Pure/Syntax/syn_trans.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Syntax/syn_trans.ML Fri Feb 27 18:50:35 2009 +0100
@@ -222,7 +222,7 @@
(* implicit structures *)
fun the_struct structs i =
- if 1 <= i andalso i <= length structs then List.nth (structs, i - 1)
+ if 1 <= i andalso i <= length structs then nth structs (i - 1)
else raise error ("Illegal reference to implicit structure #" ^ string_of_int i);
fun struct_tr structs (*"_struct"*) [Const ("_indexdefault", _)] =
--- a/src/Pure/Tools/ROOT.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/Tools/ROOT.ML Fri Feb 27 18:50:35 2009 +0100
@@ -9,6 +9,9 @@
(*basic XML support*)
use "xml_syntax.ML";
+use "find_theorems.ML";
+use "find_consts.ML";
+
(*quickcheck/autosolve needed here because of pg preferences*)
use "../../Tools/quickcheck.ML";
use "../../Tools/auto_solve.ML";
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Tools/find_consts.ML Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,165 @@
+(* Title: Pure/Tools/find_consts.ML
+ Author: Timothy Bourke and Gerwin Klein, NICTA
+
+Hoogle-like (http://www-users.cs.york.ac.uk/~ndm/hoogle) searching by
+type over constants, but matching is not fuzzy.
+*)
+
+signature FIND_CONSTS =
+sig
+ datatype criterion =
+ Strict of string
+ | Loose of string
+ | Name of string
+
+ val default_criteria : (bool * criterion) list ref
+
+ val find_consts : Proof.context -> (bool * criterion) list -> unit
+end;
+
+structure FindConsts : FIND_CONSTS =
+struct
+
+(* search criteria *)
+
+datatype criterion =
+ Strict of string
+ | Loose of string
+ | Name of string;
+
+val default_criteria = ref [(false, Name ".sko_")];
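+
+(*the default criterion is negated (first component false): constants
+  whose name matches ".sko_" are excluded from the results*)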
+
+
+(* matching types/consts *)
+
+fun add_tye (_, (_, t)) n = Term.size_of_typ t + n;
+
+fun matches_subtype thy typat =
+ let
+ val p = can (fn ty => Sign.typ_match thy (typat, ty) Vartab.empty);
+
+ fun fs [] = false
+ | fs (t :: ts) = f t orelse fs ts
+
+ and f (t as Type (_, ars)) = p t orelse fs ars
+ | f t = p t;
+ in f end;
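+
+(*e.g. the pattern "nat" matches the type nat list => bool here,
+  since nat occurs as one of its subtypes*)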
+
+fun check_const p (nm, (ty, _)) =
+ if p (nm, ty)
+ then SOME (Term.size_of_typ ty)
+ else NONE;
+
+fun opt_not f (c as (_, (ty, _))) =
+ if is_some (f c)
+ then NONE else SOME (Term.size_of_typ ty);
+
+fun filter_const (_, NONE) = NONE
+ | filter_const (f, (SOME (c, r))) =
+ Option.map (pair c o (curry Int.min r)) (f c);
+
+
+(* pretty results *)
+
+fun pretty_criterion (b, c) =
+ let
+ fun prfx s = if b then s else "-" ^ s;
+ in
+ (case c of
+ Strict pat => Pretty.str (prfx "strict: " ^ quote pat)
+ | Loose pat => Pretty.str (prfx (quote pat))
+ | Name name => Pretty.str (prfx "name: " ^ quote name))
+ end;
+
+fun pretty_const ctxt (nm, ty) =
+ let
+ val ty' = Logic.unvarifyT ty;
+ in
+ Pretty.block
+ [Pretty.quote (Pretty.str nm), Pretty.fbrk,
+ Pretty.str "::", Pretty.brk 1,
+ Pretty.quote (Syntax.pretty_typ ctxt ty')]
+ end;
+
+
+(* find_consts *)
+
+fun find_consts ctxt raw_criteria =
+ let
+ val start = start_timing ();
+
+ val thy = ProofContext.theory_of ctxt;
+ val low_ranking = 10000;
+
+ fun make_pattern crit = ProofContext.read_term_pattern ctxt ("_::" ^ crit) |> Term.type_of;
+
+ fun make_match (Strict arg) =
+ let val qty = make_pattern arg; in
+ fn (_, (ty, _)) =>
+ let
+ val tye = Sign.typ_match thy (qty, ty) Vartab.empty;
+ val sub_size = Vartab.fold add_tye tye 0;
+ in SOME sub_size end handle MATCH => NONE
+ end
+
+ | make_match (Loose arg) =
+ check_const (matches_subtype thy (make_pattern arg) o snd)
+
+ | make_match (Name arg) = check_const (match_string arg o fst);
+
+ fun make_criterion (b, crit) = (if b then I else opt_not) (make_match crit);
+ val criteria = map make_criterion (! default_criteria @ raw_criteria);
+
+ val (_, consts) = (#constants o Consts.dest o Sign.consts_of) thy;
+ fun eval_entry c = foldl filter_const (SOME (c, low_ranking)) criteria;
+
+ val matches =
+ Symtab.fold (cons o eval_entry) consts []
+ |> map_filter I
+ |> sort (rev_order o int_ord o pairself snd)
+ |> map ((apsnd fst) o fst);
+
+ val end_msg = " in " ^
+ (List.nth (String.tokens Char.isSpace (end_timing start), 3))
+ ^ " secs"
+ in
+ Pretty.big_list "searched for:" (map pretty_criterion raw_criteria)
+ :: Pretty.str ""
+ :: (Pretty.str o concat)
+ (if null matches
+ then ["nothing found", end_msg]
+ else ["found ", (string_of_int o length) matches,
+ " constants", end_msg, ":"])
+ :: Pretty.str ""
+ :: map (pretty_const ctxt) matches
+ |> Pretty.chunks
+ |> Pretty.writeln
+ end handle ERROR s => Output.error_msg s;
+
+
+(* command syntax *)
+
+fun find_consts_cmd spec =
+ Toplevel.unknown_theory o Toplevel.keep (fn state =>
+ find_consts (Proof.context_of (Toplevel.enter_proof_body state)) spec);
+
+local
+
+structure P = OuterParse and K = OuterKeyword;
+
+val criterion =
+ P.reserved "strict" |-- P.!!! (P.$$$ ":" |-- P.xname) >> Strict ||
+ P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> Name ||
+ P.xname >> Loose;
+
+in
+
+val _ =
+ OuterSyntax.improper_command "find_consts" "search constants by type pattern" K.diag
+ (Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
+ >> (Toplevel.no_timing oo find_consts_cmd));
+
+end;
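+
+(*illustrative invocations (example patterns only):
+    find_consts "_ list => _"        loose: pattern matches some subtype
+    find_consts strict: "_ list"     strict: pattern matches the whole type
+    find_consts name: conj           match on the constant name
+  a leading "-" negates a criterion, e.g. find_consts -name: "sko"*)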
+
+end;
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Pure/Tools/find_theorems.ML Fri Feb 27 18:50:35 2009 +0100
@@ -0,0 +1,428 @@
+(* Title: Pure/Tools/find_theorems.ML
+ Author: Rafal Kolanski and Gerwin Klein, NICTA
+
+Retrieve theorems from proof context.
+*)
+
+signature FIND_THEOREMS =
+sig
+ val limit: int ref
+ val tac_limit: int ref
+
+ datatype 'term criterion =
+ Name of string | Intro | Elim | Dest | Solves | Simp of 'term |
+ Pattern of 'term
+
+ val find_theorems: Proof.context -> thm option -> bool ->
+ (bool * string criterion) list -> (Facts.ref * thm) list
+
+ val print_theorems: Proof.context -> thm option -> int option -> bool ->
+ (bool * string criterion) list -> unit
+end;
+
+structure FindTheorems: FIND_THEOREMS =
+struct
+
+(** search criteria **)
+
+datatype 'term criterion =
+ Name of string | Intro | Elim | Dest | Solves | Simp of 'term |
+ Pattern of 'term;
+
+fun read_criterion _ (Name name) = Name name
+ | read_criterion _ Intro = Intro
+ | read_criterion _ Elim = Elim
+ | read_criterion _ Dest = Dest
+ | read_criterion _ Solves = Solves
+ | read_criterion ctxt (Simp str) = Simp (ProofContext.read_term_pattern ctxt str)
+ | read_criterion ctxt (Pattern str) = Pattern (ProofContext.read_term_pattern ctxt str);
+
+fun pretty_criterion ctxt (b, c) =
+ let
+ fun prfx s = if b then s else "-" ^ s;
+ in
+ (case c of
+ Name name => Pretty.str (prfx "name: " ^ quote name)
+ | Intro => Pretty.str (prfx "intro")
+ | Elim => Pretty.str (prfx "elim")
+ | Dest => Pretty.str (prfx "dest")
+ | Solves => Pretty.str (prfx "solves")
+ | Simp pat => Pretty.block [Pretty.str (prfx "simp:"), Pretty.brk 1,
+ Pretty.quote (Syntax.pretty_term ctxt (Term.show_dummy_patterns pat))]
+ | Pattern pat => Pretty.enclose (prfx " \"") "\""
+ [Syntax.pretty_term ctxt (Term.show_dummy_patterns pat)])
+ end;
+
+
+
+(** search criterion filters **)
+
+(*generated filters are to be of the form
+ input: (Facts.ref * thm)
+ output: (p:int, s:int) option, where
+ NONE indicates no match
+ p is the primary sorting criterion
+ (e.g. number of assumptions in the theorem)
+ s is the secondary sorting criterion
+ (e.g. size of the substitution for intro, elim and dest)
+ when applying a set of filters to a thm, fold results in:
+ (biggest p, sum of all s)
+ currently p and s only matter for intro, elim, dest and simp filters,
+ otherwise the default ordering is used.
+*)
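+
+(*for instance (illustrative numbers only): two filters yielding
+  SOME (1, 2) and SOME (3, 4) for a thm combine to SOME (3, 6);
+  any single NONE makes the combined result NONE*)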
+
+
+(* matching theorems *)
+
+fun is_nontrivial thy = Term.is_Const o Term.head_of o ObjectLogic.drop_judgment thy;
+
+(*extract terms from term_src, refine them to the parts that concern us,
+ if po, try to match them against obj, else vice versa.
+ trivial matches are ignored.
+ returns: smallest substitution size*)
+fun is_matching_thm (extract_terms, refine_term) ctxt po obj term_src =
+ let
+ val thy = ProofContext.theory_of ctxt;
+
+ fun matches pat =
+ is_nontrivial thy pat andalso
+ Pattern.matches thy (if po then (pat, obj) else (obj, pat));
+
+ fun substsize pat =
+ let val (_, subst) =
+ Pattern.match thy (if po then (pat, obj) else (obj, pat)) (Vartab.empty, Vartab.empty)
+ in Vartab.fold (fn (_, (_, t)) => fn n => size_of_term t + n) subst 0 end;
+
+ fun bestmatch [] = NONE
+ | bestmatch xs = SOME (foldr1 Int.min xs);
+
+ val match_thm = matches o refine_term;
+ in
+ map (substsize o refine_term) (filter match_thm (extract_terms term_src))
+ |> bestmatch
+ end;
+
+
+(* filter_name *)
+
+fun filter_name str_pat (thmref, _) =
+ if match_string str_pat (Facts.name_of_ref thmref)
+ then SOME (0, 0) else NONE;
+
+
+(* filter intro/elim/dest/solves rules *)
+
+fun filter_dest ctxt goal (_, thm) =
+ let
+ val extract_dest =
+ (fn thm => if Thm.no_prems thm then [] else [Thm.full_prop_of thm],
+ hd o Logic.strip_imp_prems);
+ val prems = Logic.prems_of_goal goal 1;
+
+ fun try_subst prem = is_matching_thm extract_dest ctxt true prem thm;
+ val successful = prems |> map_filter try_subst;
+ in
+ (*if possible, keep best substitution (one with smallest size)*)
+ (*dest rules always have assumptions, so a dest with one
+ assumption is as good as an intro rule with none*)
+ if not (null successful)
+ then SOME (Thm.nprems_of thm - 1, foldr1 Int.min successful) else NONE
+ end;
+
+fun filter_intro ctxt goal (_, thm) =
+ let
+ val extract_intro = (single o Thm.full_prop_of, Logic.strip_imp_concl);
+ val concl = Logic.concl_of_goal goal 1;
+ val ss = is_matching_thm extract_intro ctxt true concl thm;
+ in
+ if is_some ss then SOME (Thm.nprems_of thm, the ss) else NONE
+ end;
+
+fun filter_elim ctxt goal (_, thm) =
+ if not (Thm.no_prems thm) then
+ let
+ val rule = Thm.full_prop_of thm;
+ val prems = Logic.prems_of_goal goal 1;
+ val goal_concl = Logic.concl_of_goal goal 1;
+ val rule_mp = hd (Logic.strip_imp_prems rule);
+ val rule_concl = Logic.strip_imp_concl rule;
+ fun combine t1 t2 = Const ("*combine*", dummyT --> dummyT) $ (t1 $ t2);
+ val rule_tree = combine rule_mp rule_concl;
+ fun goal_tree prem = combine prem goal_concl;
+ fun try_subst prem =
+ is_matching_thm (single, I) ctxt true (goal_tree prem) rule_tree;
+ val successful = prems |> map_filter try_subst;
+ in
+ (*elim rules always have assumptions, so an elim with one
+ assumption is as good as an intro rule with none*)
+ if is_nontrivial (ProofContext.theory_of ctxt) (Thm.major_prem_of thm)
+ andalso not (null successful)
+ then SOME (Thm.nprems_of thm - 1, foldr1 Int.min successful) else NONE
+ end
+ else NONE
+
+val tac_limit = ref 5;
+
+fun filter_solves ctxt goal =
+ let
+ val baregoal = Logic.get_goal (Thm.prop_of goal) 1;
+
+ fun etacn thm i = Seq.take (! tac_limit) o etac thm i;
+ fun try_thm thm =
+ if Thm.no_prems thm then rtac thm 1 goal
+ else (etacn thm THEN_ALL_NEW
+ (Goal.norm_hhf_tac THEN'
+ Method.assumption_tac ctxt)) 1 goal;
+ in
+ fn (_, thm) =>
+ if (is_some o Seq.pull o try_thm) thm
+ then SOME (Thm.nprems_of thm, 0) else NONE
+ end;
+
+
+(* filter_simp *)
+
+fun filter_simp ctxt t (_, thm) =
+ let
+ val (_, {mk_rews = {mk, ...}, ...}) =
+ Simplifier.rep_ss (Simplifier.local_simpset_of ctxt);
+ val extract_simp =
+ (map Thm.full_prop_of o mk, #1 o Logic.dest_equals o Logic.strip_imp_concl);
+ val ss = is_matching_thm extract_simp ctxt false t thm
+ in
+ if is_some ss then SOME (Thm.nprems_of thm, the ss) else NONE
+ end;
+
+
+(* filter_pattern *)
+
+fun get_names t = (Term.add_const_names t []) union (Term.add_free_names t []);
+fun get_thm_names (_, thm) = get_names (Thm.full_prop_of thm);
+
+(*Including all constants and frees is only sound because
+ matching uses higher-order patterns. If full matching
+ were used, then constants that may be subject to
+ beta-reduction after substitution of frees should
+ not be included for LHS set because they could be
+ thrown away by the substituted function.
+ e.g. for (?F 1 2) do not include 1 or 2, if it were
+ possible for ?F to be (% x y. 3)
+ The largest possible set should always be included on
+ the RHS.*)
+
+fun filter_pattern ctxt pat =
+ let
+ val pat_consts = get_names pat;
+
+ fun check (t, NONE) = check (t, SOME (get_thm_names t))
+ | check ((_, thm), c as SOME thm_consts) =
+ (if pat_consts subset_string thm_consts
+ andalso (Pattern.matches_subterm (ProofContext.theory_of ctxt)
+ (pat, Thm.full_prop_of thm))
+ then SOME (0, 0) else NONE, c);
+ in check end;
+
+
+(* interpret criteria as filters *)
+
+local
+
+fun err_no_goal c =
+ error ("Current goal required for " ^ c ^ " search criterion");
+
+val fix_goal = Thm.prop_of;
+val fix_goalo = Option.map fix_goal;
+
+fun filter_crit _ _ (Name name) = apfst (filter_name name)
+ | filter_crit _ NONE Intro = err_no_goal "intro"
+ | filter_crit _ NONE Elim = err_no_goal "elim"
+ | filter_crit _ NONE Dest = err_no_goal "dest"
+ | filter_crit _ NONE Solves = err_no_goal "solves"
+ | filter_crit ctxt (SOME goal) Intro = apfst (filter_intro ctxt (fix_goal goal))
+ | filter_crit ctxt (SOME goal) Elim = apfst (filter_elim ctxt (fix_goal goal))
+ | filter_crit ctxt (SOME goal) Dest = apfst (filter_dest ctxt (fix_goal goal))
+ | filter_crit ctxt (SOME goal) Solves = apfst (filter_solves ctxt goal)
+ | filter_crit ctxt _ (Simp pat) = apfst (filter_simp ctxt pat)
+ | filter_crit ctxt _ (Pattern pat) = filter_pattern ctxt pat;
+
+fun opt_not x = if is_some x then NONE else SOME (0, 0);
+
+fun opt_add (SOME (a, x)) (SOME (b, y)) = SOME (Int.max (a, b), x + y : int)
+ | opt_add _ _ = NONE;
+
+fun app_filters thm =
+ let
+ fun app (NONE, _, _) = NONE
+ | app (SOME v, consts, []) = SOME (v, thm)
+ | app (r, consts, f :: fs) =
+ let val (r', consts') = f (thm, consts)
+ in app (opt_add r r', consts', fs) end;
+ in app end;
+
+in
+
+fun filter_criterion ctxt opt_goal (b, c) =
+ (if b then I else (apfst opt_not)) o filter_crit ctxt opt_goal c;
+
+fun all_filters filters thms =
+ let
+ fun eval_filters thm = app_filters thm (SOME (0, 0), NONE, filters);
+
+ (*filters return: (number of assumptions, substitution size) option, so
+ sort (desc. in both cases) according to number of assumptions first,
+ then by the substitution size*)
+ fun thm_ord (((p0, s0), _), ((p1, s1), _)) =
+ prod_ord int_ord int_ord ((p1, s1), (p0, s0));
+ in map_filter eval_filters thms |> sort thm_ord |> map #2 end;
+
+end;
+
+
+(* removing duplicates, preferring nicer names, roughly n log n *)
+
+local
+
+val index_ord = option_ord (K EQUAL);
+val hidden_ord = bool_ord o pairself NameSpace.is_hidden;
+val qual_ord = int_ord o pairself (length o NameSpace.explode);
+val txt_ord = int_ord o pairself size;
+
+fun nicer_name (x, i) (y, j) =
+ (case hidden_ord (x, y) of EQUAL =>
+ (case index_ord (i, j) of EQUAL =>
+ (case qual_ord (x, y) of EQUAL => txt_ord (x, y) | ord => ord)
+ | ord => ord)
+ | ord => ord) <> GREATER;
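+
+(*e.g. a visible name is nicer than a hidden one, and "Foo.bar" is
+  nicer than "Foo.Baz.bar" (fewer qualifier segments)*)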
+
+fun rem_cdups nicer xs =
+ let
+ fun rem_c rev_seen [] = rev rev_seen
+ | rem_c rev_seen [x] = rem_c (x :: rev_seen) []
+ | rem_c rev_seen ((x as ((n, t), _)) :: (y as ((n', t'), _)) :: xs) =
+ if Thm.eq_thm_prop (t, t')
+ then rem_c rev_seen ((if nicer n n' then x else y) :: xs)
+ else rem_c (x :: rev_seen) (y :: xs)
+ in rem_c [] xs end;
+
+in
+
+fun nicer_shortest ctxt =
+ let
+ val ns = ProofContext.theory_of ctxt
+ |> PureThy.facts_of
+ |> Facts.space_of;
+
+ val len_sort = sort (int_ord o (pairself size));
+ fun shorten s = (case len_sort (NameSpace.get_accesses ns s) of
+ [] => s
+ | s'::_ => s');
+
+ fun nicer (Facts.Named ((x, _), i)) (Facts.Named ((y, _), j)) =
+ nicer_name (shorten x, i) (shorten y, j)
+ | nicer (Facts.Fact _) (Facts.Named _) = true
+ | nicer (Facts.Named _) (Facts.Fact _) = false;
+ in nicer end;
+
+fun rem_thm_dups nicer xs =
+ xs ~~ (1 upto length xs)
+ |> sort (TermOrd.fast_term_ord o pairself (Thm.prop_of o #2 o #1))
+ |> rem_cdups nicer
+ |> sort (int_ord o pairself #2)
+ |> map #1;
+
+end;
+
+
+(* print_theorems *)
+
+fun all_facts_of ctxt =
+ maps Facts.selections
+ (Facts.dest_static [] (PureThy.facts_of (ProofContext.theory_of ctxt)) @
+ Facts.dest_static [] (ProofContext.facts_of ctxt));
+
+val limit = ref 40;
+
+fun find_theorems ctxt opt_goal rem_dups raw_criteria =
+ let
+ val add_prems = Seq.hd o (TRY (Method.insert_tac
+ (Assumption.prems_of ctxt) 1));
+ val opt_goal' = Option.map add_prems opt_goal;
+
+ val criteria = map (apsnd (read_criterion ctxt)) raw_criteria;
+ val filters = map (filter_criterion ctxt opt_goal') criteria;
+
+ val raw_matches = all_filters filters (all_facts_of ctxt);
+
+ val matches =
+ if rem_dups
+ then rem_thm_dups (nicer_shortest ctxt) raw_matches
+ else raw_matches;
+ in matches end;
+
+fun print_theorems ctxt opt_goal opt_limit rem_dups raw_criteria =
+ let
+ val start = start_timing ();
+
+ val criteria = map (apsnd (read_criterion ctxt)) raw_criteria;
+ val matches = find_theorems ctxt opt_goal rem_dups raw_criteria;
+
+ val len = length matches;
+ val lim = the_default (! limit) opt_limit;
+ val thms = Library.drop (len - lim, matches);
+
+ val end_msg = " in " ^
+ (List.nth (String.tokens Char.isSpace (end_timing start), 3))
+ ^ " secs"
+ in
+ Pretty.big_list "searched for:" (map (pretty_criterion ctxt) criteria)
+ :: Pretty.str "" ::
+ (if null thms then [Pretty.str ("nothing found" ^ end_msg)]
+ else
+ [Pretty.str ("found " ^ string_of_int len ^ " theorems" ^
+ (if len <= lim then ""
+ else " (" ^ string_of_int lim ^ " displayed)")
+ ^ end_msg ^ ":"), Pretty.str ""] @
+ map Display.pretty_fact thms)
+ |> Pretty.chunks |> Pretty.writeln
+ end;
+
+
+
+(** command syntax **)
+
+fun find_theorems_cmd ((opt_lim, rem_dups), spec) =
+ Toplevel.unknown_theory o Toplevel.keep (fn state =>
+ let
+ val proof_state = Toplevel.enter_proof_body state;
+ val ctxt = Proof.context_of proof_state;
+ val opt_goal = try Proof.get_goal proof_state |> Option.map (#2 o #2);
+ in print_theorems ctxt opt_goal opt_lim rem_dups spec end);
+
+local
+
+structure P = OuterParse and K = OuterKeyword;
+
+val criterion =
+ P.reserved "name" |-- P.!!! (P.$$$ ":" |-- P.xname) >> Name ||
+ P.reserved "intro" >> K Intro ||
+ P.reserved "elim" >> K Elim ||
+ P.reserved "dest" >> K Dest ||
+ P.reserved "solves" >> K Solves ||
+ P.reserved "simp" |-- P.!!! (P.$$$ ":" |-- P.term) >> Simp ||
+ P.term >> Pattern;
+
+val options =
+ Scan.optional
+ (P.$$$ "(" |--
+ P.!!! (Scan.option P.nat -- Scan.optional (P.reserved "with_dups" >> K false) true
+ --| P.$$$ ")")) (NONE, true);
+in
+
+val _ =
+ OuterSyntax.improper_command "find_theorems" "print theorems meeting specified criteria" K.diag
+ (options -- Scan.repeat (((Scan.option P.minus >> is_none) -- criterion))
+ >> (Toplevel.no_timing oo find_theorems_cmd));
+
+end;
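+
+(*illustrative invocations (example patterns only):
+    find_theorems "_ + _ = _ + _"        search by term pattern
+    find_theorems intro simp: "_ * _"    combine several criteria
+    find_theorems (12 with_dups) elim    at most 12 results, keep duplicates
+  a leading "-" negates a criterion, e.g. find_theorems -name: temp*)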
+
+end;
--- a/src/Pure/conv.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/conv.ML Fri Feb 27 18:50:35 2009 +0100
@@ -7,12 +7,17 @@
infix 1 then_conv;
infix 0 else_conv;
+signature BASIC_CONV =
+sig
+ val then_conv: conv * conv -> conv
+ val else_conv: conv * conv -> conv
+end;
+
signature CONV =
sig
+ include BASIC_CONV
val no_conv: conv
val all_conv: conv
- val then_conv: conv * conv -> conv
- val else_conv: conv * conv -> conv
val first_conv: conv list -> conv
val every_conv: conv list -> conv
val try_conv: conv -> conv
@@ -171,3 +176,6 @@
| NONE => raise THM ("gconv_rule", i, [th]));
end;
+
+structure BasicConv: BASIC_CONV = Conv;
+open BasicConv;
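+
+(*thus conversions compose infix at top level, e.g. (a sketch, for
+  suitable rewrite rules th1/th2):
+  Conv.rewr_conv th1 then_conv Conv.arg_conv (Conv.rewr_conv th2)*)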
--- a/src/Pure/envir.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/envir.ML Fri Feb 27 18:50:35 2009 +0100
@@ -265,7 +265,7 @@
| fast Ts (Const (_, T)) = T
| fast Ts (Free (_, T)) = T
| fast Ts (Bound i) =
- (List.nth (Ts, i)
+ (nth Ts i
handle Subscript => raise TERM ("fastype: Bound", [Bound i]))
| fast Ts (Var (_, T)) = T
| fast Ts (Abs (_, T, u)) = T --> fast (T :: Ts) u
--- a/src/Pure/proofterm.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/proofterm.ML Fri Feb 27 18:50:35 2009 +0100
@@ -470,8 +470,8 @@
val n = length args;
fun subst' lev (Bound i) =
(if i<lev then raise SAME (*var is locally bound*)
- else incr_boundvars lev (List.nth (args, i-lev))
- handle Subscript => Bound (i-n) (*loose: change it*))
+ else incr_boundvars lev (nth args (i-lev))
+ handle Subscript => Bound (i-n)) (*loose: change it*)
| subst' lev (Abs (a, T, body)) = Abs (a, T, subst' (lev+1) body)
| subst' lev (f $ t) = (subst' lev f $ substh' lev t
handle SAME => f $ subst' lev t)
@@ -494,7 +494,7 @@
val n = length args;
fun subst (PBound i) Plev tlev =
(if i < Plev then raise SAME (*var is locally bound*)
- else incr_pboundvars Plev tlev (List.nth (args, i-Plev))
+ else incr_pboundvars Plev tlev (nth args (i-Plev))
handle Subscript => PBound (i-n) (*loose: change it*))
| subst (AbsP (a, t, body)) Plev tlev = AbsP (a, t, subst body (Plev+1) tlev)
| subst (Abst (a, T, body)) Plev tlev = Abst (a, T, subst body Plev (tlev+1))
@@ -935,7 +935,7 @@
in (is, ch orelse ch', ts',
if ch orelse ch' then prf' % t' else prf) end
| shrink' ls lev ts prfs (prf as PBound i) =
- (if exists (fn SOME (Bound j) => lev-j <= List.nth (ls, i) | _ => true) ts
+ (if exists (fn SOME (Bound j) => lev-j <= nth ls i | _ => true) ts
orelse has_duplicates (op =)
(Library.foldl (fn (js, SOME (Bound j)) => j :: js | (js, _) => js) ([], ts))
orelse exists #1 prfs then [i] else [], false, map (pair false) ts, prf)
--- a/src/Pure/sign.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/sign.ML Fri Feb 27 18:50:35 2009 +0100
@@ -338,7 +338,7 @@
fun typ_of (_, Const (_, T)) = T
| typ_of (_, Free (_, T)) = T
| typ_of (_, Var (_, T)) = T
- | typ_of (bs, Bound i) = snd (List.nth (bs, i) handle Subscript =>
+ | typ_of (bs, Bound i) = snd (nth bs i handle Subscript =>
raise TYPE ("Loose bound variable: B." ^ string_of_int i, [], [Bound i]))
| typ_of (bs, Abs (x, T, body)) = T --> typ_of ((x, T) :: bs, body)
| typ_of (bs, t $ u) =
--- a/src/Pure/tctical.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/tctical.ML Fri Feb 27 18:50:35 2009 +0100
@@ -349,15 +349,13 @@
(*Returns all states that have changed in subgoal i, counted from the LAST
subgoal. For stac, for example.*)
fun CHANGED_GOAL tac i st =
- let val np = nprems_of st
+ let val np = Thm.nprems_of st
val d = np-i (*distance from END*)
- val t = List.nth(prems_of st, i-1)
+ val t = Thm.term_of (Thm.cprem_of st i)
fun diff st' =
- nprems_of st' - d <= 0 (*the subgoal no longer exists*)
+ Thm.nprems_of st' - d <= 0 (*the subgoal no longer exists*)
orelse
- not (Pattern.aeconv (t,
- List.nth(prems_of st',
- nprems_of st' - d - 1)))
+ not (Pattern.aeconv (t, Thm.term_of (Thm.cprem_of st' (Thm.nprems_of st' - d))))
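+ (*Thm.cprem_of counts subgoals from 1, so the former 0-indexed
+ "- 1" offsets of List.nth disappear*)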
in Seq.filter diff (tac i st) end
handle Subscript => Seq.empty (*no subgoal i*);
--- a/src/Pure/term.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/term.ML Fri Feb 27 18:50:35 2009 +0100
@@ -297,7 +297,7 @@
Ts = [T0,T1,...] holds types of bound variables 0, 1, ...*)
fun type_of1 (Ts, Const (_,T)) = T
| type_of1 (Ts, Free (_,T)) = T
- | type_of1 (Ts, Bound i) = (List.nth (Ts,i)
+ | type_of1 (Ts, Bound i) = (nth Ts i
handle Subscript => raise TYPE("type_of: bound variable", [], [Bound i]))
| type_of1 (Ts, Var (_,T)) = T
| type_of1 (Ts, Abs (_,T,body)) = T --> type_of1(T::Ts, body)
@@ -322,7 +322,7 @@
| _ => raise TERM("fastype_of: expected function type", [f$u]))
| fastype_of1 (_, Const (_,T)) = T
| fastype_of1 (_, Free (_,T)) = T
- | fastype_of1 (Ts, Bound i) = (List.nth(Ts,i)
+ | fastype_of1 (Ts, Bound i) = (nth Ts i
handle Subscript => raise TERM("fastype_of: Bound", [Bound i]))
| fastype_of1 (_, Var (_,T)) = T
| fastype_of1 (Ts, Abs (_,T,u)) = T --> fastype_of1 (T::Ts, u);
@@ -387,17 +387,17 @@
(*number of atoms and abstractions in a term*)
fun size_of_term tm =
let
- fun add_size (t $ u, n) = add_size (t, add_size (u, n))
- | add_size (Abs (_ ,_, t), n) = add_size (t, n + 1)
- | add_size (_, n) = n + 1;
- in add_size (tm, 0) end;
+ fun add_size (t $ u) n = add_size t (add_size u n)
+ | add_size (Abs (_ ,_, t)) n = add_size t (n + 1)
+ | add_size _ n = n + 1;
+ in add_size tm 0 end;
-(*number of tfrees, tvars, and constructors in a type*)
+(*number of atoms and constructors in a type*)
fun size_of_typ ty =
let
- fun add_size (Type (_, ars), n) = foldl add_size (n + 1) ars
- | add_size (_, n) = n + 1;
- in add_size (ty, 0) end;
+ fun add_size (Type (_, tys)) n = fold add_size tys (n + 1)
+ | add_size _ n = n + 1;
+ in add_size ty 0 end;
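+
+(*e.g. size_of_typ (nat => bool list) = 4, counting the nodes
+  "fun", "nat", "list" and "bool" once each*)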
fun map_atyps f (Type (a, Ts)) = Type (a, map (map_atyps f) Ts)
| map_atyps f T = f T;
@@ -638,7 +638,7 @@
val n = length args;
fun subst (t as Bound i, lev) =
(if i < lev then raise SAME (*var is locally bound*)
- else incr_boundvars lev (List.nth (args, i - lev))
+ else incr_boundvars lev (nth args (i - lev))
handle Subscript => Bound (i - n)) (*loose: change it*)
| subst (Abs (a, T, body), lev) = Abs (a, T, subst (body, lev + 1))
| subst (f $ t, lev) =
--- a/src/Pure/type_infer.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Pure/type_infer.ML Fri Feb 27 18:50:35 2009 +0100
@@ -369,7 +369,7 @@
fun inf _ (PConst (_, T)) = T
| inf _ (PFree (_, T)) = T
| inf _ (PVar (_, T)) = T
- | inf bs (PBound i) = snd (List.nth (bs, i) handle Subscript => err_loose i)
+ | inf bs (PBound i) = snd (nth bs i handle Subscript => err_loose i)
| inf bs (PAbs (x, T, t)) = PType ("fun", [T, inf ((x, T) :: bs) t])
| inf bs (PAppl (t, u)) =
let
--- a/src/Tools/auto_solve.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Tools/auto_solve.ML Fri Feb 27 18:50:35 2009 +0100
@@ -1,89 +1,91 @@
-(* Title: auto_solve.ML
+(* Title: Tools/auto_solve.ML
Author: Timothy Bourke and Gerwin Klein, NICTA
- Check whether a newly stated theorem can be solved directly
- by an existing theorem. Duplicate lemmas can be detected in
- this way.
+Check whether a newly stated theorem can be solved directly by an
+existing theorem. Duplicate lemmas can be detected in this way.
- The implemenation is based in part on Berghofer and
- Haftmann's Pure/codegen.ML. It relies critically on
- the FindTheorems solves feature.
+The implementation is based in part on Berghofer and Haftmann's
+Pure/codegen.ML. It relies critically on the FindTheorems solves
+feature.
*)
signature AUTO_SOLVE =
sig
- val auto : bool ref;
- val auto_time_limit : int ref;
+ val auto : bool ref
+ val auto_time_limit : int ref
- val seek_solution : bool -> Proof.state -> Proof.state;
+ val seek_solution : bool -> Proof.state -> Proof.state
end;
structure AutoSolve : AUTO_SOLVE =
struct
- structure FT = FindTheorems;
- val auto = ref false;
- val auto_time_limit = ref 2500;
+val auto = ref false;
+val auto_time_limit = ref 2500;
- fun seek_solution int state = let
- val ctxt = Proof.context_of state;
+fun seek_solution int state =
+ let
+ val ctxt = Proof.context_of state;
- fun conj_to_list [] = []
- | conj_to_list (t::ts) =
- (Conjunction.dest_conjunction t
- |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
- handle TERM _ => t::conj_to_list ts;
+ fun conj_to_list [] = []
+ | conj_to_list (t::ts) =
+ (Conjunction.dest_conjunction t
+ |> (fn (t1, t2) => conj_to_list (t1::t2::ts)))
+ handle TERM _ => t::conj_to_list ts;
- val crits = [(true, FT.Solves)];
- fun find g = (NONE, FT.find_theorems ctxt g true crits);
- fun find_cterm g = (SOME g, FT.find_theorems ctxt
- (SOME (Goal.init g)) true crits);
+ val crits = [(true, FindTheorems.Solves)];
+ fun find g = (NONE, FindTheorems.find_theorems ctxt g true crits);
+ fun find_cterm g = (SOME g, FindTheorems.find_theorems ctxt
+ (SOME (Goal.init g)) true crits);
- fun prt_result (goal, results) = let
- val msg = case goal of
- NONE => "The current goal"
- | SOME g => Syntax.string_of_term ctxt (term_of g);
- in
- Pretty.big_list (msg ^ " could be solved directly with:")
- (map Display.pretty_fact results)
- end;
+ fun prt_result (goal, results) =
+ let
+ val msg = case goal of
+ NONE => "The current goal"
+ | SOME g => Syntax.string_of_term ctxt (term_of g);
+ in
+ Pretty.big_list (msg ^ " could be solved directly with:")
+ (map Display.pretty_fact results)
+ end;
- fun seek_against_goal () = let
- val goal = try Proof.get_goal state
- |> Option.map (#2 o #2);
+ fun seek_against_goal () =
+ let
+ val goal = try Proof.get_goal state
+ |> Option.map (#2 o #2);
- val goals = goal
- |> Option.map (fn g => cprem_of g 1)
- |> the_list
- |> conj_to_list;
+ val goals = goal
+ |> Option.map (fn g => cprem_of g 1)
+ |> the_list
+ |> conj_to_list;
- val rs = if length goals = 1
- then [find goal]
- else map find_cterm goals;
- val frs = filter_out (null o snd) rs;
+ val rs = if length goals = 1
+ then [find goal]
+ else map find_cterm goals;
+ val frs = filter_out (null o snd) rs;
- in if null frs then NONE else SOME frs end;
+ in if null frs then NONE else SOME frs end;
- fun go () = let
- val res = TimeLimit.timeLimit
- (Time.fromMilliseconds (!auto_time_limit))
- (try seek_against_goal) ();
- in
- case Option.join res of
- NONE => state
- | SOME results => (Proof.goal_message
- (fn () => Pretty.chunks [Pretty.str "",
- Pretty.markup Markup.hilite
- (Library.separate (Pretty.brk 0)
- (map prt_result results))])
- state)
- end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
- in
- if int andalso !auto andalso not (!Toplevel.quiet)
- then go ()
- else state
- end;
-
+ fun go () =
+ let
+ val res = TimeLimit.timeLimit
+ (Time.fromMilliseconds (! auto_time_limit))
+ (try seek_against_goal) ();
+ in
+ case Option.join res of
+ NONE => state
+ | SOME results => (Proof.goal_message
+ (fn () => Pretty.chunks [Pretty.str "",
+ Pretty.markup Markup.hilite
+ (Library.separate (Pretty.brk 0)
+ (map prt_result results))])
+ state)
+ end handle TimeLimit.TimeOut => (warning "AutoSolve: timeout."; state);
+ in
+ if int andalso ! auto andalso not (! Toplevel.quiet)
+ then go ()
+ else state
+ end;
+
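+(*to enable, set the flags, e.g. (from ML or the corresponding Proof
+  General preference): AutoSolve.auto := true and, optionally,
+  AutoSolve.auto_time_limit := 2500 for a 2.5 s limit*)
+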
end;
val _ = Context.>> (Specification.add_theorem_hook AutoSolve.seek_solution);
--- a/src/Tools/code/code_wellsorted.ML Thu Feb 26 10:13:43 2009 +0100
+++ b/src/Tools/code/code_wellsorted.ML Fri Feb 27 18:50:35 2009 +0100
@@ -166,9 +166,8 @@
in
vardeps_data
|> (apsnd o apsnd) (insert (op =) inst)
- |> fold_index (fn (k, classes) =>
- apfst (Vargraph.new_node ((Inst (class, tyco), k), ([] ,[])))
- ) classess
+ |> fold_index (fn (k, _) =>
+ apfst (Vargraph.new_node ((Inst (class, tyco), k), ([] ,[])))) classess
|> fold (fn superclass => assert_inst thy arities eqngr (superclass, tyco)) superclasses
|> fold (assert_fun thy arities eqngr) inst_params
|> fold_index (fn (k, classes) =>
@@ -203,9 +202,10 @@
in
vardeps_data
|> (apsnd o apfst) (Symtab.update_new (c, (lhs, eqns)))
+ |> fold_index (fn (k, _) =>
+ apfst (Vargraph.new_node ((Fun c, k), ([] ,[])))) lhs
|> fold_index (fn (k, (_, sort)) =>
- apfst (Vargraph.new_node ((Fun c, k), ([] ,[])))
- #> add_classes thy arities eqngr (Fun c, k) (complete_proper_sort thy sort)) lhs
+ add_classes thy arities eqngr (Fun c, k) (complete_proper_sort thy sort)) lhs
|> fold (assert_rhs thy arities eqngr) rhss'
end;