theory Generic
imports Base Main
begin

chapter {* Generic tools and packages \label{ch:gen-tools} *}

section {* Configuration options \label{sec:config} *}

text {* Isabelle/Pure maintains a record of named configuration
  options within the theory or proof context, with values of type
  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
  string}.  Tools may declare options in ML, and then refer to these
  values (relative to the context).  Thus global reference variables
  are easily avoided.  The user may change the value of a
  configuration option by means of an associated attribute of the same
  name.  This form of context declaration works particularly well with
  commands such as @{command "declare"} or @{command "using"} like
  this:
*}

declare [[show_main_goal = false]]

notepad
begin
  note [[show_main_goal = true]]
end

text {* For historical reasons, some tools cannot take the full proof
  context into account and merely refer to the background theory.
  This is accommodated by configuration options being declared as
  ``global'', which may not be changed within a local context.

  \begin{matharray}{rcll}
    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
  \end{matharray}

  @{rail \<open>
    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
  \<close>}

  \begin{description}

  \item @{command "print_options"} prints the available configuration
  options, with names, types, and current values.

  \item @{text "name = value"} as an attribute expression modifies the
  named option, with the syntax of the value depending on the option's
  type.  For @{ML_type bool} the default value is @{text true}.  Any
  attempt to change a global option in a local context is ignored.

  \end{description}
*}
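
text {* Configuration options may also be changed just for a single proof
  step, by giving the attribute expression to @{command "using"}.  The
  following is a minimal sketch of this form: *}

notepad
begin
  have "0 + 1 = (1::nat)"
    using [[show_main_goal = true]]
    by simp
end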


section {* Basic proof tools *}

subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}

text {*
  \begin{matharray}{rcl}
    @{method_def unfold} & : & @{text method} \\
    @{method_def fold} & : & @{text method} \\
    @{method_def insert} & : & @{text method} \\[0.5ex]
    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def intro} & : & @{text method} \\
    @{method_def elim} & : & @{text method} \\
    @{method_def succeed} & : & @{text method} \\
    @{method_def fail} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
    ;
    (@@{method erule} | @@{method drule} | @@{method frule})
      ('(' @{syntax nat} ')')? @{syntax thmrefs}
    ;
    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
  \<close>}

  \begin{description}

  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
  all goals; any chained facts provided are inserted into the goal and
  subject to rewriting as well.

  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
  into all goals of the proof state.  Note that current facts
  indicated for forward chaining are ignored.

  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
  method (see \secref{sec:pure-meth-att}), but apply rules by
  elim-resolution, destruct-resolution, and forward-resolution,
  respectively \cite{isabelle-implementation}.  The optional natural
  number argument (default 0) specifies additional assumption steps to
  be performed here.

  Note that these methods are improper ones, mainly serving for
  experimentation and tactic script emulation.  Different modes of
  basic rule application are usually expressed in Isar at the proof
  language level, rather than via implicit proof state manipulations.
  For example, a proper single-step elimination would be done using
  the plain @{method rule} method, with forward chaining of current
  facts.

  \item @{method intro} and @{method elim} repeatedly refine some goal
  by intro- or elim-resolution, after having inserted any chained
  facts.  Exactly the rules given as arguments are taken into account;
  this allows fine-tuned decomposition of a proof problem, in contrast
  to common automated tools.

  \item @{method succeed} yields a single (unchanged) result; it is
  the identity of the ``@{text ","}'' method combinator (cf.\
  \secref{sec:proof-meth}).

  \item @{method fail} yields an empty result sequence; it is the
  identity of the ``@{text "|"}'' method combinator (cf.\
  \secref{sec:proof-meth}).

  \end{description}

  \begin{matharray}{rcl}
    @{attribute_def tagged} & : & @{text attribute} \\
    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
    @{attribute_def THEN} & : & @{text attribute} \\
    @{attribute_def unfolded} & : & @{text attribute} \\
    @{attribute_def folded} & : & @{text attribute} \\
    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
    @{attribute_def rotated} & : & @{text attribute} \\
    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
  \end{matharray}

  @{rail \<open>
    @@{attribute tagged} @{syntax name} @{syntax name}
    ;
    @@{attribute untagged} @{syntax name}
    ;
    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
    ;
    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
    ;
    @@{attribute rotated} @{syntax int}?
  \<close>}

  \begin{description}

  \item @{attribute tagged}~@{text "name value"} and @{attribute
  untagged}~@{text name} add and remove \emph{tags} of some theorem.
  Tags may be any list of string pairs that serve as formal comment.
  The first string is considered the tag name, the second its value.
  Note that @{attribute untagged} removes any tags of the same name.

  \item @{attribute THEN}~@{text a} composes rules by resolution; it
  resolves with the first premise of @{text a} (an alternative
  position may also be specified).  See also @{ML_op "RS"} in
  \cite{isabelle-implementation}.

  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
  definitions throughout a rule.

  \item @{attribute abs_def} turns an equation of the form @{prop "f x
  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
  simp} or @{method unfold} steps always expand it.  This also works
  for object-logic equality.

  \item @{attribute rotated}~@{text n} rotates the premises of a
  theorem by @{text n} (default 1).

  \item @{attribute (Pure) elim_format} turns a destruction rule into
  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.

  Note that the Classical Reasoner (\secref{sec:classical}) provides
  its own version of this operation.

  \item @{attribute no_vars} replaces schematic variables by free
  ones; this is mainly for tuning output of pretty printed theorems.

  \end{description}
*}
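
text {* For example, here are some minimal sketches of these methods and
  attributes in Isabelle/HOL, using the standard rules @{text conjE},
  @{text conjunct2}, @{text impI}, and @{text conjI}: the first two goals
  are solved by a single elim- resp.\ destruct-resolution step, the third
  by repeated introduction steps, and the final @{command "thm"} command
  displays @{text conjI} with its premises exchanged by the
  @{attribute rotated} attribute. *}

lemma "A \<and> B \<Longrightarrow> B"
  by (erule conjE)

lemma "A \<and> B \<Longrightarrow> B"
  by (drule conjunct2)

lemma "A \<longrightarrow> B \<longrightarrow> A \<and> A"
  by (intro impI conjI)

thm conjI [rotated]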


subsection {* Low-level equational reasoning *}

text {*
  \begin{matharray}{rcl}
    @{method_def subst} & : & @{text method} \\
    @{method_def hypsubst} & : & @{text method} \\
    @{method_def split} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
    ;
    @@{method split} @{syntax thmrefs}
  \<close>}

  These methods provide low-level facilities for equational reasoning
  that are intended for specialized applications only.  Normally,
  single step calculations would be performed in a structured text
  (see also \secref{sec:calculation}), while the Simplifier methods
  provide the canonical way for automated normalization (see
  \secref{sec:simplifier}).

  \begin{description}

  \item @{method subst}~@{text eq} performs a single substitution step
  using rule @{text eq}, which may be either a meta or object
  equality.

  \item @{method subst}~@{text "(asm) eq"} substitutes in an
  assumption.

  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
  substitutions in the conclusion. The numbers @{text i} to @{text j}
  indicate the positions to substitute at.  Positions are ordered from
  the top of the term tree moving down from left to right. For
  example, in @{text "(a + b) + (c + d)"} there are three positions
  where commutativity of @{text "+"} is applicable: 1 refers to @{text
  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.

  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
  assume all substitutions are performed simultaneously.  Otherwise
  the behaviour of @{text subst} is not specified.

  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
  substitutions in the assumptions. The positions refer to the
  assumptions in order from left to right.  For example, given a
  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
  commutativity of @{text "+"} is the subterm @{text "a + b"} and
  position 2 is the subterm @{text "c + d"}.

  \item @{method hypsubst} performs substitution using some
  assumption; this only works for equations of the form @{text "x =
  t"} where @{text x} is a free or bound variable.

  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
  splitting using the given rules.  Splitting is performed in the
  conclusion or some assumption of the subgoal, depending on the
  structure of the rule.

  Note that the @{method simp} method already involves repeated
  application of split rules as declared in the current context, using
  @{attribute split}, for example.

  \end{description}
*}
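
text {* For example, here are some minimal sketches of these methods in
  Isabelle/HOL; @{text add_0_right} and @{text split_if} are standard
  library facts.  The first proof rewrites the single matching position
  with @{method subst}, the second substitutes an equational assumption
  with @{method hypsubst}, and the third splits a conditional expression
  in the conclusion. *}

lemma "Suc (x + 0) = Suc x"
  by (subst add_0_right) (rule refl)

lemma "x = Suc y \<Longrightarrow> 0 < x"
  by hypsubst simp

lemma "P x \<Longrightarrow> P y \<Longrightarrow> P (if b then x else y)"
  by (split split_if) simp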


subsection {* Further tactic emulations \label{sec:tactics} *}

text {*
  The following improper proof methods emulate traditional tactics.
  These admit direct access to the goal state, which is normally
  considered harmful!  In particular, this may involve both numbered
  goal addressing (default 1), and dynamic instantiation within the
  scope of some subgoal.

  \begin{warn}
    Dynamic instantiations refer to universally quantified parameters
    of a subgoal (the dynamic context) rather than fixed variables and
    term abbreviations of a (static) Isar context.
  \end{warn}

  Tactic emulation methods, unlike their ML counterparts, admit
  simultaneous instantiation from both dynamic and static contexts.
  If names occur in both contexts, goal parameters hide locally fixed
  variables.  Likewise, schematic variables refer to term
  abbreviations, if present in the static context.  Otherwise the
  schematic variable is interpreted as a schematic variable and left
  to be solved by unification with certain parts of the subgoal.

  Note that the tactic emulation proof methods in Isabelle/Isar are
  consistently named @{text foo_tac}.  Note also that variable names
  occurring on left hand sides of instantiations must be preceded by a
  question mark if they coincide with a keyword or contain dots.  This
  is consistent with the attribute @{attribute "where"} (see
  \secref{sec:pure-meth-att}).

  \begin{matharray}{rcl}
    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
    ;
    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
    ;
    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
    ;
    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
    ;
    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
    ;

    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
  \<close>}

  \begin{description}

  \item @{method rule_tac} etc. do resolution of rules with explicit
  instantiation.  This works the same way as the ML tactics @{ML
  res_inst_tac} etc. (see \cite{isabelle-implementation}).

  Multiple rules may only be given if there is no instantiation; then
  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
  \cite{isabelle-implementation}).

  \item @{method cut_tac} inserts facts into the proof state as
  assumption of a subgoal; instantiations may be given as well.  Note
  that the scope of schematic variables is spread over the main goal
  statement and rule premises are turned into new subgoals.  This is
  in contrast to the regular method @{method insert} which inserts
  closed rule statements.

  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
  from a subgoal.  Note that @{text \<phi>} may contain schematic
  variables, to abbreviate the intended proposition; the first
  matching subgoal premise will be deleted.  Removing useless premises
  from a subgoal increases its readability and can make search tactics
  run faster.

  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
  as new subgoals (in the original context).

  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
  \emph{suffix} of variables.

  \item @{method rotate_tac}~@{text n} rotates the premises of a
  subgoal by @{text n} positions: from right to left if @{text n} is
  positive, and from left to right if @{text n} is negative; the
  default value is 1.

  \item @{method tactic}~@{text "text"} produces a proof method from
  any ML text of type @{ML_type tactic}.  Apart from the usual ML
  environment and the current proof context, the ML code may refer to
  the locally bound value @{ML_text facts}, which indicates any
  current facts used for forward-chaining.

  \item @{method raw_tactic} is similar to @{method tactic}, but
  presents the goal state in its raw internal form, where simultaneous
  subgoals appear as conjunction of the logical framework instead of
  the usual split into several subgoals.  While this feature is useful
  for debugging complex method definitions, it should never appear in
  production theories.

  \end{description}
*}
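
text {* For example, here is a minimal sketch of explicit instantiation with
  @{method rule_tac} (using the standard HOL rule @{text exI}) and of premise
  deletion with @{method thin_tac}: *}

lemma "\<exists>x::nat. x = a"
  by (rule_tac x = a in exI) (rule refl)

lemma "A \<Longrightarrow> B \<Longrightarrow> B"
  apply (thin_tac "A")
  apply assumption
  done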


section {* The Simplifier \label{sec:simplifier} *}

text {* The Simplifier performs conditional and unconditional
  rewriting and uses contextual information: rule declarations in the
  background theory or local proof context are taken into account, as
  well as chained facts and subgoal premises (``local assumptions'').
  There are several general hooks that allow one to modify the
  simplification strategy, or incorporate other proof tools that solve
  sub-problems, produce rewrite rules on demand etc.

  The rewriting strategy is always strictly bottom up, except for
  congruence rules, which are applied while descending into a term.
  Conditions in conditional rewrite rules are solved recursively
  before the rewrite rule is applied.

  The default Simplifier setup of major object logics (HOL, HOLCF,
  FOL, ZF) makes the Simplifier ready for immediate use, without
  engaging into the internal structures.  Thus it serves as a
  general-purpose proof tool with the main focus on equational
  reasoning, and a bit more than that.
*}


subsection {* Simplification methods \label{sec:simp-meth} *}

text {*
  \begin{matharray}{rcl}
    @{method_def simp} & : & @{text method} \\
    @{method_def simp_all} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
    ;

    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
    ;
    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
  \<close>}

  \begin{description}

  \item @{method simp} invokes the Simplifier on the first subgoal,
  after inserting chained facts as additional goal premises; further
  rule declarations may be included via @{text "(simp add: facts)"}.
  The proof method fails if the subgoal remains unchanged after
  simplification.

  Note that the original goal premises and chained facts are subject
  to simplification themselves, while declarations via @{text
  "add"}/@{text "del"} merely follow the policies of the object-logic
  to extract rewrite rules from theorems, without further
  simplification.  This may lead to slightly different behavior in
  either case, which might be required precisely like that in some
  boundary situations to perform the intended simplification step!

  \medskip The @{text only} modifier first removes all other rewrite
  rules, looper tactics (including split rules), congruence rules, and
  then behaves like @{text add}.  Implicit solvers remain, which means
  that trivial rules like reflexivity or introduction of @{text
  "True"} are available to solve the simplified subgoals, but also
  non-trivial tools like linear arithmetic in HOL.  The latter may
  lead to some surprise of the meaning of ``only'' in Isabelle/HOL
  compared to English!

  \medskip The @{text split} modifiers add or delete rules for the
  Splitter (see also \secref{sec:simp-strategies} on the looper).
  This works only if the Simplifier method has been properly setup to
  include the Splitter (all major object logics such as HOL, HOLCF,
  FOL, ZF do this already).

  There is also a separate @{method_ref split} method available for
  single-step case splitting.  The effect of repeatedly applying
  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
  split: thms)"}''.

  \medskip The @{text cong} modifiers add or delete Simplifier
  congruence rules (see also \secref{sec:simp-rules}); the default is
  to add.

  \item @{method simp_all} is similar to @{method simp}, but acts on
  all goals, working backwards from the last to the first one as usual
  in Isabelle.\footnote{The order is irrelevant for goals without
  schematic variables, so simplification might actually be performed
  in parallel here.}

  Chained facts are inserted into all subgoals, before the
  simplification process starts.  Further rule declarations are the
  same as for @{method simp}.

  The proof method fails if all subgoals remain unchanged after
  simplification.

  \end{description}

  By default the Simplifier methods above take local assumptions fully
  into account, using equational assumptions in the subsequent
  normalization process, or simplifying assumptions themselves.
  Further options allow one to fine-tune the behavior of the Simplifier
  in this respect, corresponding to a variety of ML tactics as
  follows.\footnote{Unlike the corresponding Isar proof methods, the
  ML tactics do not insist on changing the goal state.}

  \begin{center}
  \small
  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
  \hline
  Isar method & ML tactic & behavior \\\hline

  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
  completely \\\hline

  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
  are used in the simplification of the conclusion but are not
  themselves simplified \\\hline

  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
  are simplified but are not used in the simplification of each other
  or the conclusion \\\hline

  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
  the simplification of the conclusion and to simplify other
  assumptions \\\hline

  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
  mode: an assumption is only used for simplifying assumptions which
  are to the right of it \\\hline

  \end{supertabular}
  \end{center}
*}


subsubsection {* Examples *}

text {* We consider basic algebraic simplifications in Isabelle/HOL.
  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
  a good candidate to be solved by a single call of @{method simp}:
*}

lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops

text {* The above attempt \emph{fails}, because @{term "0"} and @{term
  "op +"} in the HOL library are declared as generic type class
  operations, without stating any algebraic laws yet.  More specific
  types are required to get access to certain standard simplifications
  of the theory context, e.g.\ like this: *}

lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp

text {*
  \medskip In many cases, assumptions of a subgoal are also needed in
  the simplification process.  For example:
*}

lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp

text {* As seen above, local assumptions that shall contribute to
  simplification need to be part of the subgoal already, or indicated
  explicitly for use by the subsequent method invocation.  Either too
  little or too much information can make simplification fail, for
  different reasons.

  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
  g (f (g x))"} does not contribute to solve the problem, but makes
  the default @{method simp} method loop: the rewrite rule @{text "f
  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
  terminate.  The Simplifier notices certain simple forms of
  nontermination, but not this one.  The problem can be solved
  nonetheless, by ignoring assumptions via special options as
  explained before:
*}

lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
  by (simp (no_asm))

text {* The latter form is typical for long unstructured proof
  scripts, where the control over the goal content is limited.  In
  structured proofs it is usually better to avoid pushing too many
  facts into the goal state in the first place.  Assumptions in the
  Isar proof context do not intrude the reasoning if not used
  explicitly.  This is illustrated for a toplevel statement and a
  local proof body as follows:
*}

lemma
  assumes "\<And>x::nat. f x = g (f (g x))"
  shows "f 0 = f 0 + 0" by simp

notepad
begin
  assume "\<And>x::nat. f x = g (f (g x))"
  have "f 0 = f 0 + 0" by simp
end

text {* \medskip Because assumptions may simplify each other, there
  can be very subtle cases of nontermination. For example, the regular
  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
  \[
  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
  \]
  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
  Q"} terminates (without solving the goal):
*}

lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
  apply simp
  oops

text {* See also \secref{sec:simp-config} for options to enable
  Simplifier trace mode, which often helps to diagnose problems with
  rewrite systems.
*}


subsection {* Declaring rules \label{sec:simp-rules} *}

text {*
  \begin{matharray}{rcl}
    @{attribute_def simp} & : & @{text attribute} \\
    @{attribute_def split} & : & @{text attribute} \\
    @{attribute_def cong} & : & @{text attribute} \\
    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  \end{matharray}

  @{rail \<open>
    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
      (() | 'add' | 'del')
  \<close>}

  \begin{description}

  \item @{attribute simp} declares rewrite rules, by adding or
  deleting them from the simpset within the theory or proof context.
  Rewrite rules are theorems expressing some form of equality, for
  example:

  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}

  \smallskip
  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
  also permitted; the conditions can be arbitrary formulas.

  \medskip Internally, all rewrite rules are translated into Pure
  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
  simpset contains a function for extracting equalities from arbitrary
  theorems, which is usually installed when the object-logic is
  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
  @{attribute simp} and local assumptions within a goal are treated
  uniformly in this respect.

  The Simplifier accepts the following formats for the @{text "lhs"}
  term:

  \begin{enumerate}

  \item First-order patterns, considering the sublanguage of
  application of constant operators to variable operands, without
  @{text "\<lambda>"}-abstractions or functional variables.
  For example:

  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}

  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
  These are terms in @{text "\<beta>"}-normal form (this will always be the
  case unless you have done something strange) where each occurrence
  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
  @{text "x\<^sub>i"} are distinct bound variables.

  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
  or its symmetric form, since the @{text "rhs"} is also a
  higher-order pattern.

  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
  variables are treated like quasi-constant term material.

  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
  be replaced by adding new variables and conditions like this: @{text
  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
  rewrite rule of the second category since conditions can be
  arbitrary terms.

  \end{enumerate}

  \item @{attribute split} declares case split rules.

  \item @{attribute cong} declares congruence rules to the Simplifier
  context.

  Congruence rules are equalities of the form @{text [display]
  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}

  This controls the simplification of the arguments of @{text f}.  For
  example, some arguments can be simplified under additional
  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}

  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
  assumptions are effective for rewriting formulae such as @{text "x =
  0 \<longrightarrow> y + x = y"}.

  %FIXME
  %The local assumptions are also provided as theorems to the solver;
  %see \secref{sec:simp-solver} below.

  \medskip The following congruence rule for bounded quantifiers also
  supplies contextual information --- about the bound variable:
  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}

  \medskip This congruence rule for conditional expressions can
  supply contextual information for simplifying the arms:
  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}

  A congruence rule can also \emph{prevent} simplification of some
  arguments.  Here is an alternative congruence rule for conditional
  expressions that conforms to non-strict functional evaluation:
  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}

  Only the first argument is simplified; the others remain unchanged.
  This can make simplification much faster, but may require an extra
  case split over the condition @{text "?q"} to prove the goal.

  \item @{command "print_simpset"} prints the collection of rules
  declared to the Simplifier, which is also known as ``simpset''
  internally.

  For historical reasons, simpsets may occur independently from the
  current context, but are conceptually dependent on it.  When the
  Simplifier is invoked via one of its main entry points in the Isar
  source language (as proof method \secref{sec:simp-meth} or rule
  attribute \secref{sec:simp-meth}), its simpset is derived from the
  current proof context, and carries a back-reference to that for
  other tools that might get invoked internally (e.g.\ simplification
  procedures \secref{sec:simproc}).  A mismatch of the context of the
  simpset and the context of the problem being simplified may lead to
  unexpected results.

  \end{description}

  The implicit simpset of the theory context is propagated
  monotonically through the theory hierarchy: forming a new theory,
  the union of the simpsets of its imports is taken as the starting
  point.  Also note that definitional packages like @{command
  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
  declare Simplifier rules to the target context, while plain
  @{command "definition"} is an exception in \emph{not} declaring
  anything.

  \medskip It is up to the user to manipulate the current simpset
  further by explicitly adding or deleting theorems as simplification
  rules, or installing other tools via simplification procedures
  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"}, are good
  candidates for the implicit simpset, unless a special
  non-normalizing behavior of certain operations is intended.  More
  specific rules (such as distributive laws, which duplicate subterms)
  should be added only for specific proof steps.  Conversely,
  sometimes a rule needs to be deleted just for some part of a proof.
  The need for frequent additions or deletions may indicate a poorly
  designed simpset.

  \begin{warn}
  The union of simpsets from theory imports (as described above) is
  not always a good starting point for the new theory.  If some
  ancestors have deleted simplification rules because they are no
  longer wanted, while others have left those rules in, then the union
  will contain the unwanted rules, which then have to be deleted again
  in the theory body.
  \end{warn}
*}
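
text {* For example, here is a minimal sketch of declaring a local fact as a
  rewrite rule: inside a @{command "notepad"}, an assumption is marked with
  the @{attribute simp} attribute and then participates in a subsequent
  @{method simp} invocation. *}

notepad
begin
  assume f_id [simp]: "\<And>x::nat. f x = x"
  have "f (f 2) = 2" by simp
end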


subsection {* Ordered rewriting with permutative rules *}

text {* A rewrite rule is \emph{permutative} if the left-hand side and
  right-hand side are equal up to renaming of variables.  The most
  common permutative rule is commutativity: @{text "?x + ?y = ?y +
  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
  special attention.

  Because ordinary rewriting loops given such rules, the Simplifier
  employs a special strategy, called \emph{ordered rewriting}.
  Permutative rules are detected and only applied if the rewriting
  step decreases the redex wrt.\ a given term ordering.  For example,
  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
  stops, because the redex cannot be decreased further in the sense of
  the term ordering.

  The default is lexicographic ordering of term structure, but this
  can also be changed locally for special applications via
  @{index_ML Simplifier.set_termless} in Isabelle/ML.

  \medskip Permutative rewrite rules are declared to the Simplifier
  just like other rewrite rules.  Their special status is recognized
  automatically, and their application is guarded by the term ordering
  accordingly. *}


subsubsection {* Rewriting with AC operators *}

text {* Ordered rewriting is particularly effective in the case of
  associative-commutative operators.  (Associativity by itself is not
  permutative.)  When dealing with an AC-operator @{text "f"}, keep
  the following points in mind:

  \begin{itemize}

  \item The associative law must always be oriented from left to
  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
  orientation, if used with commutativity, leads to looping in
  conjunction with the standard term order.

  \item To complete your set of rewrite rules, you must add not just
  associativity (A) and commutativity (C) but also a derived rule
  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.

  \end{itemize}

  Ordered rewriting with the combination of A, C, and LC sorts a term
  lexicographically --- the rewriting engine imitates bubble-sort.
*}

locale AC_example =
  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
  assumes commute: "x \<bullet> y = y \<bullet> x"
begin

lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
proof -
  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
  then show ?thesis by (simp only: assoc)
qed

lemmas AC_rules = assoc commute left_commute

text {* Thus the Simplifier is able to establish equalities with
  arbitrary permutations of subterms, by normalizing to a common
  standard form.  For example: *}

lemma "(b \<bullet> c) \<bullet> a = xxx"
  apply (simp only: AC_rules)
  txt {* @{subgoals} *}
  oops

lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)

end

text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
  give many examples; other algebraic structures are amenable to
  ordered rewriting, such as boolean rings.  The Boyer-Moore theorem
  prover \cite{bm88book} also employs ordered rewriting.
*}


subsubsection {* Re-orienting equalities *}

text {* Another application of ordered rewriting uses the derived rule
  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
  reverse equations.

  This is occasionally useful to re-orient local assumptions according
  to the term ordering, when other built-in mechanisms of
  reorientation and mutual simplification fail to apply.  *}


subsection {* Configuration options \label{sec:simp-config} *}

text {*
  \begin{tabular}{rcll}
    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
  \end{tabular}
  \medskip

  These configuration options control further aspects of the Simplifier.
  See also \secref{sec:config}.

  \begin{description}

  \item @{attribute simp_depth_limit} limits the number of recursive
  invocations of the Simplifier during conditional rewriting.

  \item @{attribute simp_trace} makes the Simplifier output internal
  operations.  This includes rewrite steps, but also bookkeeping like
  modifications of the simpset.

  \item @{attribute simp_trace_depth_limit} limits the effect of
  @{attribute simp_trace} to the given depth of recursive Simplifier
  invocations (when solving conditions of rewrite rules).

  \item @{attribute simp_debug} makes the Simplifier output some extra
  information about internal operations.  This includes any attempted
  invocation of simplification procedures.

  \end{description}
*}
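
text {* For example, tracing can be enabled locally for a single proof step
  via the attributes above (a minimal sketch): *}

notepad
begin
  have "0 + (1::nat) = 1"
    using [[simp_trace = true, simp_trace_depth_limit = 2]]
    by simp
end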


subsection {* Simplification procedures \label{sec:simproc} *}

text {* Simplification procedures are ML functions that produce proven
  rewrite rules on demand.  They are associated with higher-order
  patterns that approximate the left-hand sides of equations.  The
  Simplifier first matches the current redex against one of the LHS
  patterns; if this succeeds, the corresponding ML function is
  invoked, passing the Simplifier context and redex term.  Thus rules
  may be specifically fashioned for particular situations, resulting
  in a more powerful mechanism than term rewriting by a fixed set of
  rules.

  Any successful result needs to be a (possibly conditional) rewrite
  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
  rule will be applied just as any ordinary rewrite rule.  It is
  expected to be already in \emph{internal form}, bypassing the
  automatic preprocessing of object-level equivalences.

  \begin{matharray}{rcl}
    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    simproc & : & @{text attribute} \\
  \end{matharray}

  @{rail \<open>
    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
    ;

    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
  \<close>}

  \begin{description}

  \item @{command "simproc_setup"} defines a named simplification
  procedure that is invoked by the Simplifier whenever any of the
  given term patterns match the current redex.  The implementation,
  which is provided as ML source text, needs to be of type @{ML_type
  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
  cterm} represents the current redex @{text r} and the result is
  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
  generalized version), or @{ML NONE} to indicate failure.  The
  @{ML_type simpset} argument holds the full context of the current
  Simplifier invocation, including the actual Isar proof context.  The
  @{ML_type morphism} informs about the difference of the original
  compilation context wrt.\ the one of the actual application later
  on.  The optional @{keyword "identifier"} specifies theorems that
  represent the logical content of the abstract theory of this
  simproc.

  Morphisms and identifiers are only relevant for simprocs that are
  defined within a local target context, e.g.\ in a locale.

  \item @{text "simproc add: name"} and @{text "simproc del: name"}
wenzelm@26782
   950
  add or delete named simprocs in the current Simplifier context.  The
wenzelm@26782
   951
  default is to add a simproc.  Note that @{command "simproc_setup"}
wenzelm@26782
   952
  already adds the new simproc to the subsequent context.
wenzelm@26782
   953
wenzelm@28760
   954
  \end{description}
wenzelm@26782
   955
*}
wenzelm@26782
   956
wenzelm@26782
   957
wenzelm@42925
   958
subsubsection {* Example *}
wenzelm@42925
   959
wenzelm@42925
   960
text {* The following simplification procedure for @{thm
wenzelm@42925
   961
  [source=false, show_types] unit_eq} in HOL performs fine-grained
wenzelm@42925
   962
  control over rule application, beyond higher-order pattern matching.
wenzelm@42925
   963
  Declaring @{thm unit_eq} as @{attribute simp} directly would make
wenzelm@42925
   964
  the simplifier loop!  Note that a version of this simplification
wenzelm@42925
   965
  procedure is already active in Isabelle/HOL.  *}
wenzelm@42925
   966
wenzelm@42925
   967
simproc_setup unit ("x::unit") = {*
wenzelm@42925
   968
  fn _ => fn _ => fn ct =>
wenzelm@42925
   969
    if HOLogic.is_unit (term_of ct) then NONE
wenzelm@42925
   970
    else SOME (mk_meta_eq @{thm unit_eq})
wenzelm@42925
   971
*}
wenzelm@42925
   972
wenzelm@42925
   973
text {* Since the Simplifier applies simplification procedures
wenzelm@42925
   974
  frequently, it is important to make the failure check in ML
wenzelm@42925
   975
  reasonably fast. *}
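
text {* The new simproc could be removed again from a local context by
  means of the @{attribute simproc} attribute (a hypothetical
  illustration): *}

notepad
begin
  note [[simproc del: unit]]
end
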
wenzelm@42925
   976
wenzelm@42925
   977
wenzelm@50079
   978
subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
wenzelm@50079
   979
wenzelm@50079
   980
text {* The core term-rewriting engine of the Simplifier is normally
wenzelm@50079
   981
  used in combination with some add-on components that modify the
wenzelm@50079
   982
  strategy and allow the integration of other non-Simplifier proof tools.
wenzelm@50079
   983
  These may be reconfigured in ML as explained below.  Even if the
wenzelm@50079
   984
  default strategies of object-logics like Isabelle/HOL are used
wenzelm@50079
   985
  unchanged, it helps to understand how the standard Simplifier
wenzelm@50079
   986
  strategies work. *}
wenzelm@50079
   987
wenzelm@50079
   988
wenzelm@50079
   989
subsubsection {* The subgoaler *}
wenzelm@50079
   990
wenzelm@50079
   991
text {*
wenzelm@50079
   992
  \begin{mldecls}
wenzelm@51717
   993
  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
wenzelm@51717
   994
  Proof.context -> Proof.context"} \\
wenzelm@51717
   995
  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
wenzelm@50079
   996
  \end{mldecls}
wenzelm@50079
   997
wenzelm@50079
   998
  The subgoaler is the tactic used to solve subgoals arising out of
wenzelm@50079
   999
  conditional rewrite rules or congruence rules.  The default should
wenzelm@50079
  1000
  be simplification itself.  In rare situations, this strategy may
wenzelm@50079
  1001
  need to be changed.  For example, if the premise of a conditional
wenzelm@50079
  1002
  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
wenzelm@50079
  1003
  ?m < ?n"}, the default strategy could loop.  % FIXME !??
wenzelm@50079
  1004
wenzelm@50079
  1005
  \begin{description}
wenzelm@50079
  1006
wenzelm@51717
  1007
  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
wenzelm@51717
  1008
  subgoaler of the context to @{text "tac"}.  The tactic will
wenzelm@51717
  1009
  be applied to the context of the running Simplifier instance.
wenzelm@50079
  1010
wenzelm@51717
  1011
  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
wenzelm@51717
  1012
  set of premises from the context.  This may be non-empty only if
wenzelm@50079
  1013
  the Simplifier has been told to utilize local assumptions in the
wenzelm@50079
  1014
  first place (cf.\ the options in \secref{sec:simp-meth}).
wenzelm@50079
  1015
wenzelm@50079
  1016
  \end{description}
wenzelm@50079
  1017
wenzelm@50079
  1018
  As an example, consider the following alternative subgoaler:
wenzelm@50079
  1019
*}
wenzelm@50079
  1020
wenzelm@50079
  1021
ML {*
wenzelm@51717
  1022
  fun subgoaler_tac ctxt =
wenzelm@50079
  1023
    assume_tac ORELSE'
wenzelm@51717
  1024
    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
wenzelm@51717
  1025
    asm_simp_tac ctxt
wenzelm@50079
  1026
*}
wenzelm@50079
  1027
wenzelm@50079
  1028
text {* This tactic first tries to solve the subgoal by assumption or
wenzelm@50079
  1029
  by resolving with one of the premises, calling simplification
wenzelm@50079
  1030
  only if that fails. *}
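
text {* It could be installed into a Simplifier context like this (a
  minimal sketch; the resulting context is merely bound to an ML value
  for illustration): *}

ML {*
  (*hypothetical context with the alternative subgoaler installed*)
  val my_ctxt = Simplifier.set_subgoaler subgoaler_tac @{context};
*}
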
wenzelm@50079
  1031
wenzelm@50079
  1032
wenzelm@50079
  1033
subsubsection {* The solver *}
wenzelm@50079
  1034
wenzelm@50079
  1035
text {*
wenzelm@50079
  1036
  \begin{mldecls}
wenzelm@50079
  1037
  @{index_ML_type solver} \\
wenzelm@51717
  1038
  @{index_ML Simplifier.mk_solver: "string ->
wenzelm@51717
  1039
  (Proof.context -> int -> tactic) -> solver"} \\
wenzelm@51717
  1040
  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1041
  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1042
  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1043
  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@50079
  1044
  \end{mldecls}
wenzelm@50079
  1045
wenzelm@50079
  1046
  A solver is a tactic that attempts to solve a subgoal after
wenzelm@50079
  1047
  simplification.  Its core functionality is to prove trivial subgoals
wenzelm@50079
  1048
  such as @{prop "True"} and @{text "t = t"}, but object-logics might
wenzelm@50079
  1049
  be more ambitious.  For example, Isabelle/HOL performs a restricted
wenzelm@50079
  1050
  version of linear arithmetic here.
wenzelm@50079
  1051
wenzelm@50079
  1052
  Solvers are packaged up in abstract type @{ML_type solver}, with
wenzelm@50079
  1053
  @{ML Simplifier.mk_solver} as the only operation to create a solver.
wenzelm@50079
  1054
wenzelm@50079
  1055
  \medskip Rewriting does not instantiate unknowns.  For example,
wenzelm@50079
  1056
  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
wenzelm@50079
  1057
  instantiating @{text "?A"}.  The solver, however, is an arbitrary
wenzelm@50079
  1058
  tactic and may instantiate unknowns as it pleases.  This is the only
wenzelm@50079
  1059
  way the Simplifier can handle a conditional rewrite rule whose
wenzelm@50079
  1060
  condition contains extra variables.  When a simplification tactic is
wenzelm@50079
  1061
  to be combined with other provers, especially with the Classical
wenzelm@50079
  1062
  Reasoner, it is important whether it can be considered safe or not.
wenzelm@50079
  1063
  For this reason a simpset contains two solvers: safe and unsafe.
wenzelm@50079
  1064
wenzelm@50079
  1065
  The standard simplification strategy solely uses the unsafe solver,
wenzelm@50079
  1066
  which is appropriate in most cases.  For special applications where
wenzelm@50079
  1067
  the simplification process is not allowed to instantiate unknowns
wenzelm@50079
  1068
  within the goal, simplification starts with the safe solver, but may
wenzelm@50079
  1069
  still apply the ordinary unsafe one in nested simplifications for
wenzelm@50079
  1070
  conditional rules or congruences. Note that in this way the overall
wenzelm@50079
  1071
  tactic is not totally safe: it may instantiate unknowns that appear
wenzelm@50079
  1072
  also in other subgoals.
wenzelm@50079
  1073
wenzelm@50079
  1074
  \begin{description}
wenzelm@50079
  1075
wenzelm@50079
  1076
  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
wenzelm@50079
  1077
  "tac"} into a solver; the @{text "name"} is only attached as a
wenzelm@50079
  1078
  comment and has no further significance.
wenzelm@50079
  1079
wenzelm@51717
  1080
  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
wenzelm@51717
  1081
  the safe solver of @{text "ctxt"}.
wenzelm@50079
  1082
wenzelm@51717
  1083
  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
wenzelm@50079
  1084
  additional safe solver; it will be tried after the solvers which had
wenzelm@51717
  1085
  already been present in @{text "ctxt"}.
wenzelm@50079
  1086
wenzelm@51717
  1087
  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
wenzelm@51717
  1088
  unsafe solver of @{text "ctxt"}.
wenzelm@50079
  1089
wenzelm@51717
  1090
  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
wenzelm@50079
  1091
  additional unsafe solver; it will be tried after the solvers which
wenzelm@51717
  1092
  had already been present in @{text "ctxt"}.
wenzelm@50079
  1093
wenzelm@50079
  1094
  \end{description}
wenzelm@50079
  1095
wenzelm@51717
  1096
  \medskip The solver tactic is invoked with the context of the
wenzelm@51717
  1097
  running Simplifier.  Further operations
wenzelm@50079
  1098
  may be used to retrieve relevant information, such as the list of
wenzelm@50079
  1099
  local Simplifier premises via @{ML Simplifier.prems_of} --- this
wenzelm@50079
  1100
  list may be non-empty only if the Simplifier runs in a mode that
wenzelm@50079
  1101
  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
wenzelm@50079
  1102
  solver is also presented with the full goal including its assumptions in
wenzelm@50079
  1103
  any case.  Thus it can use these (e.g.\ by calling @{ML
wenzelm@50079
  1104
  assume_tac}), even if the Simplifier proper happens to ignore local
wenzelm@50079
  1105
  premises at the moment.
wenzelm@50079
  1106
wenzelm@50079
  1107
  \medskip As explained before, the subgoaler is also used to solve
wenzelm@50079
  1108
  the premises of congruence rules.  These are usually of the form
wenzelm@50079
  1109
  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
wenzelm@50079
  1110
  @{text "?x"} needs to be instantiated with the result.  Typically,
wenzelm@50079
  1111
  the subgoaler will invoke the Simplifier at some point, which will
wenzelm@50079
  1112
  eventually call the solver.  For this reason, solver tactics must be
wenzelm@50079
  1113
  prepared to solve goals of the form @{text "t = ?x"}, usually by
wenzelm@50079
  1114
  reflexivity.  In particular, reflexivity should be tried before any
wenzelm@50079
  1115
  of the fancy automated proof tools.
wenzelm@50079
  1116
wenzelm@50079
  1117
  It may even happen that due to simplification the subgoal is no
wenzelm@50079
  1118
  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
wenzelm@50079
  1119
  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
wenzelm@50079
  1120
  try resolving with the theorem @{text "\<not> False"} of the
wenzelm@50079
  1121
  object-logic.
wenzelm@50079
  1122
wenzelm@50079
  1123
  \medskip
wenzelm@50079
  1124
wenzelm@50079
  1125
  \begin{warn}
wenzelm@50079
  1126
  If a premise of a congruence rule cannot be proved, then the
wenzelm@50079
  1127
  congruence is ignored.  This should only happen if the rule is
wenzelm@50079
  1128
  \emph{conditional} --- that is, contains premises not of the form
wenzelm@50079
  1129
  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
wenzelm@50079
  1130
  or possibly the subgoaler or solver, is faulty.
wenzelm@50079
  1131
  \end{warn}
wenzelm@50079
  1132
*}
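
text {* As a hypothetical sketch (assuming Isabelle/HOL and its theorem
  @{text refl}), a trivial solver that merely tries reflexivity and
  assumption could be constructed and installed like this: *}

ML {*
  val my_solver =
    Simplifier.mk_solver "my_solver"
      (fn _ => fn i => resolve_tac [@{thm refl}] i ORELSE assume_tac i);

  val my_ctxt = @{context} addSolver my_solver;
*}
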
wenzelm@50079
  1133
wenzelm@50079
  1134
wenzelm@50079
  1135
subsubsection {* The looper *}
wenzelm@50079
  1136
wenzelm@50079
  1137
text {*
wenzelm@50079
  1138
  \begin{mldecls}
wenzelm@51717
  1139
  @{index_ML_op setloop: "Proof.context *
wenzelm@51717
  1140
  (Proof.context -> int -> tactic) -> Proof.context"} \\
wenzelm@51717
  1141
  @{index_ML_op addloop: "Proof.context *
wenzelm@51717
  1142
  (string * (Proof.context -> int -> tactic))
wenzelm@51717
  1143
  -> Proof.context"} \\
wenzelm@51717
  1144
  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
wenzelm@51717
  1145
  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
wenzelm@51717
  1146
  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
wenzelm@50079
  1147
  \end{mldecls}
wenzelm@50079
  1148
wenzelm@50079
  1149
  The looper is a list of tactics that are applied after
wenzelm@50079
  1150
  simplification, in case the solver failed to solve the simplified
wenzelm@50079
  1151
  goal.  If the looper succeeds, the simplification process is started
wenzelm@50079
  1152
  all over again.  Each of the subgoals generated by the looper is
wenzelm@50079
  1153
  attacked in turn, in reverse order.
wenzelm@50079
  1154
wenzelm@50079
  1155
  A typical looper is \emph{case splitting}: the expansion of a
wenzelm@50079
  1156
  conditional.  Another possibility is to apply an elimination rule on
wenzelm@50079
  1157
  the assumptions.  More adventurous loopers could start an induction.
wenzelm@50079
  1158
wenzelm@50079
  1159
  \begin{description}
wenzelm@50079
  1160
wenzelm@51717
  1161
  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
wenzelm@52037
  1162
  looper tactic of @{text "ctxt"}.
wenzelm@50079
  1163
wenzelm@51717
  1164
  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
wenzelm@50079
  1165
  additional looper tactic with name @{text "name"}, which is
wenzelm@50079
  1166
  significant for managing the collection of loopers.  The tactic will
wenzelm@50079
  1167
  be tried after the looper tactics that had already been present in
wenzelm@52037
  1168
  @{text "ctxt"}.
wenzelm@50079
  1169
wenzelm@51717
  1170
  \item @{text "ctxt delloop name"} deletes the looper tactic that was
wenzelm@51717
  1171
  associated with @{text "name"} from @{text "ctxt"}.
wenzelm@50079
  1172
wenzelm@51717
  1173
  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
wenzelm@51717
  1174
  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
wenzelm@50079
  1175
wenzelm@51717
  1176
  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
wenzelm@50079
  1177
  tactic corresponding to @{text thm} from the looper tactics of
wenzelm@51717
  1178
  @{text "ctxt"}.
wenzelm@50079
  1179
wenzelm@50079
  1180
  \end{description}
wenzelm@50079
  1181
wenzelm@50079
  1182
  The splitter replaces applications of a given function; the
wenzelm@50079
  1183
  right-hand side of the replacement can be anything.  For example,
wenzelm@50079
  1184
  here is a splitting rule for conditional expressions:
wenzelm@50079
  1185
wenzelm@50079
  1186
  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
wenzelm@50079
  1187
wenzelm@50079
  1188
  Another example is the elimination operator for Cartesian products
wenzelm@50079
  1189
  (which happens to be called @{text split} in Isabelle/HOL):
wenzelm@50079
  1190
wenzelm@50079
  1191
  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (f a b))"}
wenzelm@50079
  1192
wenzelm@50079
  1193
  For technical reasons, there is a distinction between case splitting
wenzelm@50079
  1194
  in the conclusion and in the premises of a subgoal.  The former is
wenzelm@50079
  1195
  done by @{ML Splitter.split_tac} with rules like @{thm [source]
wenzelm@50079
  1196
  split_if} or @{thm [source] option.split}, which do not split the
wenzelm@50079
  1197
  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
wenzelm@50079
  1198
  with rules like @{thm [source] split_if_asm} or @{thm [source]
wenzelm@50079
  1199
  option.split_asm}, which split the subgoal.  The function @{ML
wenzelm@50079
  1200
  Splitter.add_split} automatically takes care of which tactic to
wenzelm@50079
  1201
  call, analyzing the form of the rules given as argument; it is the
wenzelm@50079
  1202
  same operation behind the @{text "split"} attribute or method modifier
wenzelm@50079
  1203
  syntax in the Isar source language.
wenzelm@50079
  1204
wenzelm@50079
  1205
  Case splits should be allowed only when necessary; they are
wenzelm@50079
  1206
  expensive and hard to control.  Case-splitting on if-expressions in
wenzelm@50079
  1207
  the conclusion is usually beneficial, so it is enabled by default in
wenzelm@50079
  1208
  Isabelle/HOL and Isabelle/FOL/ZF.
wenzelm@50079
  1209
wenzelm@50079
  1210
  \begin{warn}
wenzelm@50079
  1211
  With @{ML Splitter.split_asm_tac} as looper component, the
wenzelm@50079
  1212
  Simplifier may split subgoals!  This might cause unexpected problems
wenzelm@50079
  1213
  in tactic expressions that silently assume 0 or 1 subgoals after
wenzelm@50079
  1214
  simplification.
wenzelm@50079
  1215
  \end{warn}
wenzelm@50079
  1216
*}
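
text {* As a hypothetical ML example (assuming Isabelle/HOL), a split
  rule may be added to a context, and a named looper deleted; the name
  @{text "split_all_tac"} is assumed here to be the standard HOL looper
  for splitting bound product variables: *}

ML {*
  (*add case splitting over lists*)
  val ctxt1 = Splitter.add_split @{thm list.split} @{context};

  (*remove a looper by name*)
  val ctxt2 = @{context} delloop "split_all_tac";
*}
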
wenzelm@50079
  1217
wenzelm@50079
  1218
wenzelm@50063
  1219
subsection {* Forward simplification \label{sec:simp-forward} *}
wenzelm@26782
  1220
wenzelm@26782
  1221
text {*
wenzelm@26782
  1222
  \begin{matharray}{rcl}
wenzelm@28761
  1223
    @{attribute_def simplified} & : & @{text attribute} \\
wenzelm@26782
  1224
  \end{matharray}
wenzelm@26782
  1225
wenzelm@55112
  1226
  @{rail \<open>
wenzelm@42596
  1227
    @@{attribute simplified} opt? @{syntax thmrefs}?
wenzelm@26782
  1228
    ;
wenzelm@26782
  1229
wenzelm@40255
  1230
    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
wenzelm@55112
  1231
  \<close>}
wenzelm@26782
  1232
wenzelm@28760
  1233
  \begin{description}
wenzelm@26782
  1234
  
wenzelm@28760
  1235
  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
wenzelm@28760
  1236
  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
wenzelm@28760
  1237
  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
wenzelm@28760
  1238
  The result is fully simplified by default, including assumptions and
wenzelm@28760
  1239
  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
wenzelm@28760
  1240
  the same way as for the @{text simp} method.
wenzelm@26782
  1241
wenzelm@26782
  1242
  Note that forward simplification restricts the simplifier to its
wenzelm@26782
  1243
  most basic operation of term rewriting; solver and looper tactics
wenzelm@50079
  1244
  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
wenzelm@50079
  1245
  @{attribute simplified} attribute should be only rarely required
wenzelm@50079
  1246
  under normal circumstances.
wenzelm@26782
  1247
wenzelm@28760
  1248
  \end{description}
wenzelm@26782
  1249
*}
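
text {* As a hypothetical illustration (assuming Isabelle/HOL
  arithmetic), the attribute normalizes a fact on the spot; here the
  assumption @{text "x = 2 + 2"} becomes @{text "x = 4"}: *}

notepad
begin
  assume a: "x = (2::nat) + 2"
  note b = a [simplified]
end
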
wenzelm@26782
  1250
wenzelm@26782
  1251
wenzelm@27040
  1252
section {* The Classical Reasoner \label{sec:classical} *}
wenzelm@26782
  1253
wenzelm@42930
  1254
subsection {* Basic concepts *}
wenzelm@42927
  1255
wenzelm@42927
  1256
text {* Although Isabelle is generic, many users will be working in
wenzelm@42927
  1257
  some extension of classical first-order logic.  Isabelle/ZF is built
wenzelm@42927
  1258
  upon theory FOL, while Isabelle/HOL conceptually contains
wenzelm@42927
  1259
  first-order logic as a fragment.  Theorem-proving in predicate logic
wenzelm@42927
  1260
  is undecidable, but many automated strategies have been developed to
wenzelm@42927
  1261
  assist in this task.
wenzelm@42927
  1262
wenzelm@42927
  1263
  Isabelle's classical reasoner is a generic package that accepts
wenzelm@42927
  1264
  certain information about a logic and delivers a suite of automatic
wenzelm@42927
  1265
  proof tools, based on rules that are classified and declared in the
wenzelm@42927
  1266
  context.  These proof procedures are slow and simplistic compared
wenzelm@42927
  1267
  with high-end automated theorem provers, but they can save
wenzelm@42927
  1268
  considerable time and effort in practice.  They can prove theorems
wenzelm@42927
  1269
  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
wenzelm@42927
  1270
  milliseconds (including full proof reconstruction): *}
wenzelm@42927
  1271
wenzelm@42927
  1272
lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
wenzelm@42927
  1273
  by blast
wenzelm@42927
  1274
wenzelm@42927
  1275
lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
wenzelm@42927
  1276
  by blast
wenzelm@42927
  1277
wenzelm@42927
  1278
text {* The proof tools are generic.  They are not restricted to
wenzelm@42927
  1279
  first-order logic, and have been heavily used in the development of
wenzelm@42927
  1280
  the Isabelle/HOL library and applications.  The tactics can be
wenzelm@42927
  1281
  traced, and their components can be called directly; in this manner,
wenzelm@42927
  1282
  any proof can be viewed interactively.  *}
wenzelm@42927
  1283
wenzelm@42927
  1284
wenzelm@42927
  1285
subsubsection {* The sequent calculus *}
wenzelm@42927
  1286
wenzelm@42927
  1287
text {* Isabelle supports natural deduction, which is easy to use for
wenzelm@42927
  1288
  interactive proof.  But natural deduction does not easily lend
wenzelm@42927
  1289
  itself to automation, and has a bias towards intuitionism.  For
wenzelm@42927
  1290
  certain proofs in classical logic, it cannot be called natural.
wenzelm@42927
  1291
  The \emph{sequent calculus}, a generalization of natural deduction,
wenzelm@42927
  1292
  is easier to automate.
wenzelm@42927
  1293
wenzelm@42927
  1294
  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
wenzelm@42927
  1295
  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
wenzelm@42927
  1296
  logic, sequents can equivalently be made from lists or multisets of
wenzelm@42927
  1297
  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
wenzelm@42927
  1298
  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
wenzelm@42927
  1299
  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
wenzelm@42927
  1300
  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
wenzelm@42927
  1301
  sequent is \textbf{basic} if its left and right sides have a common
wenzelm@42927
  1302
  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
wenzelm@42927
  1303
  valid.
wenzelm@42927
  1304
wenzelm@42927
  1305
  Sequent rules are classified as \textbf{right} or \textbf{left},
wenzelm@42927
  1306
  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
wenzelm@42927
  1307
  Rules that operate on the right side are analogous to natural
wenzelm@42927
  1308
  deduction's introduction rules, and left rules are analogous to
wenzelm@42927
  1309
  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
wenzelm@42927
  1310
  is the rule
wenzelm@42927
  1311
  \[
wenzelm@42927
  1312
  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
wenzelm@42927
  1313
  \]
wenzelm@42927
  1314
  Applying the rule backwards, this breaks down some implication on
wenzelm@42927
  1315
  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
wenzelm@42927
  1316
  the sets of formulae that are unaffected by the inference.  The
wenzelm@42927
  1317
  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
wenzelm@42927
  1318
  single rule
wenzelm@42927
  1319
  \[
wenzelm@42927
  1320
  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
wenzelm@42927
  1321
  \]
wenzelm@42927
  1322
  This breaks down some disjunction on the right side, replacing it by
wenzelm@42927
  1323
  both disjuncts.  Thus, the sequent calculus is a kind of
wenzelm@42927
  1324
  multiple-conclusion logic.
wenzelm@42927
  1325
wenzelm@42927
  1326
  To illustrate the use of multiple formulae on the right, let us
wenzelm@42927
  1327
  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
wenzelm@42927
  1328
  backwards, we reduce this formula to a basic sequent:
wenzelm@42927
  1329
  \[
wenzelm@42927
  1330
  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
wenzelm@42927
  1331
    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
wenzelm@42927
  1332
      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
wenzelm@42927
  1333
        {@{text "P, Q \<turnstile> Q, P"}}}}
wenzelm@42927
  1334
  \]
wenzelm@42927
  1335
wenzelm@42927
  1336
  This example is typical of the sequent calculus: start with the
wenzelm@42927
  1337
  desired theorem and apply rules backwards in a fairly arbitrary
wenzelm@42927
  1338
  manner.  This yields a surprisingly effective proof procedure.
wenzelm@42927
  1339
  Quantifiers add only a few complications, since Isabelle handles
wenzelm@42927
  1340
  parameters and schematic variables.  See \cite[Chapter
wenzelm@42927
  1341
  10]{paulson-ml2} for further discussion.  *}
wenzelm@42927
  1342
wenzelm@42927
  1343
wenzelm@42927
  1344
subsubsection {* Simulating sequents by natural deduction *}
wenzelm@42927
  1345
wenzelm@42927
  1346
text {* Isabelle can represent sequents directly, as in the
wenzelm@42927
  1347
  object-logic LK.  But natural deduction is easier to work with, and
wenzelm@42927
  1348
  most object-logics employ it.  Fortunately, we can simulate the
wenzelm@42927
  1349
  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
wenzelm@42927
  1350
  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> ... \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
wenzelm@42927
  1351
  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
wenzelm@42927
  1352
  Elim-resolution plays a key role in simulating sequent proofs.
wenzelm@42927
  1353
wenzelm@42927
  1354
  We can easily handle reasoning on the left.  Elim-resolution with
wenzelm@42927
  1355
  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
wenzelm@42927
  1356
  a similar effect as the corresponding sequent rules.  For the other
wenzelm@42927
  1357
  connectives, we use sequent-style elimination rules instead of
wenzelm@42927
  1358
  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
wenzelm@42927
  1359
  But note that the rule @{text "(\<not>L)"} has no effect under our
wenzelm@42927
  1360
  representation of sequents!
wenzelm@42927
  1361
  \[
wenzelm@42927
  1362
  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
wenzelm@42927
  1363
  \]
wenzelm@42927
  1364
wenzelm@42927
  1365
  What about reasoning on the right?  Introduction rules can only
wenzelm@42927
  1366
  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
wenzelm@42927
  1367
  other right-side formulae are represented as negated assumptions,
wenzelm@42927
  1368
  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
wenzelm@42927
  1369
  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
wenzelm@42927
  1370
  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
wenzelm@42927
  1371
wenzelm@42927
  1372
  To ensure that swaps occur only when necessary, each introduction
wenzelm@42927
  1373
  rule is converted into a swapped form: it is resolved with the
wenzelm@42927
  1374
  second premise of @{text "(swap)"}.  The swapped form of @{text
wenzelm@42927
  1375
  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
wenzelm@42927
  1376
  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
wenzelm@42927
  1377
wenzelm@42927
  1378
  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
wenzelm@42927
  1379
  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
wenzelm@42927
  1380
wenzelm@42927
  1381
  Swapped introduction rules are applied using elim-resolution, which
wenzelm@42927
  1382
  deletes the negated formula.  Our representation of sequents also
wenzelm@42927
  1383
  requires the use of ordinary introduction rules.  If we had no
wenzelm@42927
  1384
  regard for readability of intermediate goal states, we could treat
wenzelm@42927
  1385
  the right side more uniformly by representing sequents as @{text
wenzelm@42927
  1386
  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
wenzelm@42927
  1387
*}
wenzelm@42927
  1388
wenzelm@42927
  1389
wenzelm@42927
  1390
subsubsection {* Extra rules for the sequent calculus *}
wenzelm@42927
  1391
wenzelm@42927
  1392
text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
wenzelm@42927
  1393
  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
wenzelm@42927
  1394
  In addition, we need rules to embody the classical equivalence
wenzelm@42927
  1395
  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
wenzelm@42927
  1396
  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
wenzelm@42927
  1397
  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
wenzelm@42927
  1398
wenzelm@42927
  1399
  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
wenzelm@42927
  1400
  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
wenzelm@42927
  1401
wenzelm@42927
  1402
  Quantifier replication also requires special rules.  In classical
wenzelm@42927
  1403
  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
wenzelm@42927
  1404
  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
wenzelm@42927
  1405
  \[
wenzelm@42927
  1406
  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
wenzelm@42927
  1407
  \qquad
wenzelm@42927
  1408
  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
wenzelm@42927
  1409
  \]
wenzelm@42927
  1410
  Thus both kinds of quantifier may be replicated.  Theorems requiring
wenzelm@42927
  1411
  multiple uses of a universal formula are easy to invent; consider
wenzelm@42927
  1412
  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
wenzelm@42927
  1413
  @{text "n > 1"}.  Natural examples of the multiple use of an
wenzelm@42927
  1414
  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
wenzelm@42927
  1415
  \<longrightarrow> P y"}.
wenzelm@42927
  1416
wenzelm@42927
  1417
  Forgoing quantifier replication loses completeness, but gains
wenzelm@42927
  1418
  decidability, since the search space becomes finite.  Many useful
wenzelm@42927
  1419
  theorems can be proved without replication, and the search generally
wenzelm@42927
  1420
  delivers its verdict in a reasonable time.  To adopt this approach,
wenzelm@42927
  1421
  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
wenzelm@42927
  1422
  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
wenzelm@42927
  1423
  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
wenzelm@42927
  1424
  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
wenzelm@42927
  1425
wenzelm@42927
  1426
  Elim-resolution with this rule will delete the universal formula
wenzelm@42927
  1427
  after a single use.  To replicate universal quantifiers, replace the
wenzelm@42927
  1428
  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
wenzelm@42927
  1429
wenzelm@42927
  1430
  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
wenzelm@42927
  1431
  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
wenzelm@42927
  1432
wenzelm@42927
  1433
  All introduction rules mentioned above are also useful in swapped
wenzelm@42927
  1434
  form.
wenzelm@42927
  1435
wenzelm@42927
  1436
  Replication makes the search space infinite; we must apply the rules
wenzelm@42927
  1437
  with care.  The classical reasoner distinguishes between safe and
wenzelm@42927
  1438
  unsafe rules, applying the latter only when there is no alternative.
wenzelm@42927
  1439
  Depth-first search may well go down a blind alley; best-first search
wenzelm@42927
  1440
  is better behaved in an infinite search space.  However, quantifier
wenzelm@42927
  1441
  replication is too expensive to prove any but the simplest theorems.
wenzelm@42927
  1442
*}
wenzelm@42927
  1443
wenzelm@42927
  1444
wenzelm@42928
  1445
subsection {* Rule declarations *}
wenzelm@42928
  1446
wenzelm@42928
  1447
text {* The proof tools of the Classical Reasoner depend on
wenzelm@42928
  1448
  collections of rules declared in the context, which are classified
wenzelm@42928
  1449
  as introduction, elimination or destruction and as \emph{safe} or
wenzelm@42928
  1450
  \emph{unsafe}.  In general, safe rules can be attempted blindly,
wenzelm@42928
  1451
  while unsafe rules must be used with care.  A safe rule must never
wenzelm@42928
  1452
  reduce a provable goal to an unprovable set of subgoals.
wenzelm@42928
  1453
wenzelm@42928
  1454
  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
wenzelm@42928
  1455
  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
wenzelm@42928
  1456
  unprovable subgoal.  Any rule is unsafe whose premises contain new
wenzelm@42928
  1457
  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
wenzelm@42928
  1458
  unsafe, since it is applied via elim-resolution, which discards the
wenzelm@42928
  1459
  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
wenzelm@42928
  1460
  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
wenzelm@42928
  1461
  unsafe for similar reasons.  The quantifier duplication rule @{text
wenzelm@42928
  1462
  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
wenzelm@42928
  1463
  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
wenzelm@42928
  1464
  looping.  In classical first-order logic, all rules are safe except
wenzelm@42928
  1465
  those mentioned above.
wenzelm@42928
  1466
wenzelm@42928
  1467
  The safe~/ unsafe distinction is vague, and may be regarded merely
wenzelm@42928
  1468
  as a way of giving some rules priority over others.  One could argue
wenzelm@42928
  1469
  that @{text "(\<or>E)"} is unsafe, because repeated application of it
wenzelm@42928
  1470
  could generate exponentially many subgoals.  Induction rules are
wenzelm@42928
  1471
  unsafe because inductive proofs are difficult to set up
wenzelm@42928
  1472
  automatically.  Any inference is unsafe that instantiates an unknown
wenzelm@42928
  1473
  in the proof state --- thus matching must be used, rather than
wenzelm@42928
  1474
  unification.  Even proof by assumption is unsafe if it instantiates
wenzelm@42928
  1475
  unknowns shared with other subgoals.
wenzelm@42928
  1476
wenzelm@42928
  1477
  \begin{matharray}{rcl}
wenzelm@42928
  1478
    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
wenzelm@42928
  1479
    @{attribute_def intro} & : & @{text attribute} \\
wenzelm@42928
  1480
    @{attribute_def elim} & : & @{text attribute} \\
wenzelm@42928
  1481
    @{attribute_def dest} & : & @{text attribute} \\
wenzelm@42928
  1482
    @{attribute_def rule} & : & @{text attribute} \\
wenzelm@42928
  1483
    @{attribute_def iff} & : & @{text attribute} \\
wenzelm@42928
  1484
    @{attribute_def swapped} & : & @{text attribute} \\
wenzelm@42928
  1485
  \end{matharray}
wenzelm@42928
  1486
wenzelm@55112
  1487
  @{rail \<open>
wenzelm@42928
  1488
    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
wenzelm@42928
  1489
    ;
wenzelm@42928
  1490
    @@{attribute rule} 'del'
wenzelm@42928
  1491
    ;
wenzelm@42928
  1492
    @@{attribute iff} (((() | 'add') '?'?) | 'del')
wenzelm@55112
  1493
  \<close>}
wenzelm@42928
  1494
wenzelm@42928
  1495
  \begin{description}
wenzelm@42928
  1496
wenzelm@42928
  1497
  \item @{command "print_claset"} prints the collection of rules
wenzelm@42928
  1498
  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
wenzelm@42928
  1499
  within the context.
wenzelm@42928
  1500
wenzelm@42928
  1501
  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
wenzelm@42928
  1502
  declare introduction, elimination, and destruction rules,
wenzelm@42928
  1503
  respectively.  By default, rules are considered as \emph{unsafe}
wenzelm@42928
  1504
  (i.e.\ not applied blindly without backtracking), while ``@{text
wenzelm@42928
  1505
  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
wenzelm@42928
  1506
  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
wenzelm@42928
  1507
  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
wenzelm@42928
  1508
  of the @{method rule} method).  The optional natural number
wenzelm@42928
  1509
  specifies an explicit weight argument, which is ignored by the
wenzelm@42928
  1510
  automated reasoning tools, but determines the search order of single
wenzelm@42928
  1511
  rule steps.
wenzelm@42928
  1512
wenzelm@42928
  1513
  Introduction rules are those that can be applied using ordinary
wenzelm@42928
  1514
  resolution.  Their swapped forms are generated internally, which
wenzelm@42928
  1515
  will be applied using elim-resolution.  Elimination rules are
wenzelm@42928
  1516
  applied using elim-resolution.  Rules are sorted by the number of
wenzelm@42928
  1517
  new subgoals they will yield; rules that generate the fewest
wenzelm@42928
  1518
  subgoals will be tried first.  Otherwise, later declarations take
wenzelm@42928
  1519
  precedence over earlier ones.
wenzelm@42928
  1520
wenzelm@42928
  1521
  Rules already present in the context with the same classification
wenzelm@42928
  1522
  are ignored.  A warning is printed if the rule has already been
wenzelm@42928
  1523
  added with some other classification, but the rule is added anyway
wenzelm@42928
  1524
  as requested.
wenzelm@42928
  1525
wenzelm@42928
  1526
  \item @{attribute rule}~@{text del} deletes all occurrences of a
wenzelm@42928
  1527
  rule from the classical context, regardless of its classification as
wenzelm@42928
  1528
  introduction~/ elimination~/ destruction and safe~/ unsafe.
wenzelm@42928
  1529
wenzelm@42928
  1530
  \item @{attribute iff} declares logical equivalences to the
wenzelm@42928
  1531
  Simplifier and the Classical reasoner at the same time.
wenzelm@42928
  1532
  Non-conditional rules result in a safe introduction and elimination
wenzelm@42928
  1533
  pair; conditional ones are considered unsafe.  Rules with negative
wenzelm@42928
  1534
  conclusion are automatically inverted (using @{text "\<not>"}-elimination
wenzelm@42928
  1535
  internally).
wenzelm@42928
  1536
wenzelm@42928
  1537
  The ``@{text "?"}'' version of @{attribute iff} declares rules to
wenzelm@42928
  1538
  the Isabelle/Pure context only, and omits the Simplifier
wenzelm@42928
  1539
  declaration.
wenzelm@42928
  1540
wenzelm@42928
  1541
  \item @{attribute swapped} turns an introduction rule into an
wenzelm@42928
  1542
  elimination, by resolving with the classical swap principle @{text
wenzelm@42928
  1543
  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
wenzelm@42928
  1544
  illustrative purposes: the Classical Reasoner already swaps rules
wenzelm@42928
  1545
  internally as explained above.
wenzelm@42928
  1546
wenzelm@28760
  1547
  \end{description}
wenzelm@26782
  1548
*}
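
text {* For example (hypothetical declarations), rules can be added to
  the classical context when they are proved, and removed again via
  @{attribute rule}~@{text del}: *}

lemma my_unsafe_intro [intro]: "P \<Longrightarrow> P \<or> Q"
  by (rule disjI1)

lemma my_safe_iff [iff]: "P \<and> P \<longleftrightarrow> P"
  by blast

declare my_unsafe_intro [rule del]
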
wenzelm@26782
  1549
wenzelm@26782
  1550
wenzelm@43365
  1551
subsection {* Structured methods *}
wenzelm@43365
  1552
wenzelm@43365
  1553
text {*
wenzelm@43365
  1554
  \begin{matharray}{rcl}
wenzelm@43365
  1555
    @{method_def rule} & : & @{text method} \\
wenzelm@43365
  1556
    @{method_def contradiction} & : & @{text method} \\
wenzelm@43365
  1557
  \end{matharray}
wenzelm@43365
  1558
wenzelm@55112
  1559
  @{rail \<open>
wenzelm@43365
  1560
    @@{method rule} @{syntax thmrefs}?
wenzelm@55112
  1561
  \<close>}
wenzelm@43365
  1562
wenzelm@43365
  1563
  \begin{description}
wenzelm@43365
  1564
wenzelm@43365
  1565
  \item @{method rule} as offered by the Classical Reasoner is a
wenzelm@43365
  1566
  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
wenzelm@43365
  1567
  versions work the same, but the classical version observes the
wenzelm@43365
  1568
  classical rule context in addition to that of Isabelle/Pure.
wenzelm@43365
  1569
wenzelm@43365
  1570
  Common object logics (HOL, ZF, etc.) declare a rich collection of
wenzelm@43365
  1571
  classical rules (even if these would qualify as intuitionistic
wenzelm@43365
  1572
  ones), but only few declarations to the rule context of
wenzelm@43365
  1573
  Isabelle/Pure (\secref{sec:pure-meth-att}).
wenzelm@43365
  1574
wenzelm@43365
  1575
  \item @{method contradiction} solves some goal by contradiction,
wenzelm@43365
  1576
  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
wenzelm@43365
  1577
  facts, which are guaranteed to participate, may appear in either
wenzelm@43365
  1578
  order.
wenzelm@43365
  1579
wenzelm@43365
  1580
  \end{description}
wenzelm@43365
  1581
*}
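
text {* For example (a hypothetical snippet), @{method contradiction}
  derives an arbitrary conclusion from a pair of contradictory facts: *}

notepad
begin
  assume a: "\<not> A" and b: A
  from a b have B by contradiction
end
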
wenzelm@43365
  1582
wenzelm@43365
  1583
wenzelm@50070
  1584
subsection {* Fully automated methods *}
wenzelm@26782
  1585
wenzelm@26782
  1586
text {*
wenzelm@26782
  1587
  \begin{matharray}{rcl}
wenzelm@28761
  1588
    @{method_def blast} & : & @{text method} \\
wenzelm@42930
  1589
    @{method_def auto} & : & @{text method} \\
wenzelm@42930
  1590
    @{method_def force} & : & @{text method} \\
wenzelm@28761
  1591
    @{method_def fast} & : & @{text method} \\
wenzelm@28761
  1592
    @{method_def slow} & : & @{text method} \\
wenzelm@28761
  1593
    @{method_def best} & : & @{text method} \\
nipkow@44911
  1594
    @{method_def fastforce} & : & @{text method} \\
wenzelm@28761
  1595
    @{method_def slowsimp} & : & @{text method} \\
wenzelm@28761
  1596
    @{method_def bestsimp} & : & @{text method} \\
wenzelm@43367
  1597
    @{method_def deepen} & : & @{text method} \\
wenzelm@26782
  1598
  \end{matharray}
wenzelm@26782
  1599
wenzelm@55112
  1600
  @{rail \<open>
wenzelm@42930
  1601
    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
wenzelm@42930
  1602
    ;
wenzelm@42596
  1603
    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
wenzelm@26782
  1604
    ;
wenzelm@42930
  1605
    @@{method force} (@{syntax clasimpmod} * )
wenzelm@42930
  1606
    ;
wenzelm@42930
  1607
    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
wenzelm@26782
  1608
    ;
nipkow@44911
  1609
    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
wenzelm@42930
  1610
      (@{syntax clasimpmod} * )
wenzelm@42930
  1611
    ;
wenzelm@43367
  1612
    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
wenzelm@43367
  1613
    ;
wenzelm@42930
  1614
    @{syntax_def clamod}:
wenzelm@42930
  1615
      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
wenzelm@42930
  1616
    ;
wenzelm@42596
  1617
    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
wenzelm@26782
  1618
      ('cong' | 'split') (() | 'add' | 'del') |
wenzelm@26782
  1619
      'iff' (((() | 'add') '?'?) | 'del') |
wenzelm@42596
  1620
      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
wenzelm@55112
  1621
  \<close>}
wenzelm@26782
  1622
wenzelm@28760
  1623
  \begin{description}
wenzelm@26782
  1624
wenzelm@42930
  1625
  \item @{method blast} is a separate classical tableau prover that
wenzelm@42930
  1626
  uses the same classical rule declarations as explained before.
wenzelm@42930
  1627
wenzelm@42930
  1628
  Proof search is coded directly in ML using special data structures.
wenzelm@42930
  1629
  A successful proof is then reconstructed using regular Isabelle
wenzelm@42930
  1630
  inferences.  It is faster and more powerful than the other classical
wenzelm@42930
  1631
  reasoning tools, but has major limitations too.
wenzelm@42930
  1632
wenzelm@42930
  1633
  \begin{itemize}
wenzelm@42930
  1634
wenzelm@42930
  1635
  \item It does not use the classical wrapper tacticals, such as the
nipkow@44911
  1636
  integration with the Simplifier of @{method fastforce}.
wenzelm@42930
  1637
wenzelm@42930
  1638
  \item It does not perform higher-order unification, as needed by the
wenzelm@42930
  1639
  rule @{thm [source=false] rangeI} in HOL.  There are often
wenzelm@42930
  1640
  alternatives to such rules, for example @{thm [source=false]
wenzelm@42930
  1641
  range_eqI}.
wenzelm@42930
  1642
wenzelm@42930
  1643
  \item Function variables may only be applied to parameters of the
wenzelm@42930
  1644
  subgoal.  (This restriction arises because the prover does not use
wenzelm@42930
  1645
  higher-order unification.)  If other function variables are present
wenzelm@42930
  1646
  then the prover will fail with the message \texttt{Function Var's
wenzelm@42930
  1647
  argument not a bound variable}.
wenzelm@42930
  1648
wenzelm@42930
  1649
  \item Its proof strategy is more general than @{method fast} but can
wenzelm@42930
  1650
  be slower.  If @{method blast} fails or seems to be running forever,
wenzelm@42930
  1651
  try @{method fast} and the other proof tools described below.
wenzelm@42930
  1652
wenzelm@42930
  1653
  \end{itemize}
wenzelm@42930
  1654
wenzelm@42930
  1655
  The optional integer argument specifies a bound for the number of
wenzelm@42930
  1656
  unsafe steps used in a proof.  By default, @{method blast} starts
wenzelm@42930
  1657
  with a bound of 0 and increases it successively to 20.  In contrast,
wenzelm@42930
  1658
  @{text "(blast lim)"} tries to prove the goal using a search bound
wenzelm@42930
  1659
  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
wenzelm@42930
  1660
  be made much faster by supplying the successful search bound to this
wenzelm@42930
  1661
  proof method instead.
wenzelm@42930
  1662
wenzelm@42930
  1663
  \item @{method auto} combines classical reasoning with
wenzelm@42930
  1664
  simplification.  It is intended for situations where there are a lot
wenzelm@42930
  1665
  of mostly trivial subgoals; it proves all the easy ones, leaving the
wenzelm@42930
  1666
  ones it cannot prove.  Occasionally, attempting to prove the hard
wenzelm@42930
  1667
  ones may take a long time.
wenzelm@42930
  1668
wenzelm@43332
  1669
  The optional depth arguments in @{text "(auto m n)"} refer to its
wenzelm@43332
  1670
  builtin classical reasoning procedures: @{text m} (default 4) is for
wenzelm@43332
  1671
  @{method blast}, which is tried first, and @{text n} (default 2) is
wenzelm@43332
  1672
  for a slower but more general alternative that also takes wrappers
wenzelm@43332
  1673
  into account.
wenzelm@42930
  1674
wenzelm@42930
  1675
  \item @{method force} is intended to prove the first subgoal
wenzelm@42930
  1676
  completely, using many fancy proof tools and performing a rather
wenzelm@42930
  1677
  exhaustive search.  As a result, proof attempts may take rather long
wenzelm@42930
  1678
  or diverge easily.
wenzelm@42930
  1679
wenzelm@42930
  1680
  \item @{method fast}, @{method best}, @{method slow} attempt to
wenzelm@42930
  1681
  prove the first subgoal using sequent-style reasoning as explained
wenzelm@42930
  1682
  before.  Unlike @{method blast}, they construct proofs directly in
wenzelm@42930
  1683
  Isabelle.
wenzelm@26782
  1684
wenzelm@42930
  1685
  There is a difference in search strategy and back-tracking: @{method
wenzelm@42930
  1686
  fast} uses depth-first search and @{method best} uses best-first
wenzelm@42930
  1687
  search (guided by a heuristic function: normally the total size of
wenzelm@42930
  1688
  the proof state).
wenzelm@42930
  1689
wenzelm@42930
  1690
  Method @{method slow} is like @{method fast}, but conducts a broader
wenzelm@42930
  1691
  search: it may, when backtracking from a failed proof attempt, undo
wenzelm@42930
  1692
  even the step of proving a subgoal by assumption.
wenzelm@42930
  1693
wenzelm@47967
  1694
  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
wenzelm@47967
  1695
  are like @{method fast}, @{method slow}, @{method best},
wenzelm@47967
  1696
  respectively, but use the Simplifier as an additional wrapper. The name
wenzelm@47967
  1697
  @{method fastforce} reflects the behaviour of this popular method
wenzelm@47967
  1698
  better without requiring an understanding of its implementation.
wenzelm@42930
  1699
wenzelm@43367
  1700
  \item @{method deepen} works by exhaustive search up to a certain
wenzelm@43367
  1701
  depth.  The start depth is 4 (unless specified explicitly), and the
wenzelm@43367
  1702
  depth is increased iteratively up to 10.  Unsafe rules are modified
wenzelm@43367
  1703
  to preserve the formula they act on, so that it can be used repeatedly.
wenzelm@43367
  1704
  This method can prove more goals than @{method fast}, but is much
wenzelm@43367
  1705
  slower, for example if the assumptions have many universal
wenzelm@43367
  1706
  quantifiers.
wenzelm@43367
  1707
wenzelm@42930
  1708
  \end{description}
wenzelm@42930
  1709
wenzelm@42930
  1710
  All of the above methods support additional modifiers of the context
wenzelm@42930
  1711
  of classical (and simplifier) rules, but the ones related to the
wenzelm@42930
  1712
  Simplifier are explicitly prefixed by @{text simp} here.  The
wenzelm@42930
  1713
  semantics of these ad-hoc rule declarations is analogous to the
wenzelm@42930
  1714
  attributes given before.  Facts provided by forward chaining are
wenzelm@42930
  1715
  inserted into the goal before commencing proof search.
wenzelm@42930
  1716
*}
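
text {* The following hypothetical example illustrates the modifier
  syntax; the extra rules given here are merely illustrative and not
  strictly required for this particular goal: *}

lemma "(\<exists>x. P x) \<and> Q \<longrightarrow> (\<exists>x. P x \<and> Q)"
  by (fastforce intro: exI simp add: conj_commute)
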


subsection {* Partially automated methods *}

text {* These proof methods may help in situations when the
  fully-automated tools fail.  The result is a simpler subgoal that
  can be tackled by other means, such as by manual instantiation of
  quantifiers.

  \begin{matharray}{rcl}
    @{method_def safe} & : & @{text method} \\
    @{method_def clarify} & : & @{text method} \\
    @{method_def clarsimp} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
    ;
    @@{method clarsimp} (@{syntax clasimpmod} * )
  \<close>}

  \begin{description}

  \item @{method safe} repeatedly performs safe steps on all subgoals.
  It is deterministic, with at most one outcome.

  \item @{method clarify} performs a series of safe steps without
  splitting subgoals; see also @{method clarify_step}.

  \item @{method clarsimp} acts like @{method clarify}, but also does
  simplification.  Note that if the Simplifier context includes a
  splitter for the premises, the subgoal may still be split.

  \end{description}
*}
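
text {* For instance, in the following ad-hoc example @{method clarify}
  essentially just strips the outer implication (nothing is split and
  no unknowns are instantiated); the remaining subgoal is then finished
  off by @{method blast}: *}

notepad
begin
  fix P :: "'a \<Rightarrow> bool"
  have "(\<forall>x. P x) \<longrightarrow> (\<exists>x. P x)"
    apply clarify
    apply blast
    done
end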


subsection {* Single-step tactics *}

text {*
  \begin{matharray}{rcl}
    @{method_def safe_step} & : & @{text method} \\
    @{method_def inst_step} & : & @{text method} \\
    @{method_def step} & : & @{text method} \\
    @{method_def slow_step} & : & @{text method} \\
    @{method_def clarify_step} & : & @{text method} \\
  \end{matharray}

  These are the primitive tactics behind the automated proof methods
  of the Classical Reasoner.  By calling them yourself, you can
  execute these procedures one step at a time.

  \begin{description}

  \item @{method safe_step} performs a safe step on the first subgoal.
  The safe wrapper tacticals are applied to a tactic that may include
  proof by assumption or Modus Ponens (taking care not to instantiate
  unknowns), or substitution.

  \item @{method inst_step} is like @{method safe_step}, but allows
  unknowns to be instantiated.

  \item @{method step} is the basic step of the proof procedure; it
  operates on the first subgoal.  The unsafe wrapper tacticals are
  applied to a tactic that tries @{method safe}, @{method inst_step},
  or applies an unsafe rule from the context.

  \item @{method slow_step} resembles @{method step}, but allows
  backtracking between using safe rules with instantiation (@{method
  inst_step}) and using unsafe rules.  The resulting search space is
  larger.

  \item @{method clarify_step} performs a safe step on the first
  subgoal; no splitting step is applied.  For example, the subgoal
  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
  Modus Ponens, etc., may be performed provided they do not
  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
  be eliminated.  The safe wrapper tactical is applied.

  \end{description}
*}
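
text {* The following ad-hoc example performs such single steps
  manually, instead of invoking @{method safe} as a whole: *}

notepad
begin
  fix P Q :: bool
  have "P \<and> Q \<longrightarrow> Q \<and> P"
    apply safe_step  -- "a single safe step, here the introduction rule for implication"
    apply safe_step+  -- "repeated safe steps solve the remaining subgoals"
    done
end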


subsection {* Modifying the search step *}

text {*
  \begin{mldecls}
    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
    @{index_ML_op addSWrapper: "Proof.context *
  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
    @{index_ML_op addSbefore: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op addSafter: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
    @{index_ML_op addWrapper: "Proof.context *
  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
    @{index_ML_op addbefore: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op addafter: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
    @{index_ML addSss: "Proof.context -> Proof.context"} \\
    @{index_ML addss: "Proof.context -> Proof.context"} \\
  \end{mldecls}

  The proof strategy of the Classical Reasoner is simple.  Perform as
  many safe inferences as possible; or else, apply certain safe rules,
  allowing instantiation of unknowns; or else, apply an unsafe rule.
  The tactics also eliminate assumptions of the form @{text "x = t"}
  by substitution if they have been set up to do so.  They may perform
  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.

  The classical reasoning tools --- except @{method blast} --- allow
  the user to modify this basic proof strategy by applying two lists
  of arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
  which is considered to contain safe wrappers only, affects @{method
  safe_step} and all the tactics that call it.  The second one, which
  may contain unsafe wrappers, affects the unsafe parts of @{method
  step}, @{method slow_step}, and the tactics that call them.  A
  wrapper transforms each step of the search, for example by
  attempting other tactics before or after the original step tactic.
  All members of a wrapper list are applied in turn to the respective
  step tactic.

  Initially the two wrapper lists are empty, which means no
  modification of the step tactics. Safe and unsafe wrappers are added
  to a claset with the functions given below, supplying them with
  wrapper names.  These names may be used to selectively delete
  wrappers.

  \begin{description}

  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
  which should yield a safe tactic, to modify the existing safe step
  tactic.

  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
  safe wrapper, such that it is tried \emph{before} each safe step of
  the search.

  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
  safe wrapper, such that it is tried when a safe step of the search
  would fail.

  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
  the given name.

  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
  modify the existing (unsafe) step tactic.

  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated \emph{before}
  the result of each unsafe step.

  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated \emph{after}
  the result of each unsafe step.

  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
  the given name.

  \item @{text "addSss"} adds the simpset of the context to its
  classical set.  The assumptions and goal will be simplified, in a
  rather safe way, after each safe step of the search.

  \item @{text "addss"} adds the simpset of the context to its
  classical set.  The assumptions and goal will be simplified before
  each unsafe step of the search.

  \end{description}
*}
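
text {* As a hypothetical sketch (the wrapper name is made up, and
  plain @{text "asm_full_simp_tac"} merely stands for any tactic of
  suitable type), an unsafe wrapper that runs the Simplifier after
  each unsafe step could be added and removed like this: *}

ML {*
  (*add a hypothetical unsafe wrapper that attempts full
    simplification after each unsafe step of the search*)
  fun add_simp_after ctxt =
    ctxt addafter ("simp_after", fn ctxt' => asm_full_simp_tac ctxt');

  (*delete that wrapper again, referring to it by name*)
  fun del_simp_after ctxt =
    ctxt delWrapper "simp_after";
*}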


section {* Object-logic setup \label{sec:object-logic} *}

text {*
  \begin{matharray}{rcl}
    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
    @{method_def atomize} & : & @{text method} \\
    @{attribute_def atomize} & : & @{text attribute} \\
    @{attribute_def rule_format} & : & @{text attribute} \\
    @{attribute_def rulify} & : & @{text attribute} \\
  \end{matharray}

  The very starting point for any Isabelle object-logic is a ``truth
  judgment'' that links object-level statements to the meta-logic
  (with its minimal language of @{text prop} that covers universal
  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).

  Common object-logics are sufficiently expressive to internalize rule
  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
  language.  This is useful in certain situations where a rule needs
  to be viewed as an atomic statement from the meta-level perspective,
  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.

  From the following language elements, only the @{method atomize}
  method and @{attribute rule_format} attribute are occasionally
  required by end-users; the rest is for those who need to set up
  their own object-logic.  In the latter case existing formulations of
  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.

  Generic tools may refer to the information provided by object-logic
  declarations internally.

  @{rail \<open>
    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
    ;
    @@{attribute atomize} ('(' 'full' ')')?
    ;
    @@{attribute rule_format} ('(' 'no_asm' ')')?
  \<close>}

  \begin{description}

  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
  @{text c} as the truth judgment of the current object-logic.  Its
  type @{text \<sigma>} should specify a coercion of the category of
  object-level propositions to @{text prop} of the Pure meta-logic;
  the mixfix annotation @{text "(mx)"} would typically just link the
  object language (internally of syntactic category @{text logic})
  with that of @{text prop}.  Only one @{command "judgment"}
  declaration may be given in any theory development.

  \item @{method atomize} (as a method) rewrites any non-atomic
  premises of a subgoal, using the meta-level equations declared via
  @{attribute atomize} (as an attribute) beforehand.  As a result,
  heavily nested goals become amenable to fundamental operations such
  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
  "(full)"}'' option here means to turn the whole subgoal into an
  object-statement (if possible), including the outermost parameters
  and assumptions as well.

  A typical collection of @{attribute atomize} rules for a particular
  object-logic would provide an internalization for each of the
  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
  Meta-level conjunction should be covered as well (this is
  particularly important for locales, see \secref{sec:locale}).

  \item @{attribute rule_format} rewrites a theorem by the equalities
  declared as @{attribute rulify} rules in the current object-logic.
  By default, the result is fully normalized, including assumptions
  and conclusions at any depth.  The @{text "(no_asm)"} option
  restricts the transformation to the conclusion of a rule.

  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
  rule_format} is to replace (bounded) universal quantification
  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.

  \end{description}
*}
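
text {* For example (an ad-hoc statement that merely illustrates the
  attribute), @{attribute rule_format} turns a result stated via
  @{text "\<forall>"} and @{text "\<longrightarrow>"} into a rule over @{text "\<And>"} and
  @{text "\<Longrightarrow>"}: *}

notepad
begin
  have all_imp: "\<forall>x y :: nat. x = y \<longrightarrow> y = x"
    by simp
  note rule_version = all_imp [rule_format]
    -- "now of the form x = y ==> y = x"
end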


section {* Tracing higher-order unification *}

text {*
  \begin{tabular}{rcll}
    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
  \end{tabular}
  \medskip

  Higher-order unification works well in most practical situations,
  but sometimes needs extra care to identify problems.  These tracing
  options may help.

  \begin{description}

  \item @{attribute unify_trace_simp} controls tracing of the
  simplification phase of higher-order unification.

  \item @{attribute unify_trace_types} controls warnings of
  incompleteness, when unification is not considering all possible
  instantiations of schematic type variables.

  \item @{attribute unify_trace_bound} determines the search depth at
  which unification starts to print tracing information; a value of 0
  gives full tracing.  At the default value, tracing information is
  almost never printed in practice.

  \item @{attribute unify_search_bound} prevents unification from
  searching past the given depth.  Because of this bound, higher-order
  unification cannot return an infinite sequence, though it can return
  an exponentially long one.  The search rarely approaches the default
  value in practice.  If the search is cut off, unification prints a
  warning ``Unification bound exceeded''.

  \end{description}

  \begin{warn}
  Options for unification cannot be modified in a local context.  Only
  the global theory content is taken into account.
  \end{warn}
*}
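
text {* For example, the bounds could be adjusted in the global theory
  context as follows (the values shown here are just the defaults
  given above): *}

declare [[unify_trace_bound = 50, unify_search_bound = 60]]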
end