(*  Title:      src/Doc/Isar_Ref/Generic.thy  *)

theory Generic
imports Base Main
begin


chapter {* Generic tools and packages \label{ch:gen-tools} *}


section {* Configuration options \label{sec:config} *}

text {* Isabelle/Pure maintains a record of named configuration
  options within the theory or proof context, with values of type
  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
  string}.  Tools may declare options in ML, and then refer to these
  values (relative to the context).  Thus global reference variables
  are easily avoided.  The user may change the value of a
  configuration option by means of an associated attribute of the same
  name.  This form of context declaration works particularly well with
  commands such as @{command "declare"} or @{command "using"} like
  this:
*}

declare [[show_main_goal = false]]

notepad
begin
  note [[show_main_goal = true]]
end

text {* For historical reasons, some tools cannot take the full proof
  context into account and merely refer to the background theory.
  This is accommodated by configuration options being declared as
  ``global'', which may not be changed within a local context.

  \begin{matharray}{rcll}
    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
  \end{matharray}

  @{rail \<open>
    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
  \<close>}

  \begin{description}

  \item @{command "print_options"} prints the available configuration
  options, with names, types, and current values.

  \item @{text "name = value"} as an attribute expression modifies the
  named option, with the syntax of the value depending on the option's
  type.  For @{ML_type bool} the default value is @{text true}.  Any
  attempt to change a global option in a local context is ignored.

  \end{description}
*}
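
text {* \medskip As a further illustration (a minimal sketch along the same
  lines as the example above), values of non-boolean options are written
  explicitly after the name, e.g.\ for the integer-valued Simplifier
  option @{attribute simp_depth_limit} (cf.\ \secref{sec:simp-meth}): *}

notepad
begin
  note [[simp_depth_limit = 10]]  (* local change within the proof context *)
end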


section {* Basic proof tools *}

subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}

text {*
  \begin{matharray}{rcl}
    @{method_def unfold} & : & @{text method} \\
    @{method_def fold} & : & @{text method} \\
    @{method_def insert} & : & @{text method} \\[0.5ex]
    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def intro} & : & @{text method} \\
    @{method_def elim} & : & @{text method} \\
    @{method_def succeed} & : & @{text method} \\
    @{method_def fail} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
    ;
    (@@{method erule} | @@{method drule} | @@{method frule})
      ('(' @{syntax nat} ')')? @{syntax thmrefs}
    ;
    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
  \<close>}

  \begin{description}

  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
  all goals; any chained facts provided are inserted into the goal and
  subject to rewriting as well.

  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
  into all goals of the proof state.  Note that current facts
  indicated for forward chaining are ignored.

  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
  method (see \secref{sec:pure-meth-att}), but apply rules by
  elim-resolution, destruct-resolution, and forward-resolution,
  respectively \cite{isabelle-implementation}.  The optional natural
  number argument (default 0) specifies additional assumption steps to
  be performed here.  See also the example below.

  Note that these methods are improper ones, mainly serving for
  experimentation and tactic script emulation.  Different modes of
  basic rule application are usually expressed in Isar at the proof
  language level, rather than via implicit proof state manipulations.
  For example, a proper single-step elimination would be done using
  the plain @{method rule} method, with forward chaining of current
  facts.

  \item @{method intro} and @{method elim} repeatedly refine some goal
  by intro- or elim-resolution, after having inserted any chained
  facts.  Exactly the rules given as arguments are taken into account;
  this allows fine-tuned decomposition of a proof problem, in contrast
  to common automated tools.

  \item @{method succeed} yields a single (unchanged) result; it is
  the identity of the ``@{text ","}'' method combinator (cf.\
  \secref{sec:proof-meth}).

  \item @{method fail} yields an empty result sequence; it is the
  identity of the ``@{text "|"}'' method combinator (cf.\
  \secref{sec:proof-meth}).

  \end{description}

  \begin{matharray}{rcl}
    @{attribute_def tagged} & : & @{text attribute} \\
    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
    @{attribute_def THEN} & : & @{text attribute} \\
    @{attribute_def unfolded} & : & @{text attribute} \\
    @{attribute_def folded} & : & @{text attribute} \\
    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
    @{attribute_def rotated} & : & @{text attribute} \\
    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
  \end{matharray}

  @{rail \<open>
    @@{attribute tagged} @{syntax name} @{syntax name}
    ;
    @@{attribute untagged} @{syntax name}
    ;
    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
    ;
    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
    ;
    @@{attribute rotated} @{syntax int}?
  \<close>}

  \begin{description}

  \item @{attribute tagged}~@{text "name value"} and @{attribute
  untagged}~@{text name} add and remove \emph{tags} of some theorem.
  Tags may be any list of string pairs that serve as formal comment.
  The first string is considered the tag name, the second its value.
  Note that @{attribute untagged} removes any tags of the same name.

  \item @{attribute THEN}~@{text a} composes rules by resolution; it
  resolves with the first premise of @{text a} (an alternative
  position may also be specified).  See also @{ML_op "RS"} in
  \cite{isabelle-implementation}.

  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
  definitions throughout a rule.

  \item @{attribute abs_def} turns an equation of the form @{prop "f x
  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
  simp} or @{method unfold} steps always expand it.  This also works
  for object-logic equality.

  \item @{attribute rotated}~@{text n} rotates the premises of a
  theorem by @{text n} (default 1); see also the example below.

  \item @{attribute (Pure) elim_format} turns a destruction rule into
  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.

  Note that the Classical Reasoner (\secref{sec:classical}) provides
  its own version of this operation.

  \item @{attribute no_vars} replaces schematic variables by free
  ones; this is mainly for tuning output of pretty printed theorems.

  \end{description}
*}
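
text {* \medskip The following is a minimal sketch (in Isabelle/HOL) of
  tactic-style rule application with @{method erule}, followed by two
  @{command thm} commands that show the effect of the @{attribute
  rotated} attribute on the library rule @{thm [source] conjI}: *}

lemma "A \<and> B \<Longrightarrow> B \<and> A"
  apply (erule conjE)  (* elim-resolution with the library rule conjE *)
  apply (rule conjI)
  apply assumption
  apply assumption
  done

thm conjI            (* ?P \<Longrightarrow> ?Q \<Longrightarrow> ?P \<and> ?Q *)
thm conjI [rotated]  (* ?Q \<Longrightarrow> ?P \<Longrightarrow> ?P \<and> ?Q *)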


subsection {* Low-level equational reasoning *}

text {*
  \begin{matharray}{rcl}
    @{method_def subst} & : & @{text method} \\
    @{method_def hypsubst} & : & @{text method} \\
    @{method_def split} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
    ;
    @@{method split} @{syntax thmrefs}
  \<close>}

  These methods provide low-level facilities for equational reasoning
  that are intended for specialized applications only.  Normally,
  single step calculations would be performed in a structured text
  (see also \secref{sec:calculation}), while the Simplifier methods
  provide the canonical way for automated normalization (see
  \secref{sec:simplifier}).

  \begin{description}

  \item @{method subst}~@{text eq} performs a single substitution step
  using rule @{text eq}, which may be either a meta or object
  equality.

  \item @{method subst}~@{text "(asm) eq"} substitutes in an
  assumption.

  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
  substitutions in the conclusion. The numbers @{text i} to @{text j}
  indicate the positions to substitute at.  Positions are ordered from
  the top of the term tree moving down from left to right. For
  example, in @{text "(a + b) + (c + d)"} there are three positions
  where commutativity of @{text "+"} is applicable: 1 refers to @{text
  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.

  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
  assume all substitutions are performed simultaneously.  Otherwise
  the behaviour of @{text subst} is not specified.

  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
  substitutions in the assumptions. The positions refer to the
  assumptions in order from left to right.  For example, in a
  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
  commutativity of @{text "+"} is the subterm @{text "a + b"} and
  position 2 is the subterm @{text "c + d"}.

  \item @{method hypsubst} performs substitution using some
  assumption; this only works for equations of the form @{text "x =
  t"} where @{text x} is a free or bound variable.  See the example
  below.

  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
  splitting using the given rules.  Splitting is performed in the
  conclusion or some assumption of the subgoal, depending on the
  structure of the rule.

  Note that the @{method simp} method already involves repeated
  application of split rules as declared in the current context, using
  @{attribute split}, for example.

  \end{description}
*}
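
text {* \medskip A minimal sketch of @{method hypsubst} in Isabelle/HOL:
  the equational assumption is used for substitution throughout the
  subgoal and then discarded. *}

lemma "x = (0::nat) \<Longrightarrow> x + x = x"
  apply hypsubst  (* substitutes 0 for x and deletes the equation *)
  apply simp
  done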


subsection {* Further tactic emulations \label{sec:tactics} *}

text {*
  The following improper proof methods emulate traditional tactics.
  These admit direct access to the goal state, which is normally
  considered harmful!  In particular, this may involve both numbered
  goal addressing (default 1), and dynamic instantiation within the
  scope of some subgoal.

  \begin{warn}
    Dynamic instantiations refer to universally quantified parameters
    of a subgoal (the dynamic context) rather than fixed variables and
    term abbreviations of a (static) Isar context.
  \end{warn}

  Tactic emulation methods, unlike their ML counterparts, admit
  simultaneous instantiation from both dynamic and static contexts.
  If names occur in both contexts goal parameters hide locally fixed
  variables.  Likewise, schematic variables refer to term
  abbreviations, if present in the static context.  Otherwise the
  schematic variable is left to be solved by unification with certain
  parts of the subgoal.

  Note that the tactic emulation proof methods in Isabelle/Isar are
  consistently named @{text foo_tac}.  Note also that variable names
  occurring on left hand sides of instantiations must be preceded by a
  question mark if they coincide with a keyword or contain dots.  This
  is consistent with the attribute @{attribute "where"} (see
  \secref{sec:pure-meth-att}).

  \begin{matharray}{rcl}
    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
    ;
    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
    ;
    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
    ;
    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
    ;
    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
    ;

    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
  \<close>}

  \begin{description}

  \item @{method rule_tac} etc. do resolution of rules with explicit
  instantiation.  This works the same way as the ML tactics @{ML
  res_inst_tac} etc. (see \cite{isabelle-implementation}).

  Multiple rules may only be given if there is no instantiation; then
  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
  \cite{isabelle-implementation}).

  \item @{method cut_tac} inserts facts into the proof state as
  assumptions of a subgoal; instantiations may be given as well.  Note
  that the scope of schematic variables is spread over the main goal
  statement and rule premises are turned into new subgoals.  This is
  in contrast to the regular method @{method insert} which inserts
  closed rule statements.

  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
  from a subgoal.  Note that @{text \<phi>} may contain schematic
  variables, to abbreviate the intended proposition; the first
  matching subgoal premise will be deleted.  Removing useless premises
  from a subgoal increases its readability and can make search tactics
  run faster.

  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
  as new subgoals (in the original context).

  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
  \emph{suffix} of variables.  See also the example below.

  \item @{method rotate_tac}~@{text n} rotates the premises of a
  subgoal by @{text n} positions: from right to left if @{text n} is
  positive, and from left to right if @{text n} is negative; the
  default value is 1.

  \item @{method tactic}~@{text "text"} produces a proof method from
  any ML text of type @{ML_type tactic}.  Apart from the usual ML
  environment and the current proof context, the ML code may refer to
  the locally bound value @{ML_text facts}, which indicates any
  current facts used for forward-chaining.

  \item @{method raw_tactic} is similar to @{method tactic}, but
  presents the goal state in its raw internal form, where simultaneous
  subgoals appear as a conjunction of the logical framework, instead of
  the usual split into several subgoals.  While this feature is useful
  for debugging complex method definitions, it should never appear in
  production theories.

  \end{description}
*}
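
text {* \medskip A minimal sketch of such tactic emulations in an
  unstructured proof script (not recommended as good style): *}

lemma "\<And>x y. P x y \<Longrightarrow> Q \<Longrightarrow> P x y"
  apply (rename_tac u v)  (* goal parameters x y are renamed to u v *)
  apply (rotate_tac 1)    (* premises of the subgoal are rotated by one *)
  apply assumption
  done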


section {* The Simplifier \label{sec:simplifier} *}

text {* The Simplifier performs conditional and unconditional
  rewriting and uses contextual information: rule declarations in the
  background theory or local proof context are taken into account, as
  well as chained facts and subgoal premises (``local assumptions'').
  There are several general hooks that make it possible to modify the
  simplification strategy, or to incorporate other proof tools that
  solve sub-problems, produce rewrite rules on demand etc.

  The rewriting strategy is always strictly bottom up, except for
  congruence rules, which are applied while descending into a term.
  Conditions in conditional rewrite rules are solved recursively
  before the rewrite rule is applied.

  The default Simplifier setup of major object logics (HOL, HOLCF,
  FOL, ZF) makes the Simplifier ready for immediate use, without
  engaging with its internal structures.  Thus it serves as a
  general-purpose proof tool with the main focus on equational
  reasoning, and a bit more than that.
*}


subsection {* Simplification methods \label{sec:simp-meth} *}

text {*
  \begin{tabular}{rcll}
    @{method_def simp} & : & @{text method} \\
    @{method_def simp_all} & : & @{text method} \\
    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
  \end{tabular}
  \medskip

  @{rail \<open>
    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
    ;

    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
    ;
    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
  \<close>}

  \begin{description}

  \item @{method simp} invokes the Simplifier on the first subgoal,
  after inserting chained facts as additional goal premises; further
  rule declarations may be included via @{text "(simp add: facts)"}.
  The proof method fails if the subgoal remains unchanged after
  simplification.

  Note that the original goal premises and chained facts are subject
  to simplification themselves, while declarations via @{text
  "add"}/@{text "del"} merely follow the policies of the object-logic
  to extract rewrite rules from theorems, without further
  simplification.  This may lead to slightly different behavior in
  either case, which in some boundary situations is exactly what is
  required to perform the intended simplification step!

  \medskip The @{text only} modifier first removes all other rewrite
  rules, looper tactics (including split rules), and congruence rules,
  and then behaves like @{text add}.  Implicit solvers remain, which
  means that trivial rules like reflexivity or introduction of @{text
  "True"} are available to solve the simplified subgoals, but also
  non-trivial tools like linear arithmetic in HOL.  The latter may
  make the meaning of ``only'' in Isabelle/HOL somewhat surprising,
  compared to everyday English!

  \medskip The @{text split} modifiers add or delete rules for the
  Splitter (see also \secref{sec:simp-strategies} on the looper).
  This works only if the Simplifier method has been properly set up to
  include the Splitter (all major object logics such as HOL, HOLCF,
  FOL, ZF do this already).

  There is also a separate @{method_ref split} method available for
  single-step case splitting.  The effect of repeatedly applying
  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
  split: thms)"}''.

  \medskip The @{text cong} modifiers add or delete Simplifier
  congruence rules (see also \secref{sec:simp-rules}); the default is
  to add.

  \item @{method simp_all} is similar to @{method simp}, but acts on
  all goals, working backwards from the last to the first one as usual
  in Isabelle.\footnote{The order is irrelevant for goals without
  schematic variables, so simplification might actually be performed
  in parallel here.}

  Chained facts are inserted into all subgoals, before the
  simplification process starts.  Further rule declarations are the
  same as for @{method simp}.

  The proof method fails if all subgoals remain unchanged after
  simplification.

  \item @{attribute simp_depth_limit} limits the number of recursive
  invocations of the Simplifier during conditional rewriting.

  \end{description}

  By default the Simplifier methods above take local assumptions fully
  into account, using equational assumptions in the subsequent
  normalization process, or simplifying assumptions themselves.
  Further options allow fine-tuning of the behavior of the Simplifier
  in this respect, corresponding to a variety of ML tactics as
  follows.\footnote{Unlike the corresponding Isar proof methods, the
  ML tactics do not insist on changing the goal state.}

  \begin{center}
  \small
  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
  \hline
  Isar method & ML tactic & behavior \\\hline

  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
  completely \\\hline

  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
  are used in the simplification of the conclusion but are not
  themselves simplified \\\hline

  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
  are simplified but are not used in the simplification of each other
  or the conclusion \\\hline

  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
  the simplification of the conclusion and to simplify other
  assumptions \\\hline

  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
  mode: an assumption is only used for simplifying assumptions which
  are to the right of it \\\hline

  \end{supertabular}
  \end{center}
*}


subsubsection {* Examples *}

text {* We consider basic algebraic simplifications in Isabelle/HOL.
  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
  a good candidate to be solved by a single call of @{method simp}:
*}

lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops

text {* The above attempt \emph{fails}, because @{term "0"} and @{term
  "op +"} in the HOL library are declared as generic type class
  operations, without stating any algebraic laws yet.  More specific
  types are required to get access to certain standard simplifications
  of the theory context, e.g.\ like this: *}

lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp

text {*
  \medskip In many cases, assumptions of a subgoal are also needed in
  the simplification process.  For example:
*}

lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp

text {* As seen above, local assumptions that shall contribute to
  simplification need to be part of the subgoal already, or indicated
  explicitly for use by the subsequent method invocation.  Either too
  little or too much information can make simplification fail, for
  different reasons.

  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
  g (f (g x))"} does not contribute to solving the problem, but makes
  the default @{method simp} method loop: the rewrite rule @{text "f
  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
  terminate.  The Simplifier notices certain simple forms of
  nontermination, but not this one.  The problem can be solved
  nonetheless, by ignoring assumptions via special options as
  explained before:
*}

lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
  by (simp (no_asm))

text {* The latter form is typical for long unstructured proof
  scripts, where the control over the goal content is limited.  In
  structured proofs it is usually better to avoid pushing too many
  facts into the goal state in the first place.  Assumptions in the
  Isar proof context do not interfere with the reasoning unless used
  explicitly.  This is illustrated for a toplevel statement and a
  local proof body as follows:
*}

lemma
  assumes "\<And>x::nat. f x = g (f (g x))"
  shows "f 0 = f 0 + 0" by simp

notepad
begin
  assume "\<And>x::nat. f x = g (f (g x))"
  have "f 0 = f 0 + 0" by simp
end

text {* \medskip Because assumptions may simplify each other, there
  can be very subtle cases of nontermination. For example, the regular
  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
  \[
  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
  \]
  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
  Q"} terminates (without solving the goal):
*}

lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
  apply simp
  oops

text {* See also \secref{sec:simp-trace} for options to enable
  Simplifier trace mode, which often helps to diagnose problems with
  rewrite systems.
*}
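
text {* \medskip Finally, a minimal sketch of the @{text only} modifier
  described earlier, restricting the Simplifier to the single
  Isabelle/HOL rule @{thm [source] if_True}: *}

lemma "(if True then x else y) = x"
  by (simp only: if_True)

text {* Plain @{method simp} would prove this as well, but it would use
  the full simpset of the current context rather than exactly the given
  rule. *}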


subsection {* Declaring rules \label{sec:simp-rules} *}

text {*
  \begin{matharray}{rcl}
    @{attribute_def simp} & : & @{text attribute} \\
    @{attribute_def split} & : & @{text attribute} \\
    @{attribute_def cong} & : & @{text attribute} \\
    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  \end{matharray}

  @{rail \<open>
    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
      (() | 'add' | 'del')
  \<close>}

  \begin{description}

  \item @{attribute simp} declares rewrite rules, by adding or
  deleting them from the simpset within the theory or proof context
  (see also the example below).
  Rewrite rules are theorems expressing some form of equality, for
  example:

  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}

  \smallskip
  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
  also permitted; the conditions can be arbitrary formulas.

  \medskip Internally, all rewrite rules are translated into Pure
  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
  simpset contains a function for extracting equalities from arbitrary
  theorems, which is usually installed when the object-logic is
  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
  @{attribute simp} and local assumptions within a goal are treated
  uniformly in this respect.

  The Simplifier accepts the following formats for the @{text "lhs"}
  term:

  \begin{enumerate}

  \item First-order patterns, considering the sublanguage of
  application of constant operators to variable operands, without
  @{text "\<lambda>"}-abstractions or functional variables.
  For example:

  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}

  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
  These are terms in @{text "\<beta>"}-normal form (this will always be the
  case unless you have done something strange) where each occurrence
  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
  @{text "x\<^sub>i"} are distinct bound variables.

  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
  or its symmetric form, since the @{text "rhs"} is also a
  higher-order pattern.

  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
  variables are treated like quasi-constant term material.

  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
  be replaced by adding new variables and conditions like this: @{text
  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
  rewrite rule of the second category since conditions can be
  arbitrary terms.

  \end{enumerate}

  \item @{attribute split} declares case split rules.

  \item @{attribute cong} declares congruence rules to the Simplifier
  context.

  Congruence rules are equalities of the form @{text [display]
  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}

  This controls the simplification of the arguments of @{text f}.  For
  example, some arguments can be simplified under additional
  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}

  Given this rule, the Simplifier assumes @{text "?Q\<^sub>1"} and extracts
  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
  assumptions are effective for rewriting formulae such as @{text "x =
  0 \<longrightarrow> y + x = y"}.

  %FIXME
  %The local assumptions are also provided as theorems to the solver;
  %see \secref{sec:simp-solver} below.

  \medskip The following congruence rule for bounded quantifiers also
  supplies contextual information --- about the bound variable:
  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}

  \medskip This congruence rule for conditional expressions can
  supply contextual information for simplifying the arms:
  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}

  A congruence rule can also \emph{prevent} simplification of some
  arguments.  Here is an alternative congruence rule for conditional
  expressions that conforms to non-strict functional evaluation:
  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}

  Only the first argument is simplified; the others remain unchanged.
  This can make simplification much faster, but may require an extra
  case split over the condition @{text "?q"} to prove the goal.

  \item @{command "print_simpset"} prints the collection of rules
  declared to the Simplifier, which is also known as ``simpset''
  internally.

  For historical reasons, simpsets may occur independently from the
  current context, but are conceptually dependent on it.  When the
  Simplifier is invoked via one of its main entry points in the Isar
  source language (as proof method \secref{sec:simp-meth} or rule
  attribute \secref{sec:simp-rules}), its simpset is derived from the
  current proof context, and carries a back-reference to that for
  other tools that might get invoked internally (e.g.\ simplification
  procedures \secref{sec:simproc}).  A mismatch of the context of the
  simpset and the context of the problem being simplified may lead to
  unexpected results.

  \end{description}

  The implicit simpset of the theory context is propagated
  monotonically through the theory hierarchy: forming a new theory,
  the union of the simpsets of its imports is taken as the starting
  point.  Also note that definitional packages like @{command
  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
  declare Simplifier rules to the target context, while plain
  @{command "definition"} is an exception in \emph{not} declaring
  anything.

  \medskip It is up to the user to manipulate the current simpset
  further by explicitly adding or deleting theorems as simplification
  rules, or by installing other tools via simplification procedures
  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"}, are good
  candidates for the implicit simpset, unless a special
  non-normalizing behavior of certain operations is intended.  More
  specific rules (such as distributive laws, which duplicate subterms)
  should be added only for specific proof steps.  Conversely,
  sometimes a rule needs to be deleted just for some part of a proof.
  The need for frequent additions or deletions may indicate a poorly
  designed simpset.

  \begin{warn}
  The union of simpsets from theory imports (as described above) is
  not always a good starting point for the new theory.  If some
  ancestors have deleted simplification rules because they are no
  longer wanted, while others have left those rules in, then the union
  will contain the unwanted rules, which thus have to be deleted again
  in the theory body.
  \end{warn}
*}
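
text {* \medskip As a minimal sketch of local rule declarations, the
  following proof body turns an assumed equation into a rewrite rule via
  the @{attribute simp} attribute, without affecting the global simpset
  of the theory: *}

notepad
begin
  assume eq: "f (0::nat) = (1::nat)"
  note eq [simp]  (* the equation now acts as a rewrite rule locally *)
  have "f 0 + n = 1 + n" by simp
end

text {* The two congruence rules for conditional expressions discussed
  above are available in Isabelle/HOL as @{thm [source] if_cong} and
  @{thm [source] if_weak_cong}, respectively. *}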


subsection {* Ordered rewriting with permutative rules *}

text {* A rewrite rule is \emph{permutative} if the left-hand side and
  right-hand side are equal up to renaming of variables.  The most
  common permutative rule is commutativity: @{text "?x + ?y = ?y +
  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
  special attention.

  Because ordinary rewriting loops given such rules, the Simplifier
  employs a special strategy, called \emph{ordered rewriting}.
  Permutative rules are detected and only applied if the rewriting
  step decreases the redex wrt.\ a given term ordering.  For example,
  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
  stops, because the redex cannot be decreased further in the sense of
  the term ordering.

  The default is lexicographic ordering of term structure, but this
  can also be changed locally for special applications via
  @{index_ML Simplifier.set_termless} in Isabelle/ML.

  \medskip Permutative rewrite rules are declared to the Simplifier
  just like other rewrite rules.  Their special status is recognized
  automatically, and their application is guarded by the term ordering
  accordingly. *}


subsubsection {* Rewriting with AC operators *}

text {* Ordered rewriting is particularly effective in the case of
  associative-commutative operators.  (Associativity by itself is not
  permutative.)  When dealing with an AC-operator @{text "f"}, keep
  the following points in mind:

  \begin{itemize}

  \item The associative law must always be oriented from left to
  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
  orientation, if used with commutativity, leads to looping in
  conjunction with the standard term order.

  \item To complete your set of rewrite rules, you must add not just
  associativity (A) and commutativity (C) but also a derived rule
  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.

  \end{itemize}

  Ordered rewriting with the combination of A, C, and LC sorts a term
  lexicographically --- the rewriting engine imitates bubble-sort.
*}

locale AC_example =
  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
  assumes commute: "x \<bullet> y = y \<bullet> x"
begin

lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
proof -
  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
  then show ?thesis by (simp only: assoc)
qed

lemmas AC_rules = assoc commute left_commute

text {* Thus the Simplifier is able to establish equalities with
  arbitrary permutations of subterms, by normalizing to a common
  standard form.  For example: *}

lemma "(b \<bullet> c) \<bullet> a = xxx"
  apply (simp only: AC_rules)
  txt {* @{subgoals} *}
  oops

lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)

end

text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
  give many examples; other algebraic structures are amenable to
  ordered rewriting, such as Boolean rings.  The Boyer-Moore theorem
  prover \cite{bm88book} also employs ordered rewriting.
*}


subsubsection {* Re-orienting equalities *}

text {* Another application of ordered rewriting uses the derived rule
  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
  reverse equations.

  This is occasionally useful to re-orient local assumptions according
  to the term ordering, when other built-in mechanisms of
  reorientation and mutual simplification fail to apply.  *}
wenzelm@50080
   865
wenzelm@50080
   866
wenzelm@57591
   867
subsection {* Simplifier tracing and debugging \label{sec:simp-trace} *}
wenzelm@50063
   868
wenzelm@50063
   869
text {*
wenzelm@50063
   870
  \begin{tabular}{rcll}
wenzelm@50063
   871
    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
wenzelm@50063
   872
    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
wenzelm@50063
   873
    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
wenzelm@57591
   874
    @{attribute_def simp_trace_new} & : & @{text attribute} \\
wenzelm@57591
   875
    @{attribute_def simp_break} & : & @{text attribute} \\
wenzelm@50063
   876
  \end{tabular}
wenzelm@50063
   877
  \medskip
wenzelm@50063
   878
wenzelm@57591
   879
  @{rail \<open>
wenzelm@57591
   880
    @@{attribute simp_trace_new} ('interactive')? \<newline>
wenzelm@57591
   881
      ('mode' '=' ('full' | 'normal'))? \<newline>
wenzelm@57591
   882
      ('depth' '=' @{syntax nat})?
wenzelm@57591
   883
    ;
wenzelm@57591
   884
wenzelm@57591
   885
    @@{attribute simp_break} (@{syntax term}*)
wenzelm@57591
   886
  \<close>}
wenzelm@57591
   887
wenzelm@57591
   888
  These attributes and configurations options control various aspects of
wenzelm@57591
   889
  Simplifier tracing and debugging.
wenzelm@50063
   890
wenzelm@50063
   891
  \begin{description}
wenzelm@50063
   892
wenzelm@50063
   893
  \item @{attribute simp_trace} makes the Simplifier output internal
wenzelm@50063
   894
  operations.  This includes rewrite steps, but also bookkeeping like
wenzelm@50063
   895
  modifications of the simpset.
wenzelm@50063
   896
wenzelm@50063
   897
  \item @{attribute simp_trace_depth_limit} limits the effect of
wenzelm@50063
   898
  @{attribute simp_trace} to the given depth of recursive Simplifier
wenzelm@50063
   899
  invocations (when solving conditions of rewrite rules).
wenzelm@50063
   900
wenzelm@50063
   901
  \item @{attribute simp_debug} makes the Simplifier output some extra
wenzelm@50063
   902
  information about internal operations.  This includes any attempted
wenzelm@50063
   903
  invocation of simplification procedures.
wenzelm@50063
   904
wenzelm@57591
   905
  \item @{attribute simp_trace_new} controls Simplifier tracing within
wenzelm@57591
   906
  Isabelle/PIDE applications, notably Isabelle/jEdit \cite{isabelle-jedit}.
wenzelm@57591
   907
  This provides a hierarchical representation of the rewriting steps
wenzelm@57591
   908
  performed by the Simplifier.
wenzelm@57591
   909
wenzelm@57591
   910
  Users can configure the behaviour by specifying breakpoints, verbosity and
wenzelm@57591
   911
  enabling or disabling the interactive mode. In normal verbosity (the
wenzelm@57591
   912
  default), only rule applications matching a breakpoint will be shown to
wenzelm@57591
   913
  the user. In full verbosity, all rule applications will be logged.
wenzelm@57591
   914
  Interactive mode interrupts the normal flow of the Simplifier and defers
wenzelm@57591
   915
  the decision how to continue to the user via some GUI dialog.
wenzelm@57591
   916
wenzelm@57591
   917
  \item @{attribute simp_break} declares term or theorem breakpoints for
wenzelm@57591
   918
  @{attribute simp_trace_new} as described above. Term breakpoints are
wenzelm@57591
   919
  patterns which are checked for matches on the redex of a rule application.
wenzelm@57591
   920
  Theorem breakpoints trigger when the corresponding theorem is applied in a
wenzelm@57591
   921
  rewrite step. For example:
wenzelm@57591
   922
wenzelm@50063
   923
  \end{description}
wenzelm@50063
   924
*}
wenzelm@50063
   925
wenzelm@57591
   926
declare conjI [simp_break]
wenzelm@57590
   927
declare [[simp_break "?x \<and> ?y"]]
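text {* The tracing options above are usually activated only locally, e.g.\
  like this (a hypothetical sketch of the attribute syntax): *}

notepad
begin
  note [[simp_trace, simp_trace_depth_limit = 2]]
  note [[simp_trace_new mode = full]]
end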
wenzelm@57590
   928
wenzelm@50063
   929
wenzelm@50063
   930
subsection {* Simplification procedures \label{sec:simproc} *}
wenzelm@26782
   931
wenzelm@42925
   932
text {* Simplification procedures are ML functions that produce proven
wenzelm@42925
   933
  rewrite rules on demand.  They are associated with higher-order
wenzelm@42925
   934
  patterns that approximate the left-hand sides of equations.  The
wenzelm@42925
   935
  Simplifier first matches the current redex against one of the LHS
wenzelm@42925
   936
  patterns; if this succeeds, the corresponding ML function is
wenzelm@42925
   937
  invoked, passing the Simplifier context and redex term.  Thus rules
wenzelm@42925
   938
  may be specifically fashioned for particular situations, resulting
wenzelm@42925
   939
  in a more powerful mechanism than term rewriting by a fixed set of
wenzelm@42925
   940
  rules.
wenzelm@42925
   941
wenzelm@42925
   942
  Any successful result needs to be a (possibly conditional) rewrite
wenzelm@42925
   943
  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
wenzelm@42925
   944
  rule will be applied just as any ordinary rewrite rule.  It is
wenzelm@42925
   945
  expected to be already in \emph{internal form}, bypassing the
wenzelm@42925
   946
  automatic preprocessing of object-level equivalences.
wenzelm@42925
   947
wenzelm@26782
   948
  \begin{matharray}{rcl}
wenzelm@28761
   949
    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
wenzelm@28761
   950
    simproc & : & @{text attribute} \\
wenzelm@26782
   951
  \end{matharray}
wenzelm@26782
   952
wenzelm@55112
   953
  @{rail \<open>
wenzelm@42596
   954
    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
wenzelm@55029
   955
      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
wenzelm@26782
   956
    ;
wenzelm@26782
   957
wenzelm@42596
   958
    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
wenzelm@55112
   959
  \<close>}
wenzelm@26782
   960
wenzelm@28760
   961
  \begin{description}
wenzelm@26782
   962
wenzelm@28760
   963
  \item @{command "simproc_setup"} defines a named simplification
wenzelm@26782
   964
  procedure that is invoked by the Simplifier whenever any of the
wenzelm@26782
   965
  given term patterns match the current redex.  The implementation,
wenzelm@26782
   966
  which is provided as ML source text, needs to be of type @{ML_type
wenzelm@26782
   967
  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
wenzelm@26782
   968
  cterm} represents the current redex @{text r} and the result is
wenzelm@26782
   969
  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
wenzelm@26782
   970
  generalized version), or @{ML NONE} to indicate failure.  The
wenzelm@26782
   971
  @{ML_type simpset} argument holds the full context of the current
wenzelm@26782
   972
  Simplifier invocation, including the actual Isar proof context.  The
wenzelm@26782
   973
  @{ML_type morphism} informs about the difference of the original
wenzelm@26782
   974
  compilation context wrt.\ the one of the actual application later
wenzelm@26782
   975
  on.  The optional @{keyword "identifier"} specifies theorems that
wenzelm@26782
   976
  represent the logical content of the abstract theory of this
wenzelm@26782
   977
  simproc.
wenzelm@26782
   978
wenzelm@26782
   979
  Morphisms and identifiers are only relevant for simprocs that are
wenzelm@26782
   980
  defined within a local target context, e.g.\ in a locale.
wenzelm@26782
   981
wenzelm@28760
   982
  \item @{text "simproc add: name"} and @{text "simproc del: name"}
wenzelm@26782
   983
  add or delete named simprocs to the current Simplifier context.  The
wenzelm@26782
   984
  default is to add a simproc.  Note that @{command "simproc_setup"}
wenzelm@26782
   985
  already adds the new simproc to the subsequent context.
wenzelm@26782
   986
wenzelm@28760
   987
  \end{description}
wenzelm@26782
   988
*}
wenzelm@26782
   989
wenzelm@26782
   990
wenzelm@42925
   991
subsubsection {* Example *}
wenzelm@42925
   992
wenzelm@42925
   993
text {* The following simplification procedure for @{thm
wenzelm@42925
   994
  [source=false, show_types] unit_eq} in HOL performs fine-grained
wenzelm@42925
   995
  control over rule application, beyond higher-order pattern matching.
wenzelm@42925
   996
  Declaring @{thm unit_eq} as @{attribute simp} directly would make
wenzelm@56594
   997
  the Simplifier loop!  Note that a version of this simplification
wenzelm@42925
   998
  procedure is already active in Isabelle/HOL.  *}
wenzelm@42925
   999
wenzelm@42925
  1000
simproc_setup unit ("x::unit") = {*
wenzelm@42925
  1001
  fn _ => fn _ => fn ct =>
wenzelm@42925
  1002
    if HOLogic.is_unit (term_of ct) then NONE
wenzelm@42925
  1003
    else SOME (mk_meta_eq @{thm unit_eq})
wenzelm@42925
  1004
*}
wenzelm@42925
  1005
wenzelm@42925
  1006
text {* Since the Simplifier applies simplification procedures
wenzelm@42925
  1007
  frequently, it is important to make the failure check in ML
wenzelm@42925
  1008
  reasonably fast. *}
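
text {* The @{attribute simproc} attribute can be used to manage such
  simprocs within the current context, e.g.\ the simproc defined above could
  be temporarily removed and added again as follows (a sketch with no
  lasting effect): *}

declare [[simproc del: unit]]
declare [[simproc add: unit]]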
wenzelm@42925
  1009
wenzelm@42925
  1010
wenzelm@50079
  1011
subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
wenzelm@50079
  1012
wenzelm@50079
  1013
text {* The core term-rewriting engine of the Simplifier is normally
wenzelm@50079
  1014
  used in combination with some add-on components that modify the
wenzelm@50079
  1015
  strategy and allow the integration of other non-Simplifier proof tools.
wenzelm@50079
  1016
  These may be reconfigured in ML as explained below.  Even if the
wenzelm@50079
  1017
  default strategies of object-logics like Isabelle/HOL are used
wenzelm@50079
  1018
  unchanged, it helps to understand how the standard Simplifier
wenzelm@50079
  1019
  strategies work. *}
wenzelm@50079
  1020
wenzelm@50079
  1021
wenzelm@50079
  1022
subsubsection {* The subgoaler *}
wenzelm@50079
  1023
wenzelm@50079
  1024
text {*
wenzelm@50079
  1025
  \begin{mldecls}
wenzelm@51717
  1026
  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
wenzelm@51717
  1027
  Proof.context -> Proof.context"} \\
wenzelm@51717
  1028
  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
wenzelm@50079
  1029
  \end{mldecls}
wenzelm@50079
  1030
wenzelm@50079
  1031
  The subgoaler is the tactic used to solve subgoals arising out of
wenzelm@50079
  1032
  conditional rewrite rules or congruence rules.  The default should
wenzelm@50079
  1033
  be simplification itself.  In rare situations, this strategy may
wenzelm@50079
  1034
  need to be changed.  For example, if the premise of a conditional
wenzelm@50079
  1035
  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
wenzelm@50079
  1036
  ?m < ?n"}, the default strategy could loop.  % FIXME !??
wenzelm@50079
  1037
wenzelm@50079
  1038
  \begin{description}
wenzelm@50079
  1039
wenzelm@51717
  1040
  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
wenzelm@51717
  1041
  subgoaler of the context to @{text "tac"}.  The tactic will
wenzelm@51717
  1042
  be applied to the context of the running Simplifier instance.
wenzelm@50079
  1043
wenzelm@51717
  1044
  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
wenzelm@51717
  1045
  set of premises from the context.  This may be non-empty only if
wenzelm@50079
  1046
  the Simplifier has been told to utilize local assumptions in the
wenzelm@50079
  1047
  first place (cf.\ the options in \secref{sec:simp-meth}).
wenzelm@50079
  1048
wenzelm@50079
  1049
  \end{description}
wenzelm@50079
  1050
wenzelm@50079
  1051
  As an example, consider the following alternative subgoaler:
wenzelm@50079
  1052
*}
wenzelm@50079
  1053
wenzelm@50079
  1054
ML {*
wenzelm@51717
  1055
  fun subgoaler_tac ctxt =
wenzelm@50079
  1056
    assume_tac ORELSE'
wenzelm@51717
  1057
    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
wenzelm@51717
  1058
    asm_simp_tac ctxt
wenzelm@50079
  1059
*}
wenzelm@50079
  1060
wenzelm@50079
  1061
text {* This tactic first tries to solve the subgoal by assumption or
wenzelm@50079
  1062
  by resolving with one of the premises, calling simplification
wenzelm@50079
  1063
  only if that fails. *}
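
text {* It could be installed as follows; the function name is hypothetical
  and merely illustrates the @{ML Simplifier.set_subgoaler} interface shown
  above: *}

ML {*
  fun set_my_subgoaler ctxt =
    Simplifier.set_subgoaler subgoaler_tac ctxt
*}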
wenzelm@50079
  1064
wenzelm@50079
  1065
wenzelm@50079
  1066
subsubsection {* The solver *}
wenzelm@50079
  1067
wenzelm@50079
  1068
text {*
wenzelm@50079
  1069
  \begin{mldecls}
wenzelm@50079
  1070
  @{index_ML_type solver} \\
wenzelm@51717
  1071
  @{index_ML Simplifier.mk_solver: "string ->
wenzelm@51717
  1072
  (Proof.context -> int -> tactic) -> solver"} \\
wenzelm@51717
  1073
  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1074
  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1075
  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@51717
  1076
  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
wenzelm@50079
  1077
  \end{mldecls}
wenzelm@50079
  1078
wenzelm@50079
  1079
  A solver is a tactic that attempts to solve a subgoal after
wenzelm@50079
  1080
  simplification.  Its core functionality is to prove trivial subgoals
wenzelm@50079
  1081
  such as @{prop "True"} and @{text "t = t"}, but object-logics might
wenzelm@50079
  1082
  be more ambitious.  For example, Isabelle/HOL performs a restricted
wenzelm@50079
  1083
  version of linear arithmetic here.
wenzelm@50079
  1084
wenzelm@50079
  1085
  Solvers are packaged up in abstract type @{ML_type solver}, with
wenzelm@50079
  1086
  @{ML Simplifier.mk_solver} as the only operation to create a solver.
wenzelm@50079
  1087
wenzelm@50079
  1088
  \medskip Rewriting does not instantiate unknowns.  For example,
wenzelm@50079
  1089
  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
wenzelm@50079
  1090
  instantiating @{text "?A"}.  The solver, however, is an arbitrary
wenzelm@50079
  1091
  tactic and may instantiate unknowns as it pleases.  This is the only
wenzelm@50079
  1092
  way the Simplifier can handle a conditional rewrite rule whose
wenzelm@50079
  1093
  condition contains extra variables.  When a simplification tactic is
wenzelm@50079
  1094
  to be combined with other provers, especially with the Classical
wenzelm@50079
  1095
  Reasoner, it is important whether it can be considered safe or not.
wenzelm@50079
  1096
  For this reason a simpset contains two solvers: safe and unsafe.
wenzelm@50079
  1097
wenzelm@50079
  1098
  The standard simplification strategy solely uses the unsafe solver,
wenzelm@50079
  1099
  which is appropriate in most cases.  For special applications where
wenzelm@50079
  1100
  the simplification process is not allowed to instantiate unknowns
wenzelm@50079
  1101
  within the goal, simplification starts with the safe solver, but may
wenzelm@50079
  1102
  still apply the ordinary unsafe one in nested simplifications for
wenzelm@50079
  1103
  conditional rules or congruences. Note that in this way the overall
wenzelm@50079
  1104
  tactic is not totally safe: it may instantiate unknowns that appear
wenzelm@50079
  1105
  also in other subgoals.
wenzelm@50079
  1106
wenzelm@50079
  1107
  \begin{description}
wenzelm@50079
  1108
wenzelm@50079
  1109
  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
wenzelm@50079
  1110
  "tac"} into a solver; the @{text "name"} is only attached as a
wenzelm@50079
  1111
  comment and has no further significance.
wenzelm@50079
  1112
wenzelm@51717
  1113
  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
wenzelm@51717
  1114
  the safe solver of @{text "ctxt"}.
wenzelm@50079
  1115
wenzelm@51717
  1116
  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
wenzelm@50079
  1117
  additional safe solver; it will be tried after the solvers which had
wenzelm@51717
  1118
  already been present in @{text "ctxt"}.
wenzelm@50079
  1119
wenzelm@51717
  1120
  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
wenzelm@51717
  1121
  unsafe solver of @{text "ctxt"}.
wenzelm@50079
  1122
wenzelm@51717
  1123
  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
wenzelm@50079
  1124
  additional unsafe solver; it will be tried after the solvers which
wenzelm@51717
  1125
  had already been present in @{text "ctxt"}.
wenzelm@50079
  1126
wenzelm@50079
  1127
  \end{description}
wenzelm@50079
  1128
wenzelm@51717
  1129
  \medskip The solver tactic is invoked with the context of the
wenzelm@51717
  1130
  running Simplifier.  Further operations
wenzelm@50079
  1131
  may be used to retrieve relevant information, such as the list of
wenzelm@50079
  1132
  local Simplifier premises via @{ML Simplifier.prems_of} --- this
wenzelm@50079
  1133
  list may be non-empty only if the Simplifier runs in a mode that
wenzelm@50079
  1134
  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
wenzelm@50079
  1135
  solver is also presented the full goal including its assumptions in
wenzelm@50079
  1136
  any case.  Thus it can use these (e.g.\ by calling @{ML
wenzelm@50079
  1137
  assume_tac}), even if the Simplifier proper happens to ignore local
wenzelm@50079
  1138
  premises at the moment.
wenzelm@50079
  1139
wenzelm@50079
  1140
  \medskip As explained before, the subgoaler is also used to solve
wenzelm@50079
  1141
  the premises of congruence rules.  These are usually of the form
wenzelm@50079
  1142
  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
wenzelm@50079
  1143
  @{text "?x"} needs to be instantiated with the result.  Typically,
wenzelm@50079
  1144
  the subgoaler will invoke the Simplifier at some point, which will
wenzelm@50079
  1145
  eventually call the solver.  For this reason, solver tactics must be
wenzelm@50079
  1146
  prepared to solve goals of the form @{text "t = ?x"}, usually by
wenzelm@50079
  1147
  reflexivity.  In particular, reflexivity should be tried before any
wenzelm@50079
  1148
  of the fancy automated proof tools.
wenzelm@50079
  1149
wenzelm@50079
  1150
  It may even happen that due to simplification the subgoal is no
wenzelm@50079
  1151
  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
wenzelm@50079
  1152
  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
wenzelm@50079
  1153
  try resolving with the theorem @{text "\<not> False"} of the
wenzelm@50079
  1154
  object-logic.
wenzelm@50079
  1155
wenzelm@50079
  1156
  \medskip
wenzelm@50079
  1157
wenzelm@50079
  1158
  \begin{warn}
wenzelm@50079
  1159
  If a premise of a congruence rule cannot be proved, then the
wenzelm@50079
  1160
  congruence is ignored.  This should only happen if the rule is
wenzelm@50079
  1161
  \emph{conditional} --- that is, contains premises not of the form
wenzelm@50079
  1162
  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
wenzelm@50079
  1163
  or possibly the subgoaler or solver, is faulty.
wenzelm@50079
  1164
  \end{warn}
wenzelm@50079
  1165
*}
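
text {* As a hypothetical sketch, a rather simple-minded solver that merely
  tries assumption and reflexivity could be constructed and installed as an
  additional unsafe solver like this: *}

ML {*
  (*solver that tries assumption, then reflexivity*)
  val my_solver =
    Simplifier.mk_solver "my_solver"
      (fn _ => assume_tac ORELSE' resolve_tac [@{thm refl}]);

  (*install it as additional unsafe solver of some context*)
  fun add_my_solver ctxt = ctxt addSolver my_solver
*}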
wenzelm@50079
  1166
wenzelm@50079
  1167
wenzelm@50079
  1168
subsubsection {* The looper *}
wenzelm@50079
  1169
wenzelm@50079
  1170
text {*
wenzelm@50079
  1171
  \begin{mldecls}
wenzelm@51717
  1172
  @{index_ML_op setloop: "Proof.context *
wenzelm@51717
  1173
  (Proof.context -> int -> tactic) -> Proof.context"} \\
wenzelm@51717
  1174
  @{index_ML_op addloop: "Proof.context *
wenzelm@51717
  1175
  (string * (Proof.context -> int -> tactic))
wenzelm@51717
  1176
  -> Proof.context"} \\
wenzelm@51717
  1177
  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
wenzelm@51717
  1178
  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
wenzelm@51717
  1179
  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
wenzelm@50079
  1180
  \end{mldecls}
wenzelm@50079
  1181
wenzelm@50079
  1182
  The looper is a list of tactics that are applied after
wenzelm@50079
  1183
  simplification, in case the solver failed to solve the simplified
wenzelm@50079
  1184
  goal.  If the looper succeeds, the simplification process is started
wenzelm@50079
  1185
  all over again.  Each of the subgoals generated by the looper is
wenzelm@50079
  1186
  attacked in turn, in reverse order.
wenzelm@50079
  1187
wenzelm@50079
  1188
  A typical looper is \emph{case splitting}: the expansion of a
wenzelm@50079
  1189
  conditional.  Another possibility is to apply an elimination rule on
wenzelm@50079
  1190
  the assumptions.  More adventurous loopers could start an induction.
wenzelm@50079
  1191
wenzelm@50079
  1192
  \begin{description}
wenzelm@50079
  1193
wenzelm@51717
  1194
  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
wenzelm@52037
  1195
  looper tactic of @{text "ctxt"}.
wenzelm@50079
  1196
wenzelm@51717
  1197
  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
wenzelm@50079
  1198
  additional looper tactic with name @{text "name"}, which is
wenzelm@50079
  1199
  significant for managing the collection of loopers.  The tactic will
wenzelm@50079
  1200
  be tried after the looper tactics that had already been present in
wenzelm@52037
  1201
  @{text "ctxt"}.
wenzelm@50079
  1202
wenzelm@51717
  1203
  \item @{text "ctxt delloop name"} deletes the looper tactic that was
wenzelm@51717
  1204
  associated with @{text "name"} from @{text "ctxt"}.
wenzelm@50079
  1205
wenzelm@51717
  1206
  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
wenzelm@51717
  1207
  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
wenzelm@50079
  1208
wenzelm@51717
  1209
  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
wenzelm@50079
  1210
  tactic corresponding to @{text thm} from the looper tactics of
wenzelm@51717
  1211
  @{text "ctxt"}.
wenzelm@50079
  1212
wenzelm@50079
  1213
  \end{description}
wenzelm@50079
  1214
wenzelm@50079
  1215
  The splitter replaces applications of a given function; the
wenzelm@50079
  1216
  right-hand side of the replacement can be anything.  For example,
wenzelm@50079
  1217
  here is a splitting rule for conditional expressions:
wenzelm@50079
  1218
wenzelm@50079
  1219
  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
wenzelm@50079
  1220
wenzelm@50079
  1221
  Another example is the elimination operator for Cartesian products
wenzelm@50079
  1222
  (which happens to be called @{text split} in Isabelle/HOL):
wenzelm@50079
  1223
wenzelm@50079
  1224
  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (f a b))"}
wenzelm@50079
  1225
wenzelm@50079
  1226
  For technical reasons, there is a distinction between case splitting
wenzelm@50079
  1227
  in the conclusion and in the premises of a subgoal.  The former is
wenzelm@50079
  1228
  done by @{ML Splitter.split_tac} with rules like @{thm [source]
wenzelm@50079
  1229
  split_if} or @{thm [source] option.split}, which do not split the
wenzelm@50079
  1230
  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
wenzelm@50079
  1231
  with rules like @{thm [source] split_if_asm} or @{thm [source]
wenzelm@50079
  1232
  option.split_asm}, which split the subgoal.  The function @{ML
wenzelm@50079
  1233
  Splitter.add_split} automatically takes care of which tactic to
wenzelm@50079
  1234
  call, analyzing the form of the rules given as argument; it is the
wenzelm@50079
  1235
  same operation behind the @{text "split"} attribute or method modifier
wenzelm@50079
  1236
  syntax in the Isar source language.
wenzelm@50079
  1237
wenzelm@50079
  1238
  Case splits should be allowed only when necessary; they are
wenzelm@50079
  1239
  expensive and hard to control.  Case-splitting on if-expressions in
wenzelm@50079
  1240
  the conclusion is usually beneficial, so it is enabled by default in
wenzelm@50079
  1241
  Isabelle/HOL and Isabelle/FOL/ZF.
wenzelm@50079
  1242
wenzelm@50079
  1243
  \begin{warn}
wenzelm@50079
  1244
  With @{ML Splitter.split_asm_tac} as looper component, the
wenzelm@50079
  1245
  Simplifier may split subgoals!  This might cause unexpected problems
wenzelm@50079
  1246
  in tactic expressions that silently assume 0 or 1 subgoals after
wenzelm@50079
  1247
  simplification.
wenzelm@50079
  1248
  \end{warn}
wenzelm@50079
  1249
*}
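
text {* For example, the default splitting of @{text "if"}-expressions in
  Isabelle/HOL could be switched off and on again via the @{attribute split}
  attribute like this (a sketch with no lasting effect): *}

declare split_if [split del]
declare split_if [split]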
wenzelm@50079
  1250
wenzelm@50079
  1251
wenzelm@50063
  1252
subsection {* Forward simplification \label{sec:simp-forward} *}
wenzelm@26782
  1253
wenzelm@26782
  1254
text {*
wenzelm@26782
  1255
  \begin{matharray}{rcl}
wenzelm@28761
  1256
    @{attribute_def simplified} & : & @{text attribute} \\
wenzelm@26782
  1257
  \end{matharray}
wenzelm@26782
  1258
wenzelm@55112
  1259
  @{rail \<open>
wenzelm@42596
  1260
    @@{attribute simplified} opt? @{syntax thmrefs}?
wenzelm@26782
  1261
    ;
wenzelm@26782
  1262
wenzelm@40255
  1263
    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
wenzelm@55112
  1264
  \<close>}
wenzelm@26782
  1265
wenzelm@28760
  1266
  \begin{description}
wenzelm@26782
  1267
  
wenzelm@28760
  1268
  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
wenzelm@28760
  1269
  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
wenzelm@28760
  1270
  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
wenzelm@28760
  1271
  The result is fully simplified by default, including assumptions and
wenzelm@28760
  1272
  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
wenzelm@28760
  1273
  the same way as for the @{text simp} method.
wenzelm@26782
  1274
wenzelm@56594
  1275
  Note that forward simplification restricts the Simplifier to its
wenzelm@26782
  1276
  most basic operation of term rewriting; solver and looper tactics
wenzelm@50079
  1277
  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
wenzelm@50079
  1278
  @{attribute simplified} attribute should be only rarely required
wenzelm@50079
  1279
  under normal circumstances.
wenzelm@26782
  1280
wenzelm@28760
  1281
  \end{description}
wenzelm@26782
  1282
*}
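
text {* For example, a doubly-negated local fact could be reduced to its
  positive form via the implicit Simplifier context like this: *}

notepad
begin
  assume a: "\<not> \<not> P"
  note a [simplified]
end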
wenzelm@26782
  1283
wenzelm@26782
  1284
wenzelm@27040
  1285
section {* The Classical Reasoner \label{sec:classical} *}
wenzelm@26782
  1286
wenzelm@42930
  1287
subsection {* Basic concepts *}
wenzelm@42927
  1288
wenzelm@42927
  1289
text {* Although Isabelle is generic, many users will be working in
wenzelm@42927
  1290
  some extension of classical first-order logic.  Isabelle/ZF is built
wenzelm@42927
  1291
  upon theory FOL, while Isabelle/HOL conceptually contains
wenzelm@42927
  1292
  first-order logic as a fragment.  Theorem-proving in predicate logic
wenzelm@42927
  1293
  is undecidable, but many automated strategies have been developed to
wenzelm@42927
  1294
  assist in this task.
wenzelm@42927
  1295
wenzelm@42927
  1296
  Isabelle's classical reasoner is a generic package that accepts
wenzelm@42927
  1297
  certain information about a logic and delivers a suite of automatic
wenzelm@42927
  1298
  proof tools, based on rules that are classified and declared in the
wenzelm@42927
  1299
  context.  These proof procedures are slow and simplistic compared
wenzelm@42927
  1300
  with high-end automated theorem provers, but they can save
wenzelm@42927
  1301
  considerable time and effort in practice.  They can prove theorems
wenzelm@42927
  1302
  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
wenzelm@42927
  1303
  milliseconds (including full proof reconstruction): *}
wenzelm@42927
  1304
wenzelm@42927
  1305
lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
wenzelm@42927
  1306
  by blast
wenzelm@42927
  1307
wenzelm@42927
  1308
lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
wenzelm@42927
  1309
  by blast
wenzelm@42927
  1310
wenzelm@42927
  1311
text {* The proof tools are generic.  They are not restricted to
wenzelm@42927
  1312
  first-order logic, and have been heavily used in the development of
wenzelm@42927
  1313
  the Isabelle/HOL library and applications.  The tactics can be
wenzelm@42927
  1314
  traced, and their components can be called directly; in this manner,
wenzelm@42927
  1315
  any proof can be viewed interactively.  *}
wenzelm@42927
  1316
wenzelm@42927
  1317
wenzelm@42927
  1318
subsubsection {* The sequent calculus *}
wenzelm@42927
  1319
wenzelm@42927
  1320
text {* Isabelle supports natural deduction, which is easy to use for
wenzelm@42927
  1321
  interactive proof.  But natural deduction does not easily lend
wenzelm@42927
  1322
  itself to automation, and has a bias towards intuitionism.  For
wenzelm@42927
  1323
  certain proofs in classical logic, it cannot be called natural.
wenzelm@42927
  1324
  The \emph{sequent calculus}, a generalization of natural deduction,
wenzelm@42927
  1325
  is easier to automate.
wenzelm@42927
  1326
wenzelm@42927
  1327
  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
wenzelm@42927
  1328
  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
wenzelm@42927
  1329
  logic, sequents can equivalently be made from lists or multisets of
wenzelm@42927
  1330
  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
wenzelm@42927
  1331
  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
wenzelm@42927
  1332
  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
wenzelm@42927
  1333
  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
wenzelm@42927
  1334
  sequent is \textbf{basic} if its left and right sides have a common
wenzelm@42927
  1335
  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
wenzelm@42927
  1336
  valid.
wenzelm@42927
  1337
wenzelm@42927
  1338
  Sequent rules are classified as \textbf{right} or \textbf{left},
wenzelm@42927
  1339
  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
wenzelm@42927
  1340
  Rules that operate on the right side are analogous to natural
wenzelm@42927
  1341
  deduction's introduction rules, and left rules are analogous to
wenzelm@42927
  1342
  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
wenzelm@42927
  1343
  is the rule
wenzelm@42927
  1344
  \[
wenzelm@42927
  1345
  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
wenzelm@42927
  1346
  \]
wenzelm@42927
  1347
  Applying the rule backwards, this breaks down some implication on
wenzelm@42927
  1348
  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
wenzelm@42927
  1349
  the sets of formulae that are unaffected by the inference.  The
wenzelm@42927
  1350
  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
wenzelm@42927
  1351
  single rule
wenzelm@42927
  1352
  \[
wenzelm@42927
  1353
  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
wenzelm@42927
  1354
  \]
wenzelm@42927
  1355
  This breaks down some disjunction on the right side, replacing it by
wenzelm@42927
  1356
  both disjuncts.  Thus, the sequent calculus is a kind of
wenzelm@42927
  1357
  multiple-conclusion logic.
wenzelm@42927
  1358
wenzelm@42927
  1359
  To illustrate the use of multiple formulae on the right, let us
wenzelm@42927
  1360
  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
wenzelm@42927
  1361
  backwards, we reduce this formula to a basic sequent:
wenzelm@42927
  1362
  \[
wenzelm@42927
  1363
  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
wenzelm@42927
  1364
    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
wenzelm@42927
  1365
      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
wenzelm@42927
  1366
        {@{text "P, Q \<turnstile> Q, P"}}}}
wenzelm@42927
  1367
  \]
wenzelm@42927
  1368
wenzelm@42927
  1369
  This example is typical of the sequent calculus: start with the
wenzelm@42927
  1370
  desired theorem and apply rules backwards in a fairly arbitrary
wenzelm@42927
  1371
  manner.  This yields a surprisingly effective proof procedure.
wenzelm@42927
  1372
  Quantifiers add only a few complications, since Isabelle handles
wenzelm@42927
  1373
  parameters and schematic variables.  See \cite[Chapter
wenzelm@42927
  1374
  10]{paulson-ml2} for further discussion.  *}
wenzelm@42927
  1375
wenzelm@42927
  1376
wenzelm@42927
  1377
subsubsection {* Simulating sequents by natural deduction *}
wenzelm@42927
  1378
wenzelm@42927
  1379
text {* Isabelle can represent sequents directly, as in the
wenzelm@42927
  1380
  object-logic LK.  But natural deduction is easier to work with, and
wenzelm@42927
  1381
  most object-logics employ it.  Fortunately, we can simulate the
wenzelm@42927
  1382
  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
wenzelm@42927
  1383
  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> ... \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
wenzelm@42927
  1384
  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
wenzelm@42927
  1385
  Elim-resolution plays a key role in simulating sequent proofs.
wenzelm@42927
  1386
wenzelm@42927
  1387
  We can easily handle reasoning on the left.  Elim-resolution with
wenzelm@42927
  1388
  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
wenzelm@42927
  1389
  a similar effect as the corresponding sequent rules.  For the other
wenzelm@42927
  1390
  connectives, we use sequent-style elimination rules instead of
wenzelm@42927
  1391
  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
wenzelm@42927
  1392
  But note that the rule @{text "(\<not>L)"} has no effect under our
wenzelm@42927
  1393
  representation of sequents!
wenzelm@42927
  1394
  \[
wenzelm@42927
  1395
  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
wenzelm@42927
  1396
  \]
wenzelm@42927
  1397
wenzelm@42927
  1398
  What about reasoning on the right?  Introduction rules can only
wenzelm@42927
  1399
  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
wenzelm@42927
  1400
  other right-side formulae are represented as negated assumptions,
wenzelm@42927
  1401
  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
wenzelm@42927
  1402
  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
wenzelm@42927
  1403
  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
wenzelm@42927
  1404
wenzelm@42927
  1405
  To ensure that swaps occur only when necessary, each introduction
wenzelm@42927
  1406
  rule is converted into a swapped form: it is resolved with the
wenzelm@42927
  1407
  second premise of @{text "(swap)"}.  The swapped form of @{text
wenzelm@42927
  1408
  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
wenzelm@42927
  1409
  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
wenzelm@42927
  1410
wenzelm@42927
  1411
  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
wenzelm@42927
  1412
  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
wenzelm@42927
  1413
wenzelm@42927
  1414
  Swapped introduction rules are applied using elim-resolution, which
wenzelm@42927
  1415
  deletes the negated formula.  Our representation of sequents also
wenzelm@42927
  1416
  requires the use of ordinary introduction rules.  If we had no
wenzelm@42927
  1417
  regard for readability of intermediate goal states, we could treat
wenzelm@42927
  1418
  the right side more uniformly by representing sequents as @{text
wenzelm@42927
  1419
  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
wenzelm@42927
  1420
*}
wenzelm@42927
  1421
wenzelm@42927
  1422
wenzelm@42927
  1423
subsubsection {* Extra rules for the sequent calculus *}
wenzelm@42927
  1424
wenzelm@42927
  1425
text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
wenzelm@42927
  1426
  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
wenzelm@42927
  1427
  In addition, we need rules to embody the classical equivalence
wenzelm@42927
  1428
  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
wenzelm@42927
  1429
  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
wenzelm@42927
  1430
  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
wenzelm@42927
  1431
wenzelm@42927
  1432
  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
wenzelm@42927
  1433
  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
wenzelm@42927
  1434
wenzelm@42927
  1435
  Quantifier replication also requires special rules.  In classical
wenzelm@42927
  1436
  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
wenzelm@42927
  1437
  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
wenzelm@42927
  1438
  \[
wenzelm@42927
  1439
  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
wenzelm@42927
  1440
  \qquad
wenzelm@42927
  1441
  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
wenzelm@42927
  1442
  \]
wenzelm@42927
  1443
  Thus both kinds of quantifier may be replicated.  Theorems requiring
wenzelm@42927
  1444
  multiple uses of a universal formula are easy to invent; consider
wenzelm@42927
  1445
  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
wenzelm@42927
  1446
  @{text "n > 1"}.  Natural examples of the multiple use of an
wenzelm@42927
  1447
  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
wenzelm@42927
  1448
  \<longrightarrow> P y"}.
wenzelm@42927
  1449
wenzelm@42927
  1450
  Forgoing quantifier replication loses completeness, but gains
wenzelm@42927
  1451
  decidability, since the search space becomes finite.  Many useful
wenzelm@42927
  1452
  theorems can be proved without replication, and the search generally
wenzelm@42927
  1453
  delivers its verdict in a reasonable time.  To adopt this approach,
wenzelm@42927
  1454
  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
wenzelm@42927
  1455
  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
wenzelm@42927
  1456
  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
wenzelm@42927
  1457
  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
wenzelm@42927
  1458
wenzelm@42927
  1459
  Elim-resolution with this rule will delete the universal formula
wenzelm@42927
  1460
  after a single use.  To replicate universal quantifiers, replace the
wenzelm@42927
  1461
  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
wenzelm@42927
  1462
wenzelm@42927
  1463
  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
wenzelm@42927
  1464
  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
wenzelm@42927
  1465
wenzelm@42927
  1466
  All introduction rules mentioned above are also useful in swapped
wenzelm@42927
  1467
  form.
wenzelm@42927
  1468
wenzelm@42927
  1469
  Replication makes the search space infinite; we must apply the rules
wenzelm@42927
  1470
  with care.  The classical reasoner distinguishes between safe and
wenzelm@42927
  1471
  unsafe rules, applying the latter only when there is no alternative.
wenzelm@42927
  1472
  Depth-first search may well go down a blind alley; best-first search
wenzelm@42927
  1473
  is better behaved in an infinite search space.  However, quantifier
wenzelm@42927
  1474
  replication is too expensive to prove any but the simplest theorems.
wenzelm@42927
  1475
*}
wenzelm@42927
  1476
wenzelm@42927
  1477
wenzelm@42928
  1478
subsection {* Rule declarations *}
wenzelm@42928
  1479
wenzelm@42928
  1480
text {* The proof tools of the Classical Reasoner depend on
wenzelm@42928
  1481
  collections of rules declared in the context, which are classified
wenzelm@42928
  1482
  as introduction, elimination or destruction and as \emph{safe} or
wenzelm@42928
  1483
  \emph{unsafe}.  In general, safe rules can be attempted blindly,
wenzelm@42928
  1484
  while unsafe rules must be used with care.  A safe rule must never
wenzelm@42928
  1485
  reduce a provable goal to an unprovable set of subgoals.
wenzelm@42928
  1486
wenzelm@42928
  1487
  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
wenzelm@42928
  1488
  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
wenzelm@42928
  1489
  unprovable subgoal.  Any rule is unsafe whose premises contain new
wenzelm@42928
  1490
  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
wenzelm@42928
  1491
  unsafe, since it is applied via elim-resolution, which discards the
wenzelm@42928
  1492
  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
wenzelm@42928
  1493
  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
wenzelm@42928
  1494
  unsafe for similar reasons.  The quantifier duplication rule @{text
wenzelm@42928
  1495
  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
wenzelm@42928
  1496
  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
wenzelm@42928
  1497
  looping.  In classical first-order logic, all rules are safe except
wenzelm@42928
  1498
  those mentioned above.
wenzelm@42928
  1499
wenzelm@42928
  1500
  The safe~/ unsafe distinction is vague, and may be regarded merely
wenzelm@42928
  1501
  as a way of giving some rules priority over others.  One could argue
wenzelm@42928
  1502
  that @{text "(\<or>E)"} is unsafe, because repeated application of it
wenzelm@42928
  1503
  could generate exponentially many subgoals.  Induction rules are
wenzelm@42928
  1504
  unsafe because inductive proofs are difficult to set up
wenzelm@42928
  1505
  automatically.  Any inference is unsafe that instantiates an unknown
wenzelm@42928
  1506
  in the proof state --- thus matching must be used, rather than
wenzelm@42928
  1507
  unification.  Even proof by assumption is unsafe if it instantiates
wenzelm@42928
  1508
  unknowns shared with other subgoals.
wenzelm@42928
  1509
wenzelm@42928
  1510
  \begin{matharray}{rcl}
wenzelm@42928
  1511
    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
wenzelm@42928
  1512
    @{attribute_def intro} & : & @{text attribute} \\
wenzelm@42928
  1513
    @{attribute_def elim} & : & @{text attribute} \\
wenzelm@42928
  1514
    @{attribute_def dest} & : & @{text attribute} \\
wenzelm@42928
  1515
    @{attribute_def rule} & : & @{text attribute} \\
wenzelm@42928
  1516
    @{attribute_def iff} & : & @{text attribute} \\
wenzelm@42928
  1517
    @{attribute_def swapped} & : & @{text attribute} \\
wenzelm@42928
  1518
  \end{matharray}
wenzelm@42928
  1519
wenzelm@55112
  1520
  @{rail \<open>
wenzelm@42928
  1521
    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
wenzelm@42928
  1522
    ;
wenzelm@42928
  1523
    @@{attribute rule} 'del'
wenzelm@42928
  1524
    ;
wenzelm@42928
  1525
    @@{attribute iff} (((() | 'add') '?'?) | 'del')
wenzelm@55112
  1526
  \<close>}
wenzelm@42928
  1527
wenzelm@42928
  1528
  \begin{description}
wenzelm@42928
  1529
wenzelm@42928
  1530
  \item @{command "print_claset"} prints the collection of rules
wenzelm@42928
  1531
  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
wenzelm@42928
  1532
  within the context.
wenzelm@42928
  1533
wenzelm@42928
  1534
  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
wenzelm@42928
  1535
  declare introduction, elimination, and destruction rules,
wenzelm@42928
  1536
  respectively.  By default, rules are considered as \emph{unsafe}
wenzelm@42928
  1537
  (i.e.\ not applied blindly without backtracking), while ``@{text
wenzelm@42928
  1538
  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
wenzelm@42928
  1539
  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
wenzelm@42928
  1540
  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
wenzelm@42928
  1541
  of the @{method rule} method).  The optional natural number
wenzelm@42928
  1542
  specifies an explicit weight argument, which is ignored by the
wenzelm@42928
  1543
  automated reasoning tools, but determines the search order of single
wenzelm@42928
  1544
  rule steps.
wenzelm@42928
  1545
wenzelm@42928
  1546
  Introduction rules are those that can be applied using ordinary
wenzelm@42928
  1547
  resolution.  Their swapped forms are generated internally, which
wenzelm@42928
  1548
  will be applied using elim-resolution.  Elimination rules are
wenzelm@42928
  1549
  applied using elim-resolution.  Rules are sorted by the number of
wenzelm@42928
  1550
  new subgoals they will yield; rules that generate the fewest
wenzelm@42928
  1551
  subgoals will be tried first.  Otherwise, later declarations take
wenzelm@42928
  1552
  precedence over earlier ones.
wenzelm@42928
  1553
wenzelm@42928
  1554
  Rules already present in the context with the same classification
wenzelm@42928
  1555
  are ignored.  A warning is printed if the rule has already been
wenzelm@42928
  1556
  added with some other classification, but the rule is added anyway
wenzelm@42928
  1557
  as requested.
wenzelm@42928
  1558
wenzelm@42928
  1559
  \item @{attribute rule}~@{text del} deletes all occurrences of a
wenzelm@42928
  1560
  rule from the classical context, regardless of its classification as
wenzelm@42928
  1561
  introduction~/ elimination~/ destruction and safe~/ unsafe.
wenzelm@42928
  1562
wenzelm@42928
  1563
  \item @{attribute iff} declares logical equivalences to the
wenzelm@42928
  1564
  Simplifier and the Classical Reasoner at the same time.
wenzelm@42928
  1565
  Non-conditional rules result in a safe introduction and elimination
wenzelm@42928
  1566
  pair; conditional ones are considered unsafe.  Rules with negative
wenzelm@42928
  1567
  conclusion are automatically inverted (using @{text "\<not>"}-elimination
wenzelm@42928
  1568
  internally).
wenzelm@42928
  1569
wenzelm@42928
  1570
  The ``@{text "?"}'' version of @{attribute iff} declares rules to
wenzelm@42928
  1571
  the Isabelle/Pure context only, and omits the Simplifier
wenzelm@42928
  1572
  declaration.
wenzelm@42928
  1573
wenzelm@42928
  1574
  \item @{attribute swapped} turns an introduction rule into an
wenzelm@42928
  1575
  elimination, by resolving with the classical swap principle @{text
wenzelm@42928
  1576
  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
wenzelm@42928
  1577
  illustrative purposes: the Classical Reasoner already swaps rules
wenzelm@42928
  1578
  internally as explained above.
wenzelm@42928
  1579
wenzelm@28760
  1580
  \end{description}
wenzelm@26782
  1581
*}
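
text {* For example, the following declarations illustrate the above
  attribute syntax; these particular rules are already classified like this
  in Isabelle/HOL, so repeating the declarations has no further effect: *}

declare conjI [intro!]  and  disjE [elim!]  and  exI [intro]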
wenzelm@26782
  1582
wenzelm@26782
  1583
wenzelm@43365
  1584
subsection {* Structured methods *}
wenzelm@43365
  1585
wenzelm@43365
  1586
text {*
wenzelm@43365
  1587
  \begin{matharray}{rcl}
wenzelm@43365
  1588
    @{method_def rule} & : & @{text method} \\
wenzelm@43365
  1589
    @{method_def contradiction} & : & @{text method} \\
wenzelm@43365
  1590
  \end{matharray}
wenzelm@43365
  1591
wenzelm@55112
  1592
  @{rail \<open>
wenzelm@43365
  1593
    @@{method rule} @{syntax thmrefs}?
wenzelm@55112
  1594
  \<close>}
wenzelm@43365
  1595
wenzelm@43365
  1596
  \begin{description}
wenzelm@43365
  1597
wenzelm@43365
  1598
  \item @{method rule} as offered by the Classical Reasoner is a
wenzelm@43365
  1599
  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
wenzelm@43365
  1600
  versions work the same, but the classical version observes the
wenzelm@43365
  1601
  classical rule context in addition to that of Isabelle/Pure.
wenzelm@43365
  1602
wenzelm@43365
  1603
  Common object logics (HOL, ZF, etc.) declare a rich collection of
wenzelm@43365
  1604
  classical rules (even if these would qualify as intuitionistic
wenzelm@43365
  1605
  ones), but only few declarations to the rule context of
wenzelm@43365
  1606
  Isabelle/Pure (\secref{sec:pure-meth-att}).
wenzelm@43365
  1607
wenzelm@43365
  1608
  \item @{method contradiction} solves some goal by contradiction,
wenzelm@43365
  1609
  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
wenzelm@43365
  1610
  facts, which are guaranteed to participate, may appear in either
wenzelm@43365
  1611
  order.
wenzelm@43365
  1612
wenzelm@43365
  1613
  \end{description}
wenzelm@43365
  1614
*}
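
text {* For example, a pair of contradictory facts may be chained in either
  order: *}

notepad
begin
  assume a: "A" and b: "\<not> A"
  from a and b have C by contradiction
  from b and a have C by contradiction
end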
wenzelm@43365
  1615
wenzelm@43365
  1616
wenzelm@50070
  1617
subsection {* Fully automated methods *}
wenzelm@26782
  1618
wenzelm@26782
  1619
text {*
wenzelm@26782
  1620
  \begin{matharray}{rcl}
wenzelm@28761
  1621
    @{method_def blast} & : & @{text method} \\
wenzelm@42930
  1622
    @{method_def auto} & : & @{text method} \\
wenzelm@42930
  1623
    @{method_def force} & : & @{text method} \\
wenzelm@28761
  1624
    @{method_def fast} & : & @{text method} \\
wenzelm@28761
  1625
    @{method_def slow} & : & @{text method} \\
wenzelm@28761
  1626
    @{method_def best} & : & @{text method} \\
nipkow@44911
  1627
    @{method_def fastforce} & : & @{text method} \\
wenzelm@28761
  1628
    @{method_def slowsimp} & : & @{text method} \\
wenzelm@28761
  1629
    @{method_def bestsimp} & : & @{text method} \\
wenzelm@43367
  1630
    @{method_def deepen} & : & @{text method} \\
wenzelm@26782
  1631
  \end{matharray}
wenzelm@26782
  1632
wenzelm@55112
  1633
  @{rail \<open>
wenzelm@42930
  1634
    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
wenzelm@42930
  1635
    ;
wenzelm@42596
  1636
    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
wenzelm@26782
  1637
    ;
wenzelm@42930
  1638
    @@{method force} (@{syntax clasimpmod} * )
wenzelm@42930
  1639
    ;
wenzelm@42930
  1640
    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
wenzelm@26782
  1641
    ;
nipkow@44911
  1642
    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
wenzelm@42930
  1643
      (@{syntax clasimpmod} * )
wenzelm@42930
  1644
    ;
wenzelm@43367
  1645
    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
wenzelm@43367
  1646
    ;
wenzelm@42930
  1647
    @{syntax_def clamod}:
wenzelm@42930
  1648
      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
wenzelm@42930
  1649
    ;
wenzelm@42596
  1650
    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
wenzelm@26782
  1651
      ('cong' | 'split') (() | 'add' | 'del') |
wenzelm@26782
  1652
      'iff' (((() | 'add') '?'?) | 'del') |
wenzelm@42596
  1653
      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
wenzelm@55112
  1654
  \<close>}
wenzelm@26782
  1655
wenzelm@28760
  1656
  \begin{description}
wenzelm@26782
  1657
wenzelm@42930
  1658
  \item @{method blast} is a separate classical tableau prover that
wenzelm@42930
  1659
  uses the same classical rule declarations as explained before.
wenzelm@42930
  1660
wenzelm@42930
  1661
  Proof search is coded directly in ML using special data structures.
wenzelm@42930
  1662
  A successful proof is then reconstructed using regular Isabelle
wenzelm@42930
  1663
  inferences.  It is faster and more powerful than the other classical
wenzelm@42930
  1664
  reasoning tools, but has major limitations too.
wenzelm@42930
  1665
wenzelm@42930
  1666
  \begin{itemize}
wenzelm@42930
  1667
wenzelm@42930
  1668
  \item It does not use the classical wrapper tacticals, such as the
nipkow@44911
  1669
  integration with the Simplifier of @{method fastforce}.
wenzelm@42930
  1670
wenzelm@42930
  1671
  \item It does not perform higher-order unification, as needed by the
wenzelm@42930
  1672
  rule @{thm [source=false] rangeI} in HOL.  There are often
wenzelm@42930
  1673
  alternatives to such rules, for example @{thm [source=false]
wenzelm@42930
  1674
  range_eqI}.
wenzelm@42930
  1675
wenzelm@42930
  1676
  \item Function variables may only be applied to parameters of the
wenzelm@42930
  1677
  subgoal.  (This restriction arises because the prover does not use
wenzelm@42930
  1678
  higher-order unification.)  If other function variables are present
wenzelm@42930
  1679
  then the prover will fail with the message \texttt{Function Var's
wenzelm@42930
  1680
  argument not a bound variable}.
wenzelm@42930
  1681
wenzelm@42930
  1682
  \item Its proof strategy is more general than @{method fast} but can
wenzelm@42930
  1683
  be slower.  If @{method blast} fails or seems to be running forever,
wenzelm@42930
  1684
  try @{method fast} and the other proof tools described below.
wenzelm@42930
  1685
wenzelm@42930
  1686
  \end{itemize}
wenzelm@42930
  1687
wenzelm@42930
  1688
  The optional integer argument specifies a bound for the number of
wenzelm@42930
  1689
  unsafe steps used in a proof.  By default, @{method blast} starts
wenzelm@42930
  1690
  with a bound of 0 and increases it successively to 20.  In contrast,
wenzelm@42930
  1691
  @{text "(blast lim)"} tries to prove the goal using a search bound
wenzelm@42930
  1692
  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
wenzelm@42930
  1693
  be made much faster by supplying the successful search bound to this
wenzelm@42930
  1694
  proof method instead.
wenzelm@42930
  1695
wenzelm@42930
  1696
  \item @{method auto} combines classical reasoning with
wenzelm@42930
  1697
  simplification.  It is intended for situations where there are a lot
wenzelm@42930
  1698
  of mostly trivial subgoals; it proves all the easy ones, leaving the
wenzelm@42930
  1699
  ones it cannot prove.  Occasionally, attempting to prove the hard
wenzelm@42930
  1700
  ones may take a long time.
wenzelm@42930
  1701
wenzelm@43332
  1702
  The optional depth arguments in @{text "(auto m n)"} refer to its
wenzelm@43332
  1703
  builtin classical reasoning procedures: @{text m} (default 4) is for
wenzelm@43332
  1704
  @{method blast}, which is tried first, and @{text n} (default 2) is
wenzelm@43332
  1705
  for a slower but more general alternative that also takes wrappers
wenzelm@43332
  1706
  into account.
wenzelm@42930
  1707
wenzelm@42930
  1708
  \item @{method force} is intended to prove the first subgoal
wenzelm@42930
  1709
  completely, using many fancy proof tools and performing a rather
wenzelm@42930
  1710
  exhaustive search.  As a result, proof attempts may take rather long
wenzelm@42930
  1711
  or diverge easily.
wenzelm@42930
  1712
wenzelm@42930
  1713
  \item @{method fast}, @{method best}, @{method slow} attempt to
wenzelm@42930
  1714
  prove the first subgoal using sequent-style reasoning as explained
wenzelm@42930
  1715
  before.  Unlike @{method blast}, they construct proofs directly in
wenzelm@42930
  1716
  Isabelle.
wenzelm@26782
  1717
wenzelm@42930
  1718
  There is a difference in search strategy and back-tracking: @{method
wenzelm@42930
  1719
  fast} uses depth-first search and @{method best} uses best-first
wenzelm@42930
  1720
  search (guided by a heuristic function: normally the total size of
wenzelm@42930
  1721
  the proof state).
wenzelm@42930
  1722
wenzelm@42930
  1723
  Method @{method slow} is like @{method fast}, but conducts a broader
wenzelm@42930
  1724
  search: it may, when backtracking from a failed proof attempt, undo
wenzelm@42930
  1725
  even the step of proving a subgoal by assumption.
wenzelm@42930
  1726
wenzelm@47967
  1727
  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
wenzelm@47967
  1728
  are like @{method fast}, @{method slow}, @{method best},
wenzelm@47967
  1729
  respectively, but use the Simplifier as additional wrapper. The name
wenzelm@47967
  1730
  @{method fastforce} reflects the behaviour of this popular method
wenzelm@47967
  1731
  better without requiring an understanding of its implementation.
wenzelm@42930
  1732
wenzelm@43367
  1733
  \item @{method deepen} works by exhaustive search up to a certain
wenzelm@43367
  1734
  depth.  The start depth is 4 (unless specified explicitly), and the
wenzelm@43367
  1735
  depth is increased iteratively up to 10.  Unsafe rules are modified
wenzelm@43367
  1736
  to preserve the formula they act on, so that it can be used repeatedly.
wenzelm@43367
  1737
  This method can prove more goals than @{method fast}, but is much
wenzelm@43367
  1738
  slower, for example if the assumptions have many universal
wenzelm@43367
  1739
  quantifiers.
wenzelm@43367
  1740
wenzelm@42930
  1741
  \end{description}
wenzelm@42930
  1742
wenzelm@42930
  1743
  Any of the above methods support additional modifiers of the context
wenzelm@42930
  1744
  of classical (and simplifier) rules, but the ones related to the
wenzelm@42930
  1745
  Simplifier are explicitly prefixed by @{text simp} here.  The
wenzelm@42930
  1746
  semantics of these ad-hoc rule declarations is analogous to the
wenzelm@42930
  1747
  attributes given before.  Facts provided by forward chaining are
wenzelm@42930
  1748
  inserted into the goal before commencing proof search.
wenzelm@42930
  1749
*}
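
text {* For example (the goals and the concrete bounds below are
  chosen purely for illustration), an explicit search bound for
  @{method blast} and the depth arguments of @{method auto} are
  supplied like this: *}

notepad
begin
  have "\<forall>x. P x \<longrightarrow> P x"
    by (blast 2)  (* explicit search bound instead of iterative deepening *)
  have "(\<exists>x. P x) \<longrightarrow> (\<exists>x. P x \<or> Q x)"
    by (auto 4 2)  (* the default depth arguments, given explicitly *)
end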
subsection {* Partially automated methods *}
text {* These proof methods may help in situations where the
  fully-automated tools fail.  The result is a simpler subgoal that
  can be tackled by other means, such as by manual instantiation of
  quantifiers (see also the example below).

  \begin{matharray}{rcl}
    @{method_def safe} & : & @{text method} \\
    @{method_def clarify} & : & @{text method} \\
    @{method_def clarsimp} & : & @{text method} \\
  \end{matharray}

  @{rail \<open>
    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
    ;
    @@{method clarsimp} (@{syntax clasimpmod} * )
  \<close>}

  \begin{description}

  \item @{method safe} repeatedly performs safe steps on all subgoals.
  It is deterministic, with at most one outcome.

  \item @{method clarify} performs a series of safe steps without
  splitting subgoals; see also @{method clarify_step}.

  \item @{method clarsimp} acts like @{method clarify}, but also does
  simplification.  Note that if the Simplifier context includes a
  splitter for the premises, the subgoal may still be split.

  \end{description}
*}
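
text {* For instance, in the following contrived example (invented
  only for illustration) @{method clarify} eliminates the conjunction
  in the premise by safe steps without splitting the subgoal, and the
  remaining, simpler subgoal is then closed by other means: *}

notepad
begin
  have "A \<and> (B \<or> C) \<Longrightarrow> (B \<or> C) \<and> A"
    apply clarify  (* safe steps only; the subgoal is not split *)
    by blast       (* finish off the simplified subgoal *)
end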
subsection {* Single-step tactics *}
text {*
  \begin{matharray}{rcl}
    @{method_def safe_step} & : & @{text method} \\
    @{method_def inst_step} & : & @{text method} \\
    @{method_def step} & : & @{text method} \\
    @{method_def slow_step} & : & @{text method} \\
    @{method_def clarify_step} & : & @{text method} \\
  \end{matharray}

  These are the primitive tactics behind the automated proof methods
  of the Classical Reasoner.  By calling them yourself, you can
  execute these procedures one step at a time; see the example below.

  \begin{description}

  \item @{method safe_step} performs a safe step on the first subgoal.
  The safe wrapper tacticals are applied to a tactic that may include
  proof by assumption or Modus Ponens (taking care not to instantiate
  unknowns), or substitution.

  \item @{method inst_step} is like @{method safe_step}, but allows
  unknowns to be instantiated.

  \item @{method step} is the basic step of the proof procedure; it
  operates on the first subgoal.  The unsafe wrapper tacticals are
  applied to a tactic that tries @{method safe}, @{method inst_step},
  or applies an unsafe rule from the context.

  \item @{method slow_step} resembles @{method step}, but allows
  backtracking between using safe rules with instantiation (@{method
  inst_step}) and using unsafe rules.  The resulting search space is
  larger.

  \item @{method clarify_step} performs a safe step on the first
  subgoal; no splitting step is applied.  For example, the subgoal
  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
  Modus Ponens, etc., may be performed provided they do not
  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
  be eliminated.  The safe wrapper tactical is applied.

  \end{description}
*}
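
text {* For example (again with a goal invented purely for
  illustration), a single safe step can be performed explicitly before
  handing the rest over to an automated method; stepping through a
  proof like this is mainly useful for inspecting what the automated
  methods do internally. *}

notepad
begin
  have "A \<and> B \<longrightarrow> B \<and> A"
    apply safe_step  (* one safe step, here the introduction of the implication *)
    by blast         (* the remaining subgoal is closed automatically *)
end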
subsection {* Modifying the search step *}
text {*
  \begin{mldecls}
    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
    @{index_ML_op addSWrapper: "Proof.context *
  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
    @{index_ML_op addSbefore: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op addSafter: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
    @{index_ML_op addWrapper: "Proof.context *
  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
    @{index_ML_op addbefore: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op addafter: "Proof.context *
  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
    @{index_ML addSss: "Proof.context -> Proof.context"} \\
    @{index_ML addss: "Proof.context -> Proof.context"} \\
  \end{mldecls}

  The proof strategy of the Classical Reasoner is simple.  Perform as
  many safe inferences as possible; or else, apply certain safe rules,
  allowing instantiation of unknowns; or else, apply an unsafe rule.
  The tactics also eliminate assumptions of the form @{text "x = t"}
  by substitution if they have been set up to do so.  They may perform
  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.

  The classical reasoning tools --- except @{method blast} --- allow
  this basic proof strategy to be modified by applying two lists of
  arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
  which is considered to contain safe wrappers only, affects @{method
  safe_step} and all the tactics that call it.  The second one, which
  may contain unsafe wrappers, affects the unsafe parts of @{method
  step}, @{method slow_step}, and the tactics that call them.  A
  wrapper transforms each step of the search, for example by
  attempting other tactics before or after the original step tactic.
  All members of a wrapper list are applied in turn to the respective
  step tactic.

  Initially the two wrapper lists are empty, which means no
  modification of the step tactics.  Safe and unsafe wrappers are
  added to a claset with the functions given below, supplying them
  with wrapper names.  These names may be used to selectively delete
  wrappers (see also the ML example below).

  \begin{description}

  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
  which should yield a safe tactic, to modify the existing safe step
  tactic.

  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
  safe wrapper, such that it is tried \emph{before} each safe step of
  the search.

  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
  safe wrapper, such that it is tried when a safe step of the search
  would fail.

  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
  the given name.

  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
  modify the existing (unsafe) step tactic.

  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated \emph{before}
  the result of each unsafe step.

  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
  unsafe wrapper, such that its result is concatenated \emph{after}
  the result of each unsafe step.

  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
  the given name.

  \item @{text "addSss"} adds the simpset of the context to its
  classical set.  The assumptions and goal will be simplified, in a
  rather safe way, after each safe step of the search.

  \item @{text "addss"} adds the simpset of the context to its
  classical set.  The assumptions and goal will be simplified before
  each unsafe step of the search.

  \end{description}
*}
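
text {* The following ML sketch illustrates the combinators above; the
  function names, the wrapper names, and the particular tactics are
  arbitrary choices for demonstration purposes only.  The first
  function yields a context whose safe steps first try
  @{text "eq_assume_tac"} and which offers a simplification attempt as
  a further alternative after each unsafe step; the second function
  removes these wrappers again via their names. *}

ML {*
  (*illustrative sketch: names and tactics are arbitrary examples*)
  fun my_extend_claset ctxt =
    (ctxt addSbefore ("my_eq_assume", fn _ => eq_assume_tac))
      addafter ("my_simp", fn ctxt' => CHANGED o asm_full_simp_tac ctxt')

  (*wrappers are deleted selectively via their names*)
  fun my_reset_claset ctxt =
    (ctxt delSWrapper "my_eq_assume") delWrapper "my_simp"
*}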
section {* Object-logic setup \label{sec:object-logic} *}
text {*
  \begin{matharray}{rcl}
    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
    @{method_def atomize} & : & @{text method} \\
    @{attribute_def atomize} & : & @{text attribute} \\
    @{attribute_def rule_format} & : & @{text attribute} \\
    @{attribute_def rulify} & : & @{text attribute} \\
  \end{matharray}

  The very starting point for any Isabelle object-logic is a ``truth
  judgment'' that links object-level statements to the meta-logic
  (with its minimal language of @{text prop} that covers universal
  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).

  Common object-logics are sufficiently expressive to internalize rule
  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
  language.  This is useful in certain situations where a rule needs
  to be viewed as an atomic statement from the meta-level perspective,
  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.

  From the following language elements, only the @{method atomize}
  method and @{attribute rule_format} attribute are occasionally
  required by end-users (see also the example below); the rest is for
  those who need to set up their own object-logic.  In the latter case
  existing formulations of Isabelle/FOL or Isabelle/HOL may be taken
  as realistic examples.

  Generic tools may refer to the information provided by object-logic
  declarations internally.

  @{rail \<open>
    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
    ;
    @@{attribute atomize} ('(' 'full' ')')?
    ;
    @@{attribute rule_format} ('(' 'noasm' ')')?
  \<close>}

  \begin{description}

  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
  @{text c} as the truth judgment of the current object-logic.  Its
  type @{text \<sigma>} should specify a coercion of the category of
  object-level propositions to @{text prop} of the Pure meta-logic;
  the mixfix annotation @{text "(mx)"} would typically just link the
  object language (internally of syntactic category @{text logic})
  with that of @{text prop}.  Only one @{command "judgment"}
  declaration may be given in any theory development.

  \item @{method atomize} (as a method) rewrites any non-atomic
  premises of a subgoal, using the meta-level equations declared via
  @{attribute atomize} (as an attribute) beforehand.  As a result,
  heavily nested goals become amenable to fundamental operations such
  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
  "(full)"}'' option here means to turn the whole subgoal into an
  object-statement (if possible), including the outermost parameters
  and assumptions as well.

  A typical collection of @{attribute atomize} rules for a particular
  object-logic would provide an internalization for each of the
  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
  Meta-level conjunction should be covered as well (this is
  particularly important for locales, see \secref{sec:locale}).

  \item @{attribute rule_format} rewrites a theorem by the equalities
  declared as @{attribute rulify} rules in the current object-logic.
  By default, the result is fully normalized, including assumptions
  and conclusions at any depth.  The @{text "(no_asm)"} option
  restricts the transformation to the conclusion of a rule.

  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
  rule_format} is to replace (bounded) universal quantification
  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.

  \end{description}
*}
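
text {* As a small illustration (the statements are deliberately
  trivial), the @{method atomize} method turns a meta-level statement
  into its object-level counterpart before the proof is finished
  automatically, while the @{attribute rule_format} attribute
  transforms a proven object-level statement back into rule form: *}

notepad
begin
  have "\<And>x. x \<in> A \<Longrightarrow> x \<in> A"
    apply (atomize (full))  (* the subgoal becomes \<forall>x. x \<in> A \<longrightarrow> x \<in> A *)
    by blast
  have "\<forall>x. x \<in> A \<longrightarrow> x \<in> A" by blast
  note this [rule_format]   (* the fact is now in rule form x \<in> A \<Longrightarrow> x \<in> A *)
end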
section {* Tracing higher-order unification *}
text {*
  \begin{tabular}{rcll}
    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
  \end{tabular}
  \medskip

  Higher-order unification works well in most practical situations,
  but sometimes needs extra care to identify problems.  These tracing
  options may help.

  \begin{description}

  \item @{attribute unify_trace_simp} controls tracing of the
  simplification phase of higher-order unification.

  \item @{attribute unify_trace_types} controls warnings of
  incompleteness, which are issued when unification does not consider
  all possible instantiations of schematic type variables.

  \item @{attribute unify_trace_bound} determines the search depth at
  which unification starts to print tracing information; a value of 0
  enables full tracing.  At the default value, tracing information is
  almost never printed in practice.

  \item @{attribute unify_search_bound} prevents unification from
  searching past the given depth.  Because of this bound, higher-order
  unification cannot return an infinite sequence, though it can return
  an exponentially long one.  The search rarely approaches the default
  value in practice.  If the search is cut off, unification prints a
  warning ``Unification bound exceeded''.

  \end{description}

  \begin{warn}
  Options for unification cannot be modified in a local context.  Only
  the global theory content is taken into account.
  \end{warn}
*}
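
text {* Being global options, these are changed at the theory level,
  e.g.\ by @{command "declare"}.  The following declaration merely
  restates the default values and serves only to illustrate the
  syntax: *}

declare [[unify_trace_bound = 50, unify_search_bound = 60]]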
end