even more standardized doc session names after #b266e7a86485
authorhaftmann
Tue Apr 08 12:46:38 2014 +0200 (2014-04-08)
changeset 56451856492b0f755
parent 56450 16d4213d4cbc
child 56452 0c98c9118407
even more standardized doc session names after #b266e7a86485
src/Doc/Isar-Ref/Base.thy
src/Doc/Isar-Ref/Document_Preparation.thy
src/Doc/Isar-Ref/First_Order_Logic.thy
src/Doc/Isar-Ref/Framework.thy
src/Doc/Isar-Ref/Generic.thy
src/Doc/Isar-Ref/HOL_Specific.thy
src/Doc/Isar-Ref/Inner_Syntax.thy
src/Doc/Isar-Ref/ML_Tactic.thy
src/Doc/Isar-Ref/Misc.thy
src/Doc/Isar-Ref/Outer_Syntax.thy
src/Doc/Isar-Ref/Preface.thy
src/Doc/Isar-Ref/Proof.thy
src/Doc/Isar-Ref/Quick_Reference.thy
src/Doc/Isar-Ref/Spec.thy
src/Doc/Isar-Ref/Symbols.thy
src/Doc/Isar-Ref/Synopsis.thy
src/Doc/Isar-Ref/document/build
src/Doc/Isar-Ref/document/isar-vm.pdf
src/Doc/Isar-Ref/document/isar-vm.svg
src/Doc/Isar-Ref/document/root.tex
src/Doc/Isar-Ref/document/showsymbols
src/Doc/Isar-Ref/document/style.sty
src/Doc/Isar_Ref/Base.thy
src/Doc/Isar_Ref/Document_Preparation.thy
src/Doc/Isar_Ref/First_Order_Logic.thy
src/Doc/Isar_Ref/Framework.thy
src/Doc/Isar_Ref/Generic.thy
src/Doc/Isar_Ref/HOL_Specific.thy
src/Doc/Isar_Ref/Inner_Syntax.thy
src/Doc/Isar_Ref/ML_Tactic.thy
src/Doc/Isar_Ref/Misc.thy
src/Doc/Isar_Ref/Outer_Syntax.thy
src/Doc/Isar_Ref/Preface.thy
src/Doc/Isar_Ref/Proof.thy
src/Doc/Isar_Ref/Quick_Reference.thy
src/Doc/Isar_Ref/Spec.thy
src/Doc/Isar_Ref/Symbols.thy
src/Doc/Isar_Ref/Synopsis.thy
src/Doc/Isar_Ref/document/build
src/Doc/Isar_Ref/document/isar-vm.pdf
src/Doc/Isar_Ref/document/isar-vm.svg
src/Doc/Isar_Ref/document/root.tex
src/Doc/Isar_Ref/document/showsymbols
src/Doc/Isar_Ref/document/style.sty
src/Doc/JEdit/document/build
src/Doc/Logics-ZF/FOL_examples.thy
src/Doc/Logics-ZF/IFOL_examples.thy
src/Doc/Logics-ZF/If.thy
src/Doc/Logics-ZF/ZF_Isar.thy
src/Doc/Logics-ZF/ZF_examples.thy
src/Doc/Logics-ZF/document/FOL.tex
src/Doc/Logics-ZF/document/ZF.tex
src/Doc/Logics-ZF/document/build
src/Doc/Logics-ZF/document/logics.sty
src/Doc/Logics-ZF/document/root.tex
src/Doc/Logics_ZF/FOL_examples.thy
src/Doc/Logics_ZF/IFOL_examples.thy
src/Doc/Logics_ZF/If.thy
src/Doc/Logics_ZF/ZF_Isar.thy
src/Doc/Logics_ZF/ZF_examples.thy
src/Doc/Logics_ZF/document/FOL.tex
src/Doc/Logics_ZF/document/ZF.tex
src/Doc/Logics_ZF/document/build
src/Doc/Logics_ZF/document/logics.sty
src/Doc/Logics_ZF/document/root.tex
src/Doc/Prog-Prove/Basics.thy
src/Doc/Prog-Prove/Bool_nat_list.thy
src/Doc/Prog-Prove/Isar.thy
src/Doc/Prog-Prove/LaTeXsugar.thy
src/Doc/Prog-Prove/Logic.thy
src/Doc/Prog-Prove/MyList.thy
src/Doc/Prog-Prove/Types_and_funs.thy
src/Doc/Prog-Prove/document/bang.pdf
src/Doc/Prog-Prove/document/build
src/Doc/Prog-Prove/document/intro-isabelle.tex
src/Doc/Prog-Prove/document/mathpartir.sty
src/Doc/Prog-Prove/document/prelude.tex
src/Doc/Prog-Prove/document/root.bib
src/Doc/Prog-Prove/document/root.tex
src/Doc/Prog-Prove/document/svmono.cls
src/Doc/Prog_Prove/Basics.thy
src/Doc/Prog_Prove/Bool_nat_list.thy
src/Doc/Prog_Prove/Isar.thy
src/Doc/Prog_Prove/LaTeXsugar.thy
src/Doc/Prog_Prove/Logic.thy
src/Doc/Prog_Prove/MyList.thy
src/Doc/Prog_Prove/Types_and_funs.thy
src/Doc/Prog_Prove/document/bang.pdf
src/Doc/Prog_Prove/document/build
src/Doc/Prog_Prove/document/intro-isabelle.tex
src/Doc/Prog_Prove/document/mathpartir.sty
src/Doc/Prog_Prove/document/prelude.tex
src/Doc/Prog_Prove/document/root.bib
src/Doc/Prog_Prove/document/root.tex
src/Doc/Prog_Prove/document/svmono.cls
src/Doc/ROOT
src/Doc/System/document/build
     1.1 --- a/src/Doc/Isar-Ref/Base.thy	Mon Apr 07 16:37:57 2014 +0200
     1.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.3 @@ -1,7 +0,0 @@
     1.4 -theory Base
     1.5 -imports Pure
     1.6 -begin
     1.7 -
     1.8 -ML_file "../antiquote_setup.ML"
     1.9 -
    1.10 -end
     2.1 --- a/src/Doc/Isar-Ref/Document_Preparation.thy	Mon Apr 07 16:37:57 2014 +0200
     2.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.3 @@ -1,589 +0,0 @@
     2.4 -theory Document_Preparation
     2.5 -imports Base Main
     2.6 -begin
     2.7 -
     2.8 -chapter {* Document preparation \label{ch:document-prep} *}
     2.9 -
    2.10 -text {* Isabelle/Isar provides a simple document preparation system
    2.11 -  based on {PDF-\LaTeX}, with support for hyperlinks and bookmarks
    2.12 -  within that format.  This allows to produce papers, books, theses
    2.13 -  etc.\ from Isabelle theory sources.
    2.14 -
    2.15 -  {\LaTeX} output is generated while processing a \emph{session} in
    2.16 -  batch mode, as explained in the \emph{The Isabelle System Manual}
    2.17 -  \cite{isabelle-sys}.  The main Isabelle tools to get started with
    2.18 -  document preparation are @{tool_ref mkroot} and @{tool_ref build}.
    2.19 -
    2.20 -  The classic Isabelle/HOL tutorial \cite{isabelle-hol-book} also
    2.21 -  explains some aspects of theory presentation.  *}
    2.22 -
    2.23 -
    2.24 -section {* Markup commands \label{sec:markup} *}
    2.25 -
    2.26 -text {*
    2.27 -  \begin{matharray}{rcl}
    2.28 -    @{command_def "header"} & : & @{text "toplevel \<rightarrow> toplevel"} \\[0.5ex]
    2.29 -    @{command_def "chapter"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    2.30 -    @{command_def "section"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    2.31 -    @{command_def "subsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    2.32 -    @{command_def "subsubsection"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    2.33 -    @{command_def "text"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    2.34 -    @{command_def "text_raw"} & : & @{text "local_theory \<rightarrow> local_theory"} \\[0.5ex]
    2.35 -    @{command_def "sect"} & : & @{text "proof \<rightarrow> proof"} \\
    2.36 -    @{command_def "subsect"} & : & @{text "proof \<rightarrow> proof"} \\
    2.37 -    @{command_def "subsubsect"} & : & @{text "proof \<rightarrow> proof"} \\
    2.38 -    @{command_def "txt"} & : & @{text "proof \<rightarrow> proof"} \\
    2.39 -    @{command_def "txt_raw"} & : & @{text "proof \<rightarrow> proof"} \\
    2.40 -  \end{matharray}
    2.41 -
    2.42 -  Markup commands provide a structured way to insert text into the
    2.43 -  document generated from a theory.  Each markup command takes a
    2.44 -  single @{syntax text} argument, which is passed as argument to a
    2.45 -  corresponding {\LaTeX} macro.  The default macros provided by
    2.46 -  @{file "~~/lib/texinputs/isabelle.sty"} can be redefined according
    2.47 -  to the needs of the underlying document and {\LaTeX} styles.
    2.48 -
    2.49 -  Note that formal comments (\secref{sec:comments}) are similar to
    2.50 -  markup commands, but have a different status within Isabelle/Isar
    2.51 -  syntax.
    2.52 -
    2.53 -  @{rail \<open>
    2.54 -    (@@{command chapter} | @@{command section} | @@{command subsection} |
    2.55 -      @@{command subsubsection} | @@{command text}) @{syntax target}? @{syntax text}
    2.56 -    ;
    2.57 -    (@@{command header} | @@{command text_raw} | @@{command sect} | @@{command subsect} |
    2.58 -      @@{command subsubsect} | @@{command txt} | @@{command txt_raw}) @{syntax text}
    2.59 -  \<close>}
    2.60 -
    2.61 -  \begin{description}
    2.62 -
    2.63 -  \item @{command header} provides plain text markup just preceding
    2.64 -  the formal beginning of a theory.  The corresponding {\LaTeX} macro
    2.65 -  is @{verbatim "\\isamarkupheader"}, which acts like @{command
    2.66 -  section} by default.
    2.67 -  
    2.68 -  \item @{command chapter}, @{command section}, @{command subsection},
    2.69 -  and @{command subsubsection} mark chapter and section headings
    2.70 -  within the main theory body or local theory targets.  The
    2.71 -  corresponding {\LaTeX} macros are @{verbatim "\\isamarkupchapter"},
    2.72 -  @{verbatim "\\isamarkupsection"}, @{verbatim
    2.73 -  "\\isamarkupsubsection"} etc.
    2.74 -
    2.75 -  \item @{command sect}, @{command subsect}, and @{command subsubsect}
    2.76 -  mark section headings within proofs.  The corresponding {\LaTeX}
    2.77 -  macros are @{verbatim "\\isamarkupsect"}, @{verbatim
    2.78 -  "\\isamarkupsubsect"} etc.
    2.79 -
    2.80 -  \item @{command text} and @{command txt} specify paragraphs of plain
    2.81 -  text.  This corresponds to a {\LaTeX} environment @{verbatim
    2.82 -  "\\begin{isamarkuptext}"} @{text "\<dots>"} @{verbatim
    2.83 -  "\\end{isamarkuptext}"} etc.
    2.84 -
    2.85 -  \item @{command text_raw} and @{command txt_raw} insert {\LaTeX}
    2.86 -  source into the output, without additional markup.  Thus the full
    2.87 -  range of document manipulations becomes available, at the risk of
    2.88 -  messing up document output.
    2.89 -
    2.90 -  \end{description}
    2.91 -
    2.92 -  Except for @{command "text_raw"} and @{command "txt_raw"}, the text
    2.93 -  passed to any of the above markup commands may refer to formal
    2.94 -  entities via \emph{document antiquotations}, see also
    2.95 -  \secref{sec:antiq}.  These are interpreted in the present theory or
    2.96 -  proof context, or the named @{text "target"}.
    2.97 -
    2.98 -  \medskip The proof markup commands closely resemble those for theory
    2.99 -  specifications, but have a different formal status and produce
   2.100 -  different {\LaTeX} macros.  The default definitions coincide for
   2.101 -  analogous commands such as @{command section} and @{command sect}.
   2.102 -*}
   2.103 -
   2.104 -
   2.105 -section {* Document Antiquotations \label{sec:antiq} *}
   2.106 -
   2.107 -text {*
   2.108 -  \begin{matharray}{rcl}
   2.109 -    @{antiquotation_def "theory"} & : & @{text antiquotation} \\
   2.110 -    @{antiquotation_def "thm"} & : & @{text antiquotation} \\
   2.111 -    @{antiquotation_def "lemma"} & : & @{text antiquotation} \\
   2.112 -    @{antiquotation_def "prop"} & : & @{text antiquotation} \\
   2.113 -    @{antiquotation_def "term"} & : & @{text antiquotation} \\
   2.114 -    @{antiquotation_def term_type} & : & @{text antiquotation} \\
   2.115 -    @{antiquotation_def typeof} & : & @{text antiquotation} \\
   2.116 -    @{antiquotation_def const} & : & @{text antiquotation} \\
   2.117 -    @{antiquotation_def abbrev} & : & @{text antiquotation} \\
   2.118 -    @{antiquotation_def typ} & : & @{text antiquotation} \\
   2.119 -    @{antiquotation_def type} & : & @{text antiquotation} \\
   2.120 -    @{antiquotation_def class} & : & @{text antiquotation} \\
   2.121 -    @{antiquotation_def "text"} & : & @{text antiquotation} \\
   2.122 -    @{antiquotation_def goals} & : & @{text antiquotation} \\
   2.123 -    @{antiquotation_def subgoals} & : & @{text antiquotation} \\
   2.124 -    @{antiquotation_def prf} & : & @{text antiquotation} \\
   2.125 -    @{antiquotation_def full_prf} & : & @{text antiquotation} \\
   2.126 -    @{antiquotation_def ML} & : & @{text antiquotation} \\
   2.127 -    @{antiquotation_def ML_op} & : & @{text antiquotation} \\
   2.128 -    @{antiquotation_def ML_type} & : & @{text antiquotation} \\
   2.129 -    @{antiquotation_def ML_structure} & : & @{text antiquotation} \\
   2.130 -    @{antiquotation_def ML_functor} & : & @{text antiquotation} \\
   2.131 -    @{antiquotation_def "file"} & : & @{text antiquotation} \\
   2.132 -    @{antiquotation_def "url"} & : & @{text antiquotation} \\
   2.133 -  \end{matharray}
   2.134 -
   2.135 -  The overall content of an Isabelle/Isar theory may alternate between
   2.136 -  formal and informal text.  The main body consists of formal
   2.137 -  specification and proof commands, interspersed with markup commands
   2.138 -  (\secref{sec:markup}) or document comments (\secref{sec:comments}).
   2.139 -  The argument of markup commands quotes informal text to be printed
   2.140 -  in the resulting document, but may again refer to formal entities
   2.141 -  via \emph{document antiquotations}.
   2.142 -
   2.143 -  For example, embedding of ``@{text [source=false] "@{term [show_types] \"f x = a + x\"}"}''
   2.144 -  within a text block makes
   2.145 -  \isa{{\isacharparenleft}f{\isasymColon}{\isacharprime}a\ {\isasymRightarrow}\ {\isacharprime}a{\isacharparenright}\ {\isacharparenleft}x{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharequal}\ {\isacharparenleft}a{\isasymColon}{\isacharprime}a{\isacharparenright}\ {\isacharplus}\ x} appear in the final {\LaTeX} document.
   2.146 -
   2.147 -  Antiquotations usually spare the author tedious typing of logical
   2.148 -  entities in full detail.  Even more importantly, some degree of
   2.149 -  consistency-checking between the main body of formal text and its
   2.150 -  informal explanation is achieved, since terms and types appearing in
   2.151 -  antiquotations are checked within the current theory or proof
   2.152 -  context.
   2.153 -
   2.154 -  %% FIXME less monolithic presentation, move to individual sections!?
   2.155 -  @{rail \<open>
   2.156 -    '@{' antiquotation '}'
   2.157 -    ;
   2.158 -    @{syntax_def antiquotation}:
   2.159 -      @@{antiquotation theory} options @{syntax name} |
   2.160 -      @@{antiquotation thm} options styles @{syntax thmrefs} |
   2.161 -      @@{antiquotation lemma} options @{syntax prop} @'by' @{syntax method} @{syntax method}? |
   2.162 -      @@{antiquotation prop} options styles @{syntax prop} |
   2.163 -      @@{antiquotation term} options styles @{syntax term} |
   2.164 -      @@{antiquotation (HOL) value} options styles @{syntax term} |
   2.165 -      @@{antiquotation term_type} options styles @{syntax term} |
   2.166 -      @@{antiquotation typeof} options styles @{syntax term} |
   2.167 -      @@{antiquotation const} options @{syntax term} |
   2.168 -      @@{antiquotation abbrev} options @{syntax term} |
   2.169 -      @@{antiquotation typ} options @{syntax type} |
   2.170 -      @@{antiquotation type} options @{syntax name} |
   2.171 -      @@{antiquotation class} options @{syntax name} |
   2.172 -      @@{antiquotation text} options @{syntax name}
   2.173 -    ;
   2.174 -    @{syntax antiquotation}:
   2.175 -      @@{antiquotation goals} options |
   2.176 -      @@{antiquotation subgoals} options |
   2.177 -      @@{antiquotation prf} options @{syntax thmrefs} |
   2.178 -      @@{antiquotation full_prf} options @{syntax thmrefs} |
   2.179 -      @@{antiquotation ML} options @{syntax name} |
   2.180 -      @@{antiquotation ML_op} options @{syntax name} |
   2.181 -      @@{antiquotation ML_type} options @{syntax name} |
   2.182 -      @@{antiquotation ML_structure} options @{syntax name} |
   2.183 -      @@{antiquotation ML_functor} options @{syntax name} |
   2.184 -      @@{antiquotation "file"} options @{syntax name} |
   2.185 -      @@{antiquotation file_unchecked} options @{syntax name} |
   2.186 -      @@{antiquotation url} options @{syntax name}
   2.187 -    ;
   2.188 -    options: '[' (option * ',') ']'
   2.189 -    ;
   2.190 -    option: @{syntax name} | @{syntax name} '=' @{syntax name}
   2.191 -    ;
   2.192 -    styles: '(' (style + ',') ')'
   2.193 -    ;
   2.194 -    style: (@{syntax name} +)
   2.195 -  \<close>}
   2.196 -
   2.197 -  Note that the syntax of antiquotations may \emph{not} include source
   2.198 -  comments @{verbatim "(*"}~@{text "\<dots>"}~@{verbatim "*)"} nor verbatim
   2.199 -  text @{verbatim "{"}@{verbatim "*"}~@{text "\<dots>"}~@{verbatim
   2.200 -  "*"}@{verbatim "}"}.
   2.201 -
   2.202 -  \begin{description}
   2.203 -  
   2.204 -  \item @{text "@{theory A}"} prints the name @{text "A"}, which is
   2.205 -  guaranteed to refer to a valid ancestor theory in the current
   2.206 -  context.
   2.207 -
   2.208 -  \item @{text "@{thm a\<^sub>1 \<dots> a\<^sub>n}"} prints theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}.
   2.209 -  Full fact expressions are allowed here, including attributes
   2.210 -  (\secref{sec:syn-att}).
   2.211 -
   2.212 -  \item @{text "@{prop \<phi>}"} prints a well-typed proposition @{text
   2.213 -  "\<phi>"}.
   2.214 -
   2.215 -  \item @{text "@{lemma \<phi> by m}"} proves a well-typed proposition
   2.216 -  @{text "\<phi>"} by method @{text m} and prints the original @{text "\<phi>"}.
   2.217 -
   2.218 -  \item @{text "@{term t}"} prints a well-typed term @{text "t"}.
   2.219 -  
   2.220 -  \item @{text "@{value t}"} evaluates a term @{text "t"} and prints
   2.221 -  its result, see also @{command_ref (HOL) value}.
   2.222 -
   2.223 -  \item @{text "@{term_type t}"} prints a well-typed term @{text "t"}
   2.224 -  annotated with its type.
   2.225 -
   2.226 -  \item @{text "@{typeof t}"} prints the type of a well-typed term
   2.227 -  @{text "t"}.
   2.228 -
   2.229 -  \item @{text "@{const c}"} prints a logical or syntactic constant
   2.230 -  @{text "c"}.
   2.231 -  
   2.232 -  \item @{text "@{abbrev c x\<^sub>1 \<dots> x\<^sub>n}"} prints a constant abbreviation
   2.233 -  @{text "c x\<^sub>1 \<dots> x\<^sub>n \<equiv> rhs"} as defined in the current context.
   2.234 -
   2.235 -  \item @{text "@{typ \<tau>}"} prints a well-formed type @{text "\<tau>"}.
   2.236 -
   2.237 -  \item @{text "@{type \<kappa>}"} prints a (logical or syntactic) type
   2.238 -    constructor @{text "\<kappa>"}.
   2.239 -
   2.240 -  \item @{text "@{class c}"} prints a class @{text c}.
   2.241 -
   2.242 -  \item @{text "@{text s}"} prints uninterpreted source text @{text
   2.243 -  s}.  This is particularly useful to print portions of text according
   2.244 -  to the Isabelle document style, without demanding well-formedness,
   2.245 -  e.g.\ small pieces of terms that should not be parsed or
   2.246 -  type-checked yet.
   2.247 -
   2.248 -  \item @{text "@{goals}"} prints the current \emph{dynamic} goal
   2.249 -  state.  This is mainly for support of tactic-emulation scripts
   2.250 -  within Isar.  Presentation of goal states does not conform to the
   2.251 -  idea of human-readable proof documents!
   2.252 -
   2.253 -  When explaining proofs in detail it is usually better to spell out
   2.254 -  the reasoning via proper Isar proof commands, instead of peeking at
   2.255 -  the internal machine configuration.
   2.256 -  
   2.257 -  \item @{text "@{subgoals}"} is similar to @{text "@{goals}"}, but
   2.258 -  does not print the main goal.
   2.259 -  
   2.260 -  \item @{text "@{prf a\<^sub>1 \<dots> a\<^sub>n}"} prints the (compact) proof terms
   2.261 -  corresponding to the theorems @{text "a\<^sub>1 \<dots> a\<^sub>n"}. Note that this
   2.262 -  requires proof terms to be switched on for the current logic
   2.263 -  session.
   2.264 -  
   2.265 -  \item @{text "@{full_prf a\<^sub>1 \<dots> a\<^sub>n}"} is like @{text "@{prf a\<^sub>1 \<dots>
   2.266 -  a\<^sub>n}"}, but prints the full proof terms, i.e.\ also displays
   2.267 -  information omitted in the compact proof term, which is denoted by
   2.268 -  ``@{text _}'' placeholders there.
   2.269 -  
   2.270 -  \item @{text "@{ML s}"}, @{text "@{ML_op s}"}, @{text "@{ML_type
   2.271 -  s}"}, @{text "@{ML_structure s}"}, and @{text "@{ML_functor s}"}
   2.272 -  check text @{text s} as ML value, infix operator, type, structure,
   2.273 -  and functor respectively.  The source is printed verbatim.
   2.274 -
   2.275 -  \item @{text "@{file path}"} checks that @{text "path"} refers to a
   2.276 -  file (or directory) and prints it verbatim.
   2.277 -
   2.278 -  \item @{text "@{file_unchecked path}"} is like @{text "@{file
   2.279 -  path}"}, but does not check the existence of the @{text "path"}
   2.280 -  within the file-system.
   2.281 -
   2.282 -  \item @{text "@{url name}"} produces markup for the given URL, which
   2.283 -  results in an active hyperlink within the text.
   2.284 -
   2.285 -  \end{description}
   2.286 -*}
   2.287 -
   2.288 -
   2.289 -subsection {* Styled antiquotations *}
   2.290 -
   2.291 -text {* The antiquotations @{text thm}, @{text prop} and @{text
   2.292 -  term} admit an extra \emph{style} specification to modify the
   2.293 -  printed result.  A style is specified by a name with a possibly
   2.294 -  empty number of arguments;  multiple styles can be sequenced with
   2.295 -  commas.  The following standard styles are available:
   2.296 -
   2.297 -  \begin{description}
   2.298 -  
   2.299 -  \item @{text lhs} extracts the first argument of any application
   2.300 -  form with at least two arguments --- typically meta-level or
   2.301 -  object-level equality, or any other binary relation.
   2.302 -  
   2.303 -  \item @{text rhs} is like @{text lhs}, but extracts the second
   2.304 -  argument.
   2.305 -  
   2.306 -  \item @{text "concl"} extracts the conclusion @{text C} from a rule
   2.307 -  in Horn-clause normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}.
   2.308 -  
   2.309 -  \item @{text "prem"} @{text n} extract premise number
   2.310 -  @{text "n"} from from a rule in Horn-clause
   2.311 -  normal form @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> C"}
   2.312 -
   2.313 -  \end{description}
   2.314 -*}
   2.315 -
   2.316 -
   2.317 -subsection {* General options *}
   2.318 -
   2.319 -text {* The following options are available to tune the printed output
   2.320 -  of antiquotations.  Note that many of these coincide with system and
   2.321 -  configuration options of the same names.
   2.322 -
   2.323 -  \begin{description}
   2.324 -
   2.325 -  \item @{antiquotation_option_def show_types}~@{text "= bool"} and
   2.326 -  @{antiquotation_option_def show_sorts}~@{text "= bool"} control
   2.327 -  printing of explicit type and sort constraints.
   2.328 -
   2.329 -  \item @{antiquotation_option_def show_structs}~@{text "= bool"}
   2.330 -  controls printing of implicit structures.
   2.331 -
   2.332 -  \item @{antiquotation_option_def show_abbrevs}~@{text "= bool"}
   2.333 -  controls folding of abbreviations.
   2.334 -
   2.335 -  \item @{antiquotation_option_def names_long}~@{text "= bool"} forces
   2.336 -  names of types and constants etc.\ to be printed in their fully
   2.337 -  qualified internal form.
   2.338 -
   2.339 -  \item @{antiquotation_option_def names_short}~@{text "= bool"}
   2.340 -  forces names of types and constants etc.\ to be printed unqualified.
   2.341 -  Note that internalizing the output again in the current context may
   2.342 -  well yield a different result.
   2.343 -
   2.344 -  \item @{antiquotation_option_def names_unique}~@{text "= bool"}
   2.345 -  determines whether the printed version of qualified names should be
   2.346 -  made sufficiently long to avoid overlap with names declared further
   2.347 -  back.  Set to @{text false} for more concise output.
   2.348 -
   2.349 -  \item @{antiquotation_option_def eta_contract}~@{text "= bool"}
   2.350 -  prints terms in @{text \<eta>}-contracted form.
   2.351 -
   2.352 -  \item @{antiquotation_option_def display}~@{text "= bool"} indicates
   2.353 -  if the text is to be output as multi-line ``display material'',
   2.354 -  rather than a small piece of text without line breaks (which is the
   2.355 -  default).
   2.356 -
   2.357 -  In this mode the embedded entities are printed in the same style as
   2.358 -  the main theory text.
   2.359 -
   2.360 -  \item @{antiquotation_option_def break}~@{text "= bool"} controls
   2.361 -  line breaks in non-display material.
   2.362 -
   2.363 -  \item @{antiquotation_option_def quotes}~@{text "= bool"} indicates
   2.364 -  if the output should be enclosed in double quotes.
   2.365 -
   2.366 -  \item @{antiquotation_option_def mode}~@{text "= name"} adds @{text
   2.367 -  name} to the print mode to be used for presentation.  Note that the
   2.368 -  standard setup for {\LaTeX} output is already present by default,
   2.369 -  including the modes @{text latex} and @{text xsymbols}.
   2.370 -
   2.371 -  \item @{antiquotation_option_def margin}~@{text "= nat"} and
   2.372 -  @{antiquotation_option_def indent}~@{text "= nat"} change the margin
   2.373 -  or indentation for pretty printing of display material.
   2.374 -
   2.375 -  \item @{antiquotation_option_def goals_limit}~@{text "= nat"}
   2.376 -  determines the maximum number of subgoals to be printed (for goal-based
   2.377 -  antiquotation).
   2.378 -
   2.379 -  \item @{antiquotation_option_def source}~@{text "= bool"} prints the
   2.380 -  original source text of the antiquotation arguments, rather than its
   2.381 -  internal representation.  Note that formal checking of
   2.382 -  @{antiquotation "thm"}, @{antiquotation "term"}, etc. is still
   2.383 -  enabled; use the @{antiquotation "text"} antiquotation for unchecked
   2.384 -  output.
   2.385 -
   2.386 -  Regular @{text "term"} and @{text "typ"} antiquotations with @{text
   2.387 -  "source = false"} involve a full round-trip from the original source
   2.388 -  to an internalized logical entity back to a source form, according
   2.389 -  to the syntax of the current context.  Thus the printed output is
   2.390 -  not under direct control of the author, it may even fluctuate a bit
   2.391 -  as the underlying theory is changed later on.
   2.392 -
   2.393 -  In contrast, @{antiquotation_option source}~@{text "= true"}
   2.394 -  admits direct printing of the given source text, with the desirable
   2.395 -  well-formedness check in the background, but without modification of
   2.396 -  the printed text.
   2.397 -
   2.398 -  \end{description}
   2.399 -
   2.400 -  For boolean flags, ``@{text "name = true"}'' may be abbreviated as
   2.401 -  ``@{text name}''.  All of the above flags are disabled by default,
   2.402 -  unless changed specifically for a logic session in the corresponding
   2.403 -  @{verbatim "ROOT"} file.  *}
   2.404 -
   2.405 -
   2.406 -section {* Markup via command tags \label{sec:tags} *}
   2.407 -
   2.408 -text {* Each Isabelle/Isar command may be decorated by additional
   2.409 -  presentation tags, to indicate some modification in the way it is
   2.410 -  printed in the document.
   2.411 -
   2.412 -  @{rail \<open>
   2.413 -    @{syntax_def tags}: ( tag * )
   2.414 -    ;
   2.415 -    tag: '%' (@{syntax ident} | @{syntax string})
   2.416 -  \<close>}
   2.417 -
   2.418 -  Some tags are pre-declared for certain classes of commands, serving
   2.419 -  as default markup if no tags are given in the text:
   2.420 -
   2.421 -  \medskip
   2.422 -  \begin{tabular}{ll}
   2.423 -    @{text "theory"} & theory begin/end \\
   2.424 -    @{text "proof"} & all proof commands \\
   2.425 -    @{text "ML"} & all commands involving ML code \\
   2.426 -  \end{tabular}
   2.427 -
   2.428 -  \medskip The Isabelle document preparation system
   2.429 -  \cite{isabelle-sys} allows tagged command regions to be presented
   2.430 -  specifically, e.g.\ to fold proof texts, or drop parts of the text
   2.431 -  completely.
   2.432 -
   2.433 -  For example ``@{command "by"}~@{text "%invisible auto"}'' causes
   2.434 -  that piece of proof to be treated as @{text invisible} instead of
   2.435 -  @{text "proof"} (the default), which may be shown or hidden
   2.436 -  depending on the document setup.  In contrast, ``@{command
   2.437 -  "by"}~@{text "%visible auto"}'' forces this text to be shown
   2.438 -  invariably.
   2.439 -
   2.440 -  Explicit tag specifications within a proof apply to all subsequent
   2.441 -  commands of the same level of nesting.  For example, ``@{command
   2.442 -  "proof"}~@{text "%visible \<dots>"}~@{command "qed"}'' forces the whole
   2.443 -  sub-proof to be typeset as @{text visible} (unless some of its parts
   2.444 -  are tagged differently).
   2.445 -
   2.446 -  \medskip Command tags merely produce certain markup environments for
   2.447 -  type-setting.  The meaning of these is determined by {\LaTeX}
   2.448 -  macros, as defined in @{file "~~/lib/texinputs/isabelle.sty"} or
   2.449 -  by the document author.  The Isabelle document preparation tools
   2.450 -  also provide some high-level options to specify the meaning of
   2.451 -  arbitrary tags to ``keep'', ``drop'', or ``fold'' the corresponding
   2.452 -  parts of the text.  Logic sessions may also specify ``document
   2.453 -  versions'', where given tags are interpreted in some particular way.
   2.454 -  Again see \cite{isabelle-sys} for further details.
   2.455 -*}
   2.456 -
   2.457 -
   2.458 -section {* Railroad diagrams *}
   2.459 -
   2.460 -text {*
   2.461 -  \begin{matharray}{rcl}
   2.462 -    @{antiquotation_def "rail"} & : & @{text antiquotation} \\
   2.463 -  \end{matharray}
   2.464 -
   2.465 -  @{rail \<open>
   2.466 -    'rail' (@{syntax string} | @{syntax cartouche})
   2.467 -  \<close>}
   2.468 -
   2.469 -  The @{antiquotation rail} antiquotation allows to include syntax
   2.470 -  diagrams into Isabelle documents.  {\LaTeX} requires the style file
   2.471 -  @{file "~~/lib/texinputs/pdfsetup.sty"}, which can be used via
   2.472 -  @{verbatim "\\usepackage{pdfsetup}"} in @{verbatim "root.tex"}, for
   2.473 -  example.
   2.474 -
   2.475 -  The rail specification language is quoted here as Isabelle @{syntax
   2.476 -  string} or text @{syntax "cartouche"}; it has its own grammar given
   2.477 -  below.
   2.478 -
   2.479 -  \begingroup
   2.480 -  \def\isasymnewline{\isatext{\tt\isacharbackslash<newline>}}
   2.481 -  @{rail \<open>
   2.482 -  rule? + ';'
   2.483 -  ;
   2.484 -  rule: ((identifier | @{syntax antiquotation}) ':')? body
   2.485 -  ;
   2.486 -  body: concatenation + '|'
   2.487 -  ;
   2.488 -  concatenation: ((atom '?'?) +) (('*' | '+') atom?)?
   2.489 -  ;
   2.490 -  atom: '(' body? ')' | identifier |
   2.491 -    '@'? (string | @{syntax antiquotation}) |
   2.492 -    '\<newline>'
   2.493 -  \<close>}
   2.494 -  \endgroup
   2.495 -
   2.496 -  The lexical syntax of @{text "identifier"} coincides with that of
   2.497 -  @{syntax ident} in regular Isabelle syntax, but @{text string} uses
   2.498 -  single quotes instead of double quotes of the standard @{syntax
   2.499 -  string} category.
   2.500 -
   2.501 -  Each @{text rule} defines a formal language (with optional name),
   2.502 -  using a notation that is similar to EBNF or regular expressions with
   2.503 -  recursion.  The meaning and visual appearance of these rail language
   2.504 -  elements is illustrated by the following representative examples.
   2.505 -
   2.506 -  \begin{itemize}
   2.507 -
   2.508 -  \item Empty @{verbatim "()"}
   2.509 -
   2.510 -  @{rail \<open>()\<close>}
   2.511 -
   2.512 -  \item Nonterminal @{verbatim "A"}
   2.513 -
   2.514 -  @{rail \<open>A\<close>}
   2.515 -
   2.516 -  \item Nonterminal via Isabelle antiquotation
   2.517 -  @{verbatim "@{syntax method}"}
   2.518 -
   2.519 -  @{rail \<open>@{syntax method}\<close>}
   2.520 -
   2.521 -  \item Terminal @{verbatim "'xyz'"}
   2.522 -
   2.523 -  @{rail \<open>'xyz'\<close>}
   2.524 -
   2.525 -  \item Terminal in keyword style @{verbatim "@'xyz'"}
   2.526 -
   2.527 -  @{rail \<open>@'xyz'\<close>}
   2.528 -
   2.529 -  \item Terminal via Isabelle antiquotation
   2.530 -  @{verbatim "@@{method rule}"}
   2.531 -
   2.532 -  @{rail \<open>@@{method rule}\<close>}
   2.533 -
   2.534 -  \item Concatenation @{verbatim "A B C"}
   2.535 -
   2.536 -  @{rail \<open>A B C\<close>}
   2.537 -
   2.538 -  \item Newline inside concatenation
   2.539 -  @{verbatim "A B C \<newline> D E F"}
   2.540 -
   2.541 -  @{rail \<open>A B C \<newline> D E F\<close>}
   2.542 -
   2.543 -  \item Variants @{verbatim "A | B | C"}
   2.544 -
   2.545 -  @{rail \<open>A | B | C\<close>}
   2.546 -
   2.547 -  \item Option @{verbatim "A ?"}
   2.548 -
   2.549 -  @{rail \<open>A ?\<close>}
   2.550 -
   2.551 -  \item Repetition @{verbatim "A *"}
   2.552 -
   2.553 -  @{rail \<open>A *\<close>}
   2.554 -
   2.555 -  \item Repetition with separator @{verbatim "A * sep"}
   2.556 -
   2.557 -  @{rail \<open>A * sep\<close>}
   2.558 -
   2.559 -  \item Strict repetition @{verbatim "A +"}
   2.560 -
   2.561 -  @{rail \<open>A +\<close>}
   2.562 -
   2.563 -  \item Strict repetition with separator @{verbatim "A + sep"}
   2.564 -
   2.565 -  @{rail \<open>A + sep\<close>}
   2.566 -
   2.567 -  \end{itemize}
   2.568 -*}
   2.569 -
   2.570 -
   2.571 -section {* Draft presentation *}
   2.572 -
   2.573 -text {*
   2.574 -  \begin{matharray}{rcl}
   2.575 -    @{command_def "display_drafts"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
   2.576 -  \end{matharray}
   2.577 -
   2.578 -  @{rail \<open>
   2.579 -    @@{command display_drafts} (@{syntax name} +)
   2.580 -  \<close>}
   2.581 -
   2.582 -  \begin{description}
   2.583 -
   2.584 -  \item @{command "display_drafts"}~@{text paths} performs simple output of a
   2.585 -  given list of raw source files. Only those symbols that do not require
   2.586 -  additional {\LaTeX} packages are displayed properly, everything else is left
   2.587 -  verbatim.
   2.588 -
   2.589 -  \end{description}
   2.590 -*}
   2.591 -
   2.592 -end
     3.1 --- a/src/Doc/Isar-Ref/First_Order_Logic.thy	Mon Apr 07 16:37:57 2014 +0200
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,520 +0,0 @@
     3.4 -
     3.5 -header {* Example: First-Order Logic *}
     3.6 -
     3.7 -theory %visible First_Order_Logic
     3.8 -imports Base  (* FIXME Pure!? *)
     3.9 -begin
    3.10 -
    3.11 -text {*
    3.12 -  \noindent In order to commence a new object-logic within
    3.13 -  Isabelle/Pure we introduce abstract syntactic categories @{text "i"}
    3.14 -  for individuals and @{text "o"} for object-propositions.  The latter
    3.15 -  is embedded into the language of Pure propositions by means of a
    3.16 -  separate judgment.
    3.17 -*}
    3.18 -
    3.19 -typedecl i
    3.20 -typedecl o
    3.21 -
    3.22 -judgment
    3.23 -  Trueprop :: "o \<Rightarrow> prop"    ("_" 5)
    3.24 -
    3.25 -text {*
    3.26 -  \noindent Note that the object-logic judgement is implicit in the
    3.27 -  syntax: writing @{prop A} produces @{term "Trueprop A"} internally.
    3.28 -  From the Pure perspective this means ``@{prop A} is derivable in the
    3.29 -  object-logic''.
    3.30 -*}
    3.31 -
    3.32 -
    3.33 -subsection {* Equational reasoning \label{sec:framework-ex-equal} *}
    3.34 -
    3.35 -text {*
    3.36 -  Equality is axiomatized as a binary predicate on individuals, with
    3.37 -  reflexivity as introduction, and substitution as elimination
    3.38 -  principle.  Note that the latter is particularly convenient in a
    3.39 -  framework like Isabelle, because syntactic congruences are
    3.40 -  implicitly produced by unification of @{term "B x"} against
    3.41 -  expressions containing occurrences of @{term x}.
    3.42 -*}
    3.43 -
    3.44 -axiomatization
    3.45 -  equal :: "i \<Rightarrow> i \<Rightarrow> o"  (infix "=" 50)
    3.46 -where
    3.47 -  refl [intro]: "x = x" and
    3.48 -  subst [elim]: "x = y \<Longrightarrow> B x \<Longrightarrow> B y"
    3.49 -
    3.50 -text {*
    3.51 -  \noindent Substitution is very powerful, but also hard to control in
    3.52 -  full generality.  We derive some common symmetry~/ transitivity
    3.53 -  schemes of @{term equal} as particular consequences.
    3.54 -*}
    3.55 -
    3.56 -theorem sym [sym]:
    3.57 -  assumes "x = y"
    3.58 -  shows "y = x"
    3.59 -proof -
    3.60 -  have "x = x" ..
    3.61 -  with `x = y` show "y = x" ..
    3.62 -qed
    3.63 -
    3.64 -theorem forw_subst [trans]:
    3.65 -  assumes "y = x" and "B x"
    3.66 -  shows "B y"
    3.67 -proof -
    3.68 -  from `y = x` have "x = y" ..
    3.69 -  from this and `B x` show "B y" ..
    3.70 -qed
    3.71 -
    3.72 -theorem back_subst [trans]:
    3.73 -  assumes "B x" and "x = y"
    3.74 -  shows "B y"
    3.75 -proof -
    3.76 -  from `x = y` and `B x`
    3.77 -  show "B y" ..
    3.78 -qed
    3.79 -
    3.80 -theorem trans [trans]:
    3.81 -  assumes "x = y" and "y = z"
    3.82 -  shows "x = z"
    3.83 -proof -
    3.84 -  from `y = z` and `x = y`
    3.85 -  show "x = z" ..
    3.86 -qed
    3.87 -
    3.88 -
    3.89 -subsection {* Basic group theory *}
    3.90 -
    3.91 -text {*
    3.92 -  As an example for equational reasoning we consider some bits of
    3.93 -  group theory.  The subsequent locale definition postulates group
    3.94 -  operations and axioms; we also derive some consequences of this
    3.95 -  specification.
    3.96 -*}
    3.97 -
    3.98 -locale group =
    3.99 -  fixes prod :: "i \<Rightarrow> i \<Rightarrow> i"  (infix "\<circ>" 70)
   3.100 -    and inv :: "i \<Rightarrow> i"  ("(_\<inverse>)" [1000] 999)
   3.101 -    and unit :: i  ("1")
   3.102 -  assumes assoc: "(x \<circ> y) \<circ> z = x \<circ> (y \<circ> z)"
   3.103 -    and left_unit:  "1 \<circ> x = x"
   3.104 -    and left_inv: "x\<inverse> \<circ> x = 1"
   3.105 -begin
   3.106 -
   3.107 -theorem right_inv: "x \<circ> x\<inverse> = 1"
   3.108 -proof -
   3.109 -  have "x \<circ> x\<inverse> = 1 \<circ> (x \<circ> x\<inverse>)" by (rule left_unit [symmetric])
   3.110 -  also have "\<dots> = (1 \<circ> x) \<circ> x\<inverse>" by (rule assoc [symmetric])
   3.111 -  also have "1 = (x\<inverse>)\<inverse> \<circ> x\<inverse>" by (rule left_inv [symmetric])
   3.112 -  also have "\<dots> \<circ> x = (x\<inverse>)\<inverse> \<circ> (x\<inverse> \<circ> x)" by (rule assoc)
   3.113 -  also have "x\<inverse> \<circ> x = 1" by (rule left_inv)
   3.114 -  also have "((x\<inverse>)\<inverse> \<circ> \<dots>) \<circ> x\<inverse> = (x\<inverse>)\<inverse> \<circ> (1 \<circ> x\<inverse>)" by (rule assoc)
   3.115 -  also have "1 \<circ> x\<inverse> = x\<inverse>" by (rule left_unit)
   3.116 -  also have "(x\<inverse>)\<inverse> \<circ> \<dots> = 1" by (rule left_inv)
   3.117 -  finally show "x \<circ> x\<inverse> = 1" .
   3.118 -qed
   3.119 -
   3.120 -theorem right_unit: "x \<circ> 1 = x"
   3.121 -proof -
   3.122 -  have "1 = x\<inverse> \<circ> x" by (rule left_inv [symmetric])
   3.123 -  also have "x \<circ> \<dots> = (x \<circ> x\<inverse>) \<circ> x" by (rule assoc [symmetric])
   3.124 -  also have "x \<circ> x\<inverse> = 1" by (rule right_inv)
   3.125 -  also have "\<dots> \<circ> x = x" by (rule left_unit)
   3.126 -  finally show "x \<circ> 1 = x" .
   3.127 -qed
   3.128 -
   3.129 -text {*
   3.130 -  \noindent Reasoning from basic axioms is often tedious.  Our proofs
   3.131 -  work by producing various instances of the given rules (potentially
   3.132 -  the symmetric form) using the pattern ``@{command have}~@{text
   3.133 -  eq}~@{command "by"}~@{text "(rule r)"}'' and composing the chain of
   3.134 -  results via @{command also}/@{command finally}.  These steps may
   3.135 -  involve any of the transitivity rules declared in
   3.136 -  \secref{sec:framework-ex-equal}, namely @{thm trans} in combining
   3.137 -  the first two results in @{thm right_inv} and in the final steps of
   3.138 -  both proofs, @{thm forw_subst} in the first combination of @{thm
   3.139 -  right_unit}, and @{thm back_subst} in all other calculational steps.
   3.140 -
   3.141 -  Occasional substitutions in calculations are adequate, but should
   3.142 -  not be over-emphasized.  The other extreme is to compose a chain by
   3.143 -  plain transitivity only, with replacements occurring always in
   3.144 -  topmost position. For example:
   3.145 -*}
   3.146 -
   3.147 -(*<*)
   3.148 -theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
   3.149 -proof -
   3.150 -  assume [symmetric, defn]: "\<And>x y. (x \<equiv> y) \<equiv> Trueprop (x = y)"
   3.151 -(*>*)
   3.152 -  have "x \<circ> 1 = x \<circ> (x\<inverse> \<circ> x)" unfolding left_inv ..
   3.153 -  also have "\<dots> = (x \<circ> x\<inverse>) \<circ> x" unfolding assoc ..
   3.154 -  also have "\<dots> = 1 \<circ> x" unfolding right_inv ..
   3.155 -  also have "\<dots> = x" unfolding left_unit ..
   3.156 -  finally have "x \<circ> 1 = x" .
   3.157 -(*<*)
   3.158 -qed
   3.159 -(*>*)
   3.160 -
   3.161 -text {*
   3.162 -  \noindent Here we have re-used the built-in mechanism for unfolding
   3.163 -  definitions in order to normalize each equational problem.  A more
   3.164 -  realistic object-logic would include proper setup for the Simplifier
   3.165 -  (\secref{sec:simplifier}), the main automated tool for equational
   3.166 -  reasoning in Isabelle.  Then ``@{command unfolding}~@{thm
   3.167 -  left_inv}~@{command ".."}'' would become ``@{command "by"}~@{text
   3.168 -  "(simp only: left_inv)"}'' etc.
   3.169 -*}
   3.170 -
   3.171 -end
   3.172 -
   3.173 -
   3.174 -subsection {* Propositional logic \label{sec:framework-ex-prop} *}
   3.175 -
   3.176 -text {*
   3.177 -  We axiomatize basic connectives of propositional logic: implication,
   3.178 -  disjunction, and conjunction.  The associated rules are modeled
   3.179 -  after Gentzen's system of Natural Deduction \cite{Gentzen:1935}.
   3.180 -*}
   3.181 -
   3.182 -axiomatization
   3.183 -  imp :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<longrightarrow>" 25) where
   3.184 -  impI [intro]: "(A \<Longrightarrow> B) \<Longrightarrow> A \<longrightarrow> B" and
   3.185 -  impD [dest]: "(A \<longrightarrow> B) \<Longrightarrow> A \<Longrightarrow> B"
   3.186 -
   3.187 -axiomatization
   3.188 -  disj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<or>" 30) where
   3.189 -  disjI\<^sub>1 [intro]: "A \<Longrightarrow> A \<or> B" and
   3.190 -  disjI\<^sub>2 [intro]: "B \<Longrightarrow> A \<or> B" and
   3.191 -  disjE [elim]: "A \<or> B \<Longrightarrow> (A \<Longrightarrow> C) \<Longrightarrow> (B \<Longrightarrow> C) \<Longrightarrow> C"
   3.192 -
   3.193 -axiomatization
   3.194 -  conj :: "o \<Rightarrow> o \<Rightarrow> o"  (infixr "\<and>" 35) where
   3.195 -  conjI [intro]: "A \<Longrightarrow> B \<Longrightarrow> A \<and> B" and
   3.196 -  conjD\<^sub>1: "A \<and> B \<Longrightarrow> A" and
   3.197 -  conjD\<^sub>2: "A \<and> B \<Longrightarrow> B"
   3.198 -
   3.199 -text {*
   3.200 -  \noindent The conjunctive destructions have the disadvantage that
   3.201 -  decomposing @{prop "A \<and> B"} involves an immediate decision which
   3.202 -  component should be projected.  The more convenient simultaneous
   3.203 -  elimination @{prop "A \<and> B \<Longrightarrow> (A \<Longrightarrow> B \<Longrightarrow> C) \<Longrightarrow> C"} can be derived as
   3.204 -  follows:
   3.205 -*}
   3.206 -
   3.207 -theorem conjE [elim]:
   3.208 -  assumes "A \<and> B"
   3.209 -  obtains A and B
   3.210 -proof
   3.211 -  from `A \<and> B` show A by (rule conjD\<^sub>1)
   3.212 -  from `A \<and> B` show B by (rule conjD\<^sub>2)
   3.213 -qed
   3.214 -
   3.215 -text {*
   3.216 -  \noindent Here is an example of swapping conjuncts with a single
   3.217 -  intermediate elimination step:
   3.218 -*}
   3.219 -
   3.220 -(*<*)
   3.221 -lemma "\<And>A. PROP A \<Longrightarrow> PROP A"
   3.222 -proof -
   3.223 -(*>*)
   3.224 -  assume "A \<and> B"
   3.225 -  then obtain B and A ..
   3.226 -  then have "B \<and> A" ..
   3.227 -(*<*)
   3.228 -qed
   3.229 -(*>*)
   3.230 -
   3.231 -text {*
   3.232 -  \noindent Note that the analogous elimination rule for disjunction
   3.233 -  ``@{text "\<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"}'' coincides with
   3.234 -  the original axiomatization of @{thm disjE}.
   3.235 -
   3.236 -  \medskip We continue propositional logic by introducing absurdity
   3.237 -  with its characteristic elimination.  Plain truth may then be
   3.238 -  defined as a proposition that is trivially true.
   3.239 -*}
   3.240 -
   3.241 -axiomatization
   3.242 -  false :: o  ("\<bottom>") where
   3.243 -  falseE [elim]: "\<bottom> \<Longrightarrow> A"
   3.244 -
   3.245 -definition
   3.246 -  true :: o  ("\<top>") where
   3.247 -  "\<top> \<equiv> \<bottom> \<longrightarrow> \<bottom>"
   3.248 -
   3.249 -theorem trueI [intro]: \<top>
   3.250 -  unfolding true_def ..
   3.251 -
   3.252 -text {*
   3.253 -  \medskip\noindent Now negation represents an implication towards
   3.254 -  absurdity:
   3.255 -*}
   3.256 -
   3.257 -definition
   3.258 -  not :: "o \<Rightarrow> o"  ("\<not> _" [40] 40) where
   3.259 -  "\<not> A \<equiv> A \<longrightarrow> \<bottom>"
   3.260 -
   3.261 -theorem notI [intro]:
   3.262 -  assumes "A \<Longrightarrow> \<bottom>"
   3.263 -  shows "\<not> A"
   3.264 -unfolding not_def
   3.265 -proof
   3.266 -  assume A
   3.267 -  then show \<bottom> by (rule `A \<Longrightarrow> \<bottom>`)
   3.268 -qed
   3.269 -
   3.270 -theorem notE [elim]:
   3.271 -  assumes "\<not> A" and A
   3.272 -  shows B
   3.273 -proof -
   3.274 -  from `\<not> A` have "A \<longrightarrow> \<bottom>" unfolding not_def .
   3.275 -  from `A \<longrightarrow> \<bottom>` and `A` have \<bottom> ..
   3.276 -  then show B ..
   3.277 -qed
   3.278 -
   3.279 -
   3.280 -subsection {* Classical logic *}
   3.281 -
   3.282 -text {*
   3.283 -  Subsequently we state the principle of classical contradiction as a
   3.284 -  local assumption.  Thus we refrain from forcing the object-logic
   3.285 -  into the classical perspective.  Within that context, we may derive
   3.286 -  well-known consequences of the classical principle.
   3.287 -*}
   3.288 -
   3.289 -locale classical =
   3.290 -  assumes classical: "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"
   3.291 -begin
   3.292 -
   3.293 -theorem double_negation:
   3.294 -  assumes "\<not> \<not> C"
   3.295 -  shows C
   3.296 -proof (rule classical)
   3.297 -  assume "\<not> C"
   3.298 -  with `\<not> \<not> C` show C ..
   3.299 -qed
   3.300 -
   3.301 -theorem tertium_non_datur: "C \<or> \<not> C"
   3.302 -proof (rule double_negation)
   3.303 -  show "\<not> \<not> (C \<or> \<not> C)"
   3.304 -  proof
   3.305 -    assume "\<not> (C \<or> \<not> C)"
   3.306 -    have "\<not> C"
   3.307 -    proof
   3.308 -      assume C then have "C \<or> \<not> C" ..
   3.309 -      with `\<not> (C \<or> \<not> C)` show \<bottom> ..
   3.310 -    qed
   3.311 -    then have "C \<or> \<not> C" ..
   3.312 -    with `\<not> (C \<or> \<not> C)` show \<bottom> ..
   3.313 -  qed
   3.314 -qed
   3.315 -
   3.316 -text {*
   3.317 -  \noindent These examples illustrate both classical reasoning and
   3.318 -  non-trivial propositional proofs in general.  All three rules
   3.319 -  characterize classical logic independently, but the original rule is
   3.320 -  already the most convenient to use, because it leaves the conclusion
   3.321 -  unchanged.  Note that @{prop "(\<not> C \<Longrightarrow> C) \<Longrightarrow> C"} fits again into our
   3.322 -  format for eliminations, despite the additional twist that the
   3.323 -  context refers to the main conclusion.  So we may write @{thm
   3.324 -  classical} as the Isar statement ``@{text "\<OBTAINS> \<not> thesis"}''.
   3.325 -  This also explains nicely how classical reasoning really works:
   3.326 -  whatever the main @{text thesis} might be, we may always assume its
   3.327 -  negation!
   3.328 -*}
   3.329 -
   3.330 -end
   3.331 -
   3.332 -
   3.333 -subsection {* Quantifiers \label{sec:framework-ex-quant} *}
   3.334 -
   3.335 -text {*
   3.336 -  Representing quantifiers is easy, thanks to the higher-order nature
   3.337 -  of the underlying framework.  According to the well-known technique
   3.338 -  introduced by Church \cite{church40}, quantifiers are operators on
   3.339 -  predicates, which are syntactically represented as @{text "\<lambda>"}-terms
   3.340 -  of type @{typ "i \<Rightarrow> o"}.  Binder notation turns @{text "All (\<lambda>x. B
   3.341 -  x)"} into @{text "\<forall>x. B x"} etc.
   3.342 -*}
   3.343 -
   3.344 -axiomatization
   3.345 -  All :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<forall>" 10) where
   3.346 -  allI [intro]: "(\<And>x. B x) \<Longrightarrow> \<forall>x. B x" and
   3.347 -  allD [dest]: "(\<forall>x. B x) \<Longrightarrow> B a"
   3.348 -
   3.349 -axiomatization
   3.350 -  Ex :: "(i \<Rightarrow> o) \<Rightarrow> o"  (binder "\<exists>" 10) where
   3.351 -  exI [intro]: "B a \<Longrightarrow> (\<exists>x. B x)" and
   3.352 -  exE [elim]: "(\<exists>x. B x) \<Longrightarrow> (\<And>x. B x \<Longrightarrow> C) \<Longrightarrow> C"
   3.353 -
   3.354 -text {*
   3.355 -  \noindent The statement of @{thm exE} corresponds to ``@{text
   3.356 -  "\<ASSUMES> \<exists>x. B x \<OBTAINS> x \<WHERE> B x"}'' in Isar.  In the
   3.357 -  subsequent example we illustrate quantifier reasoning involving all
   3.358 -  four rules:
   3.359 -*}
   3.360 -
   3.361 -theorem
   3.362 -  assumes "\<exists>x. \<forall>y. R x y"
   3.363 -  shows "\<forall>y. \<exists>x. R x y"
   3.364 -proof    -- {* @{text "\<forall>"} introduction *}
   3.365 -  obtain x where "\<forall>y. R x y" using `\<exists>x. \<forall>y. R x y` ..    -- {* @{text "\<exists>"} elimination *}
   3.366 -  fix y have "R x y" using `\<forall>y. R x y` ..    -- {* @{text "\<forall>"} destruction *}
   3.367 -  then show "\<exists>x. R x y" ..    -- {* @{text "\<exists>"} introduction *}
   3.368 -qed
   3.369 -
   3.370 -
   3.371 -subsection {* Canonical reasoning patterns *}
   3.372 -
   3.373 -text {*
   3.374 -  The main rules of first-order predicate logic from
   3.375 -  \secref{sec:framework-ex-prop} and \secref{sec:framework-ex-quant}
   3.376 -  can now be summarized as follows, using the native Isar statement
   3.377 -  format of \secref{sec:framework-stmt}.
   3.378 -
   3.379 -  \medskip
   3.380 -  \begin{tabular}{l}
   3.381 -  @{text "impI: \<ASSUMES> A \<Longrightarrow> B \<SHOWS> A \<longrightarrow> B"} \\
   3.382 -  @{text "impD: \<ASSUMES> A \<longrightarrow> B \<AND> A \<SHOWS> B"} \\[1ex]
   3.383 -
   3.384 -  @{text "disjI\<^sub>1: \<ASSUMES> A \<SHOWS> A \<or> B"} \\
   3.385 -  @{text "disjI\<^sub>2: \<ASSUMES> B \<SHOWS> A \<or> B"} \\
   3.386 -  @{text "disjE: \<ASSUMES> A \<or> B \<OBTAINS> A \<BBAR> B"} \\[1ex]
   3.387 -
   3.388 -  @{text "conjI: \<ASSUMES> A \<AND> B \<SHOWS> A \<and> B"} \\
   3.389 -  @{text "conjE: \<ASSUMES> A \<and> B \<OBTAINS> A \<AND> B"} \\[1ex]
   3.390 -
   3.391 -  @{text "falseE: \<ASSUMES> \<bottom> \<SHOWS> A"} \\
   3.392 -  @{text "trueI: \<SHOWS> \<top>"} \\[1ex]
   3.393 -
   3.394 -  @{text "notI: \<ASSUMES> A \<Longrightarrow> \<bottom> \<SHOWS> \<not> A"} \\
   3.395 -  @{text "notE: \<ASSUMES> \<not> A \<AND> A \<SHOWS> B"} \\[1ex]
   3.396 -
   3.397 -  @{text "allI: \<ASSUMES> \<And>x. B x \<SHOWS> \<forall>x. B x"} \\
   3.398 -  @{text "allE: \<ASSUMES> \<forall>x. B x \<SHOWS> B a"} \\[1ex]
   3.399 -
   3.400 -  @{text "exI: \<ASSUMES> B a \<SHOWS> \<exists>x. B x"} \\
   3.401 -  @{text "exE: \<ASSUMES> \<exists>x. B x \<OBTAINS> a \<WHERE> B a"}
   3.402 -  \end{tabular}
   3.403 -  \medskip
   3.404 -
   3.405 -  \noindent This essentially provides a declarative reading of Pure
   3.406 -  rules as Isar reasoning patterns: the rule statements tells how a
   3.407 -  canonical proof outline shall look like.  Since the above rules have
   3.408 -  already been declared as @{attribute (Pure) intro}, @{attribute
   3.409 -  (Pure) elim}, @{attribute (Pure) dest} --- each according to its
   3.410 -  particular shape --- we can immediately write Isar proof texts as
   3.411 -  follows:
   3.412 -*}
   3.413 -
   3.414 -(*<*)
   3.415 -theorem "\<And>A. PROP A \<Longrightarrow> PROP A"
   3.416 -proof -
   3.417 -(*>*)
   3.418 -
   3.419 -  txt_raw {*\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.420 -
   3.421 -  have "A \<longrightarrow> B"
   3.422 -  proof
   3.423 -    assume A
   3.424 -    show B sorry %noproof
   3.425 -  qed
   3.426 -
   3.427 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.428 -
   3.429 -  have "A \<longrightarrow> B" and A sorry %noproof
   3.430 -  then have B ..
   3.431 -
   3.432 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.433 -
   3.434 -  have A sorry %noproof
   3.435 -  then have "A \<or> B" ..
   3.436 -
   3.437 -  have B sorry %noproof
   3.438 -  then have "A \<or> B" ..
   3.439 -
   3.440 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.441 -
   3.442 -  have "A \<or> B" sorry %noproof
   3.443 -  then have C
   3.444 -  proof
   3.445 -    assume A
   3.446 -    then show C sorry %noproof
   3.447 -  next
   3.448 -    assume B
   3.449 -    then show C sorry %noproof
   3.450 -  qed
   3.451 -
   3.452 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.453 -
   3.454 -  have A and B sorry %noproof
   3.455 -  then have "A \<and> B" ..
   3.456 -
   3.457 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.458 -
   3.459 -  have "A \<and> B" sorry %noproof
   3.460 -  then obtain A and B ..
   3.461 -
   3.462 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.463 -
   3.464 -  have "\<bottom>" sorry %noproof
   3.465 -  then have A ..
   3.466 -
   3.467 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.468 -
   3.469 -  have "\<top>" ..
   3.470 -
   3.471 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.472 -
   3.473 -  have "\<not> A"
   3.474 -  proof
   3.475 -    assume A
   3.476 -    then show "\<bottom>" sorry %noproof
   3.477 -  qed
   3.478 -
   3.479 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.480 -
   3.481 -  have "\<not> A" and A sorry %noproof
   3.482 -  then have B ..
   3.483 -
   3.484 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.485 -
   3.486 -  have "\<forall>x. B x"
   3.487 -  proof
   3.488 -    fix x
   3.489 -    show "B x" sorry %noproof
   3.490 -  qed
   3.491 -
   3.492 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.493 -
   3.494 -  have "\<forall>x. B x" sorry %noproof
   3.495 -  then have "B a" ..
   3.496 -
   3.497 -  txt_raw {*\end{minipage}\\[3ex]\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.498 -
   3.499 -  have "\<exists>x. B x"
   3.500 -  proof
   3.501 -    show "B a" sorry %noproof
   3.502 -  qed
   3.503 -
   3.504 -  txt_raw {*\end{minipage}\qquad\begin{minipage}[t]{0.4\textwidth}*}(*<*)next(*>*)
   3.505 -
   3.506 -  have "\<exists>x. B x" sorry %noproof
   3.507 -  then obtain a where "B a" ..
   3.508 -
   3.509 -  txt_raw {*\end{minipage}*}
   3.510 -
   3.511 -(*<*)
   3.512 -qed
   3.513 -(*>*)
   3.514 -
   3.515 -text {*
   3.516 -  \bigskip\noindent Of course, these proofs are merely examples.  As
   3.517 -  sketched in \secref{sec:framework-subproof}, there is a fair amount
   3.518 -  of flexibility in expressing Pure deductions in Isar.  Here the user
   3.519 -  is asked to express himself adequately, aiming at proof texts of
   3.520 -  literary quality.
   3.521 -*}
   3.522 -
   3.523 -end %visible
     4.1 --- a/src/Doc/Isar-Ref/Framework.thy	Mon Apr 07 16:37:57 2014 +0200
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,1016 +0,0 @@
     4.4 -theory Framework
     4.5 -imports Base Main
     4.6 -begin
     4.7 -
     4.8 -chapter {* The Isabelle/Isar Framework \label{ch:isar-framework} *}
     4.9 -
    4.10 -text {*
    4.11 -  Isabelle/Isar
    4.12 -  \cite{Wenzel:1999:TPHOL,Wenzel-PhD,Nipkow-TYPES02,Wenzel-Paulson:2006,Wenzel:2006:Festschrift}
    4.13 -  is intended as a generic framework for developing formal
    4.14 -  mathematical documents with full proof checking.  Definitions and
    4.15 -  proofs are organized as theories.  An assembly of theory sources may
    4.16 -  be presented as a printed document; see also
    4.17 -  \chref{ch:document-prep}.
    4.18 -
    4.19 -  The main objective of Isar is the design of a human-readable
    4.20 -  structured proof language, which is called the ``primary proof
    4.21 -  format'' in Isar terminology.  Such a primary proof language is
    4.22 -  somewhere in the middle between the extremes of primitive proof
    4.23 -  objects and actual natural language.  In this respect, Isar is a bit
    4.24 -  more formalistic than Mizar
    4.25 -  \cite{Trybulec:1993:MizarFeatures,Rudnicki:1992:MizarOverview,Wiedijk:1999:Mizar},
    4.26 -  using logical symbols for certain reasoning schemes where Mizar
    4.27 -  would prefer English words; see \cite{Wenzel-Wiedijk:2002} for
    4.28 -  further comparisons of these systems.
    4.29 -
    4.30 -  So Isar challenges the traditional way of recording informal proofs
    4.31 -  in mathematical prose, as well as the common tendency to see fully
    4.32 -  formal proofs directly as objects of some logical calculus (e.g.\
    4.33 -  @{text "\<lambda>"}-terms in a version of type theory).  In fact, Isar is
    4.34 -  better understood as an interpreter of a simple block-structured
    4.35 -  language for describing the data flow of local facts and goals,
    4.36 -  interspersed with occasional invocations of proof methods.
    4.37 -  Everything is reduced to logical inferences internally, but these
    4.38 -  steps are somewhat marginal compared to the overall bookkeeping of
    4.39 -  the interpretation process.  Thanks to careful design of the syntax
    4.40 -  and semantics of Isar language elements, a formal record of Isar
    4.41 -  instructions may later appear as an intelligible text to the
    4.42 -  attentive reader.
    4.43 -
    4.44 -  The Isar proof language has emerged from careful analysis of some
    4.45 -  inherent virtues of the existing logical framework of Isabelle/Pure
    4.46 -  \cite{paulson-found,paulson700}, notably composition of higher-order
    4.47 -  natural deduction rules, which is a generalization of Gentzen's
    4.48 -  original calculus \cite{Gentzen:1935}.  The approach of generic
    4.49 -  inference systems in Pure is continued by Isar towards actual proof
    4.50 -  texts.
    4.51 -
    4.52 -  Concrete applications require another intermediate layer: an
    4.53 -  object-logic.  Isabelle/HOL \cite{isa-tutorial} (simply-typed
    4.54 -  set-theory) is being used most of the time; Isabelle/ZF
    4.55 -  \cite{isabelle-ZF} is less extensively developed, although it would
    4.56 -  probably fit better for classical mathematics.
    4.57 -
    4.58 -  \medskip In order to illustrate natural deduction in Isar, we shall
    4.59 -  refer to the background theory and library of Isabelle/HOL.  This
    4.60 -  includes common notions of predicate logic, naive set-theory etc.\
    4.61 -  using fairly standard mathematical notation.  From the perspective
    4.62 -  of generic natural deduction there is nothing special about the
    4.63 -  logical connectives of HOL (@{text "\<and>"}, @{text "\<or>"}, @{text "\<forall>"},
    4.64 -  @{text "\<exists>"}, etc.), only the resulting reasoning principles are
    4.65 -  relevant to the user.  There are similar rules available for
    4.66 -  set-theory operators (@{text "\<inter>"}, @{text "\<union>"}, @{text "\<Inter>"}, @{text
    4.67 -  "\<Union>"}, etc.), or any other theory developed in the library (lattice
    4.68 -  theory, topology etc.).
    4.69 -
    4.70 -  Subsequently we briefly review fragments of Isar proof texts
    4.71 -  corresponding directly to such general deduction schemes.  The
    4.72 -  examples shall refer to set-theory, to minimize the danger of
    4.73 -  understanding connectives of predicate logic as something special.
    4.74 -
    4.75 -  \medskip The following deduction performs @{text "\<inter>"}-introduction,
    4.76 -  working forwards from assumptions towards the conclusion.  We give
    4.77 -  both the Isar text, and depict the primitive rule involved, as
    4.78 -  determined by unification of the problem against rules that are
    4.79 -  declared in the library context.
    4.80 -*}
    4.81 -
    4.82 -text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
    4.83 -
    4.84 -(*<*)
    4.85 -notepad
    4.86 -begin
    4.87 -(*>*)
    4.88 -    assume "x \<in> A" and "x \<in> B"
    4.89 -    then have "x \<in> A \<inter> B" ..
    4.90 -(*<*)
    4.91 -end
    4.92 -(*>*)
    4.93 -
    4.94 -text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
    4.95 -
    4.96 -text {*
    4.97 -  \infer{@{prop "x \<in> A \<inter> B"}}{@{prop "x \<in> A"} & @{prop "x \<in> B"}}
    4.98 -*}
    4.99 -
   4.100 -text_raw {*\end{minipage}*}
   4.101 -
   4.102 -text {*
   4.103 -  \medskip\noindent Note that @{command assume} augments the proof
   4.104 -  context, @{command then} indicates that the current fact shall be
   4.105 -  used in the next step, and @{command have} states an intermediate
   4.106 -  goal.  The two dots ``@{command ".."}'' refer to a complete proof of
   4.107 -  this claim, using the indicated facts and a canonical rule from the
   4.108 -  context.  We could have been more explicit here by spelling out the
   4.109 -  final proof step via the @{command "by"} command:
   4.110 -*}
   4.111 -
   4.112 -(*<*)
   4.113 -notepad
   4.114 -begin
   4.115 -(*>*)
   4.116 -    assume "x \<in> A" and "x \<in> B"
   4.117 -    then have "x \<in> A \<inter> B" by (rule IntI)
   4.118 -(*<*)
   4.119 -end
   4.120 -(*>*)
   4.121 -
   4.122 -text {*
   4.123 -  \noindent The format of the @{text "\<inter>"}-introduction rule represents
   4.124 -  the most basic inference, which proceeds from given premises to a
   4.125 -  conclusion, without any nested proof context involved.
   4.126 -
   4.127 -  The next example performs backwards introduction on @{term "\<Inter>\<A>"},
   4.128 -  the intersection of all sets within a given set.  This requires a
   4.129 -  nested proof of set membership within a local context, where @{term
   4.130 -  A} is an arbitrary-but-fixed member of the collection:
   4.131 -*}
   4.132 -
   4.133 -text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
   4.134 -
   4.135 -(*<*)
   4.136 -notepad
   4.137 -begin
   4.138 -(*>*)
   4.139 -    have "x \<in> \<Inter>\<A>"
   4.140 -    proof
   4.141 -      fix A
   4.142 -      assume "A \<in> \<A>"
   4.143 -      show "x \<in> A" sorry %noproof
   4.144 -    qed
   4.145 -(*<*)
   4.146 -end
   4.147 -(*>*)
   4.148 -
   4.149 -text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
   4.150 -
   4.151 -text {*
   4.152 -  \infer{@{prop "x \<in> \<Inter>\<A>"}}{\infer*{@{prop "x \<in> A"}}{@{text "[A][A \<in> \<A>]"}}}
   4.153 -*}
   4.154 -
   4.155 -text_raw {*\end{minipage}*}
   4.156 -
   4.157 -text {*
   4.158 -  \medskip\noindent This Isar reasoning pattern again refers to the
   4.159 -  primitive rule depicted above.  The system determines it in the
   4.160 -  ``@{command proof}'' step, which could have been spelt out more
   4.161 -  explicitly as ``@{command proof}~@{text "(rule InterI)"}''.  Note
   4.162 -  that the rule involves both a local parameter @{term "A"} and an
   4.163 -  assumption @{prop "A \<in> \<A>"} in the nested reasoning.  This kind of
   4.164 -  compound rule typically demands a genuine sub-proof in Isar, working
   4.165 -  backwards rather than forwards as seen before.  In the proof body we
   4.166 -  encounter the @{command fix}-@{command assume}-@{command show}
   4.167 -  outline of nested sub-proofs that is typical for Isar.  The final
   4.168 -  @{command show} is like @{command have} followed by an additional
   4.169 -  refinement of the enclosing claim, using the rule derived from the
   4.170 -  proof body.
   4.171 -
   4.172 -  \medskip The next example involves @{term "\<Union>\<A>"}, which can be
   4.173 -  characterized as the set of all @{term "x"} such that @{prop "\<exists>A. x
   4.174 -  \<in> A \<and> A \<in> \<A>"}.  The elimination rule for @{prop "x \<in> \<Union>\<A>"} does
   4.175 -  not mention @{text "\<exists>"} and @{text "\<and>"} at all, but admits to obtain
   4.176 -  directly a local @{term "A"} such that @{prop "x \<in> A"} and @{prop "A
   4.177 -  \<in> \<A>"} hold.  This corresponds to the following Isar proof and
   4.178 -  inference rule, respectively:
   4.179 -*}
   4.180 -
   4.181 -text_raw {*\medskip\begin{minipage}{0.6\textwidth}*}
   4.182 -
   4.183 -(*<*)
   4.184 -notepad
   4.185 -begin
   4.186 -(*>*)
   4.187 -    assume "x \<in> \<Union>\<A>"
   4.188 -    then have C
   4.189 -    proof
   4.190 -      fix A
   4.191 -      assume "x \<in> A" and "A \<in> \<A>"
   4.192 -      show C sorry %noproof
   4.193 -    qed
   4.194 -(*<*)
   4.195 -end
   4.196 -(*>*)
   4.197 -
   4.198 -text_raw {*\end{minipage}\begin{minipage}{0.4\textwidth}*}
   4.199 -
   4.200 -text {*
   4.201 -  \infer{@{prop "C"}}{@{prop "x \<in> \<Union>\<A>"} & \infer*{@{prop "C"}~}{@{text "[A][x \<in> A, A \<in> \<A>]"}}}
   4.202 -*}
   4.203 -
   4.204 -text_raw {*\end{minipage}*}
   4.205 -
   4.206 -text {*
   4.207 -  \medskip\noindent Although the Isar proof follows the natural
   4.208 -  deduction rule closely, the text reads not as natural as
   4.209 -  anticipated.  There is a double occurrence of an arbitrary
   4.210 -  conclusion @{prop "C"}, which represents the final result, but is
   4.211 -  irrelevant for now.  This issue arises for any elimination rule
   4.212 -  involving local parameters.  Isar provides the derived language
   4.213 -  element @{command obtain}, which is able to perform the same
   4.214 -  elimination proof more conveniently:
   4.215 -*}
   4.216 -
   4.217 -(*<*)
   4.218 -notepad
   4.219 -begin
   4.220 -(*>*)
   4.221 -    assume "x \<in> \<Union>\<A>"
   4.222 -    then obtain A where "x \<in> A" and "A \<in> \<A>" ..
   4.223 -(*<*)
   4.224 -end
   4.225 -(*>*)
   4.226 -
   4.227 -text {*
   4.228 -  \noindent Here we avoid to mention the final conclusion @{prop "C"}
   4.229 -  and return to plain forward reasoning.  The rule involved in the
   4.230 -  ``@{command ".."}'' proof is the same as before.
   4.231 -*}
   4.232 -
   4.233 -
   4.234 -section {* The Pure framework \label{sec:framework-pure} *}
   4.235 -
   4.236 -text {*
   4.237 -  The Pure logic \cite{paulson-found,paulson700} is an intuitionistic
   4.238 -  fragment of higher-order logic \cite{church40}.  In type-theoretic
   4.239 -  parlance, there are three levels of @{text "\<lambda>"}-calculus with
   4.240 -  corresponding arrows @{text "\<Rightarrow>"}/@{text "\<And>"}/@{text "\<Longrightarrow>"}:
   4.241 -
   4.242 -  \medskip
   4.243 -  \begin{tabular}{ll}
   4.244 -  @{text "\<alpha> \<Rightarrow> \<beta>"} & syntactic function space (terms depending on terms) \\
   4.245 -  @{text "\<And>x. B(x)"} & universal quantification (proofs depending on terms) \\
   4.246 -  @{text "A \<Longrightarrow> B"} & implication (proofs depending on proofs) \\
   4.247 -  \end{tabular}
   4.248 -  \medskip
   4.249 -
   4.250 -  \noindent Here only the types of syntactic terms, and the
   4.251 -  propositions of proof terms have been shown.  The @{text
   4.252 -  "\<lambda>"}-structure of proofs can be recorded as an optional feature of
   4.253 -  the Pure inference kernel \cite{Berghofer-Nipkow:2000:TPHOL}, but
   4.254 -  the formal system can never depend on them due to \emph{proof
   4.255 -  irrelevance}.
   4.256 -
   4.257 -  On top of this most primitive layer of proofs, Pure implements a
   4.258 -  generic calculus for nested natural deduction rules, similar to
   4.259 -  \cite{Schroeder-Heister:1984}.  Here object-logic inferences are
   4.260 -  internalized as formulae over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
   4.261 -  Combining such rule statements may involve higher-order unification
   4.262 -  \cite{paulson-natural}.
   4.263 -*}
   4.264 -
   4.265 -
   4.266 -subsection {* Primitive inferences *}
   4.267 -
   4.268 -text {*
   4.269 -  Term syntax provides explicit notation for abstraction @{text "\<lambda>x ::
   4.270 -  \<alpha>. b(x)"} and application @{text "b a"}, while types are usually
   4.271 -  implicit thanks to type-inference; terms of type @{text "prop"} are
   4.272 -  called propositions.  Logical statements are composed via @{text "\<And>x
   4.273 -  :: \<alpha>. B(x)"} and @{text "A \<Longrightarrow> B"}.  Primitive reasoning operates on
   4.274 -  judgments of the form @{text "\<Gamma> \<turnstile> \<phi>"}, with standard introduction
   4.275 -  and elimination rules for @{text "\<And>"} and @{text "\<Longrightarrow>"} that refer to
   4.276 -  fixed parameters @{text "x\<^sub>1, \<dots>, x\<^sub>m"} and hypotheses
   4.277 -  @{text "A\<^sub>1, \<dots>, A\<^sub>n"} from the context @{text "\<Gamma>"};
   4.278 -  the corresponding proof terms are left implicit.  The subsequent
   4.279 -  inference rules define @{text "\<Gamma> \<turnstile> \<phi>"} inductively, relative to a
   4.280 -  collection of axioms:
   4.281 -
   4.282 -  \[
   4.283 -  \infer{@{text "\<turnstile> A"}}{(@{text "A"} \text{~axiom})}
   4.284 -  \qquad
   4.285 -  \infer{@{text "A \<turnstile> A"}}{}
   4.286 -  \]
   4.287 -
   4.288 -  \[
   4.289 -  \infer{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}{@{text "\<Gamma> \<turnstile> B(x)"} & @{text "x \<notin> \<Gamma>"}}
   4.290 -  \qquad
   4.291 -  \infer{@{text "\<Gamma> \<turnstile> B(a)"}}{@{text "\<Gamma> \<turnstile> \<And>x. B(x)"}}
   4.292 -  \]
   4.293 -
   4.294 -  \[
   4.295 -  \infer{@{text "\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<Gamma> \<turnstile> B"}}
   4.296 -  \qquad
   4.297 -  \infer{@{text "\<Gamma>\<^sub>1 \<union> \<Gamma>\<^sub>2 \<turnstile> B"}}{@{text "\<Gamma>\<^sub>1 \<turnstile> A \<Longrightarrow> B"} & @{text "\<Gamma>\<^sub>2 \<turnstile> A"}}
   4.298 -  \]
   4.299 -
   4.300 -  Furthermore, Pure provides a built-in equality @{text "\<equiv> :: \<alpha> \<Rightarrow> \<alpha> \<Rightarrow>
   4.301 -  prop"} with axioms for reflexivity, substitution, extensionality,
   4.302 -  and @{text "\<alpha>\<beta>\<eta>"}-conversion on @{text "\<lambda>"}-terms.
   4.303 -
   4.304 -  \medskip An object-logic introduces another layer on top of Pure,
   4.305 -  e.g.\ with types @{text "i"} for individuals and @{text "o"} for
   4.306 -  propositions, term constants @{text "Trueprop :: o \<Rightarrow> prop"} as
   4.307 -  (implicit) derivability judgment and connectives like @{text "\<and> :: o
   4.308 -  \<Rightarrow> o \<Rightarrow> o"} or @{text "\<forall> :: (i \<Rightarrow> o) \<Rightarrow> o"}, and axioms for object-level
   4.309 -  rules such as @{text "conjI: A \<Longrightarrow> B \<Longrightarrow> A \<and> B"} or @{text "allI: (\<And>x. B
   4.310 -  x) \<Longrightarrow> \<forall>x. B x"}.  Derived object rules are represented as theorems of
   4.311 -  Pure.  After the initial object-logic setup, further axiomatizations
   4.312 -  are usually avoided; plain definitions and derived principles are
   4.313 -  used exclusively.
   4.314 -*}
   4.315 -
   4.316 -
   4.317 -subsection {* Reasoning with rules \label{sec:framework-resolution} *}
   4.318 -
   4.319 -text {*
   4.320 -  Primitive inferences mostly serve foundational purposes.  The main
   4.321 -  reasoning mechanisms of Pure operate on nested natural deduction
   4.322 -  rules expressed as formulae, using @{text "\<And>"} to bind local
   4.323 -  parameters and @{text "\<Longrightarrow>"} to express entailment.  Multiple
   4.324 -  parameters and premises are represented by repeating these
   4.325 -  connectives in a right-associative manner.
   4.326 -
   4.327 -  Since @{text "\<And>"} and @{text "\<Longrightarrow>"} commute thanks to the theorem
   4.328 -  @{prop "(A \<Longrightarrow> (\<And>x. B x)) \<equiv> (\<And>x. A \<Longrightarrow> B x)"}, we may assume w.l.o.g.\
   4.329 -  that rule statements always observe the normal form where
   4.330 -  quantifiers are pulled in front of implications at each level of
   4.331 -  nesting.  This means that any Pure proposition may be presented as a
   4.332 -  \emph{Hereditary Harrop Formula} \cite{Miller:1991} which is of the
   4.333 -  form @{text "\<And>x\<^sub>1 \<dots> x\<^sub>m. H\<^sub>1 \<Longrightarrow> \<dots> H\<^sub>n \<Longrightarrow>
   4.334 -  A"} for @{text "m, n \<ge> 0"}, and @{text "A"} atomic, and @{text
   4.335 -  "H\<^sub>1, \<dots>, H\<^sub>n"} being recursively of the same format.
   4.336 -  Following the convention that outermost quantifiers are implicit,
   4.337 -  Horn clauses @{text "A\<^sub>1 \<Longrightarrow> \<dots> A\<^sub>n \<Longrightarrow> A"} are a special
   4.338 -  case of this.
   4.339 -
   4.340 -  For example, @{text "\<inter>"}-introduction rule encountered before is
   4.341 -  represented as a Pure theorem as follows:
   4.342 -  \[
   4.343 -  @{text "IntI:"}~@{prop "x \<in> A \<Longrightarrow> x \<in> B \<Longrightarrow> x \<in> A \<inter> B"}
   4.344 -  \]
   4.345 -
   4.346 -  \noindent This is a plain Horn clause, since no further nesting on
   4.347 -  the left is involved.  The general @{text "\<Inter>"}-introduction
   4.348 -  corresponds to a Hereditary Harrop Formula with one additional level
   4.349 -  of nesting:
   4.350 -  \[
   4.351 -  @{text "InterI:"}~@{prop "(\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A) \<Longrightarrow> x \<in> \<Inter>\<A>"}
   4.352 -  \]
   4.353 -
   4.354 -  \medskip Goals are also represented as rules: @{text "A\<^sub>1 \<Longrightarrow>
   4.355 -  \<dots> A\<^sub>n \<Longrightarrow> C"} states that the sub-goals @{text "A\<^sub>1, \<dots>,
   4.356 -  A\<^sub>n"} entail the result @{text "C"}; for @{text "n = 0"} the
   4.357 -  goal is finished.  To allow @{text "C"} being a rule statement
   4.358 -  itself, we introduce the protective marker @{text "# :: prop \<Rightarrow>
   4.359 -  prop"}, which is defined as identity and hidden from the user.  We
   4.360 -  initialize and finish goal states as follows:
   4.361 -
   4.362 -  \[
   4.363 -  \begin{array}{c@ {\qquad}c}
   4.364 -  \infer[(@{inference_def init})]{@{text "C \<Longrightarrow> #C"}}{} &
   4.365 -  \infer[(@{inference_def finish})]{@{text C}}{@{text "#C"}}
   4.366 -  \end{array}
   4.367 -  \]
   4.368 -
   4.369 -  \noindent Goal states are refined in intermediate proof steps until
   4.370 -  a finished form is achieved.  Here the two main reasoning principles
   4.371 -  are @{inference resolution}, for back-chaining a rule against a
   4.372 -  sub-goal (replacing it by zero or more sub-goals), and @{inference
   4.373 -  assumption}, for solving a sub-goal (finding a short-circuit with
   4.374 -  local assumptions).  Below @{text "\<^vec>x"} stands for @{text
   4.375 -  "x\<^sub>1, \<dots>, x\<^sub>n"} (@{text "n \<ge> 0"}).
   4.376 -
   4.377 -  \[
   4.378 -  \infer[(@{inference_def resolution})]
   4.379 -  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>A (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
   4.380 -  {\begin{tabular}{rl}
   4.381 -    @{text "rule:"} &
   4.382 -    @{text "\<^vec>A \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
   4.383 -    @{text "goal:"} &
   4.384 -    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
   4.385 -    @{text "goal unifier:"} &
   4.386 -    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
   4.387 -   \end{tabular}}
   4.388 -  \]
   4.389 -
   4.390 -  \medskip
   4.391 -
   4.392 -  \[
   4.393 -  \infer[(@{inference_def assumption})]{@{text "C\<vartheta>"}}
   4.394 -  {\begin{tabular}{rl}
   4.395 -    @{text "goal:"} &
   4.396 -    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> A \<^vec>x) \<Longrightarrow> C"} \\
   4.397 -    @{text "assm unifier:"} & @{text "A\<vartheta> = H\<^sub>i\<vartheta>"}~~\text{(for some~@{text "H\<^sub>i"})} \\
   4.398 -   \end{tabular}}
   4.399 -  \]
   4.400 -
   4.401 -  The following trace illustrates goal-oriented reasoning in
   4.402 -  Isabelle/Pure:
   4.403 -
   4.404 -  {\footnotesize
   4.405 -  \medskip
   4.406 -  \begin{tabular}{r@ {\quad}l}
   4.407 -  @{text "(A \<and> B \<Longrightarrow> B \<and> A) \<Longrightarrow> #(A \<and> B \<Longrightarrow> B \<and> A)"} & @{text "(init)"} \\
   4.408 -  @{text "(A \<and> B \<Longrightarrow> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution B \<Longrightarrow> A \<Longrightarrow> B \<and> A)"} \\
   4.409 -  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> (A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> B)"} \\
   4.410 -  @{text "(A \<and> B \<Longrightarrow> A) \<Longrightarrow> #\<dots>"} & @{text "(assumption)"} \\
   4.411 -  @{text "(A \<and> B \<Longrightarrow> A \<and> B) \<Longrightarrow> #\<dots>"} & @{text "(resolution A \<and> B \<Longrightarrow> A)"} \\
   4.412 -  @{text "#\<dots>"} & @{text "(assumption)"} \\
   4.413 -  @{text "A \<and> B \<Longrightarrow> B \<and> A"} & @{text "(finish)"} \\
   4.414 -  \end{tabular}
   4.415 -  \medskip
   4.416 -  }
   4.417 -
   4.418 -  Compositions of @{inference assumption} after @{inference
   4.419 -  resolution} occurs quite often, typically in elimination steps.
   4.420 -  Traditional Isabelle tactics accommodate this by a combined
   4.421 -  @{inference_def elim_resolution} principle.  In contrast, Isar uses
   4.422 -  a slightly more refined combination, where the assumptions to be
   4.423 -  closed are marked explicitly, using again the protective marker
   4.424 -  @{text "#"}:
   4.425 -
   4.426 -  \[
   4.427 -  \infer[(@{inference refinement})]
   4.428 -  {@{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> \<^vec>G' (\<^vec>a \<^vec>x))\<vartheta> \<Longrightarrow> C\<vartheta>"}}
   4.429 -  {\begin{tabular}{rl}
   4.430 -    @{text "sub\<hyphen>proof:"} &
   4.431 -    @{text "\<^vec>G \<^vec>a \<Longrightarrow> B \<^vec>a"} \\
   4.432 -    @{text "goal:"} &
   4.433 -    @{text "(\<And>\<^vec>x. \<^vec>H \<^vec>x \<Longrightarrow> B' \<^vec>x) \<Longrightarrow> C"} \\
   4.434 -    @{text "goal unifier:"} &
   4.435 -    @{text "(\<lambda>\<^vec>x. B (\<^vec>a \<^vec>x))\<vartheta> = B'\<vartheta>"} \\
   4.436 -    @{text "assm unifiers:"} &
   4.437 -    @{text "(\<lambda>\<^vec>x. G\<^sub>j (\<^vec>a \<^vec>x))\<vartheta> = #H\<^sub>i\<vartheta>"} \\
   4.438 -    & \quad (for each marked @{text "G\<^sub>j"} some @{text "#H\<^sub>i"}) \\
   4.439 -   \end{tabular}}
   4.440 -  \]
   4.441 -
   4.442 -  \noindent Here the @{text "sub\<hyphen>proof"} rule stems from the
   4.443 -  main @{command fix}-@{command assume}-@{command show} outline of
   4.444 -  Isar (cf.\ \secref{sec:framework-subproof}): each assumption
   4.445 -  indicated in the text results in a marked premise @{text "G"} above.
   4.446 -  The marking enforces resolution against one of the sub-goal's
   4.447 -  premises.  Consequently, @{command fix}-@{command assume}-@{command
   4.448 -  show} enables to fit the result of a sub-proof quite robustly into a
   4.449 -  pending sub-goal, while maintaining a good measure of flexibility.
   4.450 -*}
   4.451 -
   4.452 -
   4.453 -section {* The Isar proof language \label{sec:framework-isar} *}
   4.454 -
   4.455 -text {*
   4.456 -  Structured proofs are presented as high-level expressions for
   4.457 -  composing entities of Pure (propositions, facts, and goals).  The
   4.458 -  Isar proof language allows to organize reasoning within the
   4.459 -  underlying rule calculus of Pure, but Isar is not another logical
   4.460 -  calculus!
   4.461 -
   4.462 -  Isar is an exercise in sound minimalism.  Approximately half of the
   4.463 -  language is introduced as primitive, the rest defined as derived
   4.464 -  concepts.  The following grammar describes the core language
   4.465 -  (category @{text "proof"}), which is embedded into theory
   4.466 -  specification elements such as @{command theorem}; see also
   4.467 -  \secref{sec:framework-stmt} for the separate category @{text
   4.468 -  "statement"}.
   4.469 -
   4.470 -  \medskip
   4.471 -  \begin{tabular}{rcl}
   4.472 -    @{text "theory\<hyphen>stmt"} & = & @{command "theorem"}~@{text "statement proof  |"}~~@{command "definition"}~@{text "\<dots>  |  \<dots>"} \\[1ex]
   4.473 -
   4.474 -    @{text "proof"} & = & @{text "prfx\<^sup>*"}~@{command "proof"}~@{text "method\<^sup>? stmt\<^sup>*"}~@{command "qed"}~@{text "method\<^sup>?"} \\[1ex]
   4.475 -
   4.476 -    @{text prfx} & = & @{command "using"}~@{text "facts"} \\
   4.477 -    & @{text "|"} & @{command "unfolding"}~@{text "facts"} \\
   4.478 -
   4.479 -    @{text stmt} & = & @{command "{"}~@{text "stmt\<^sup>*"}~@{command "}"} \\
   4.480 -    & @{text "|"} & @{command "next"} \\
   4.481 -    & @{text "|"} & @{command "note"}~@{text "name = facts"} \\
   4.482 -    & @{text "|"} & @{command "let"}~@{text "term = term"} \\
   4.483 -    & @{text "|"} & @{command "fix"}~@{text "var\<^sup>+"} \\
   4.484 -    & @{text "|"} & @{command assume}~@{text "\<guillemotleft>inference\<guillemotright> name: props"} \\
   4.485 -    & @{text "|"} & @{command "then"}@{text "\<^sup>?"}~@{text goal} \\
   4.486 -    @{text goal} & = & @{command "have"}~@{text "name: props proof"} \\
   4.487 -    & @{text "|"} & @{command "show"}~@{text "name: props proof"} \\
   4.488 -  \end{tabular}
   4.489 -
   4.490 -  \medskip Simultaneous propositions or facts may be separated by the
   4.491 -  @{keyword "and"} keyword.
   4.492 -
   4.493 -  \medskip The syntax for terms and propositions is inherited from
   4.494 -  Pure (and the object-logic).  A @{text "pattern"} is a @{text
   4.495 -  "term"} with schematic variables, to be bound by higher-order
   4.496 -  matching.
   4.497 -
   4.498 -  \medskip Facts may be referenced by name or proposition.  For
   4.499 -  example, the result of ``@{command have}~@{text "a: A \<langle>proof\<rangle>"}''
   4.500 -  becomes available both as @{text "a"} and
   4.501 -  \isacharbackquoteopen@{text "A"}\isacharbackquoteclose.  Moreover,
   4.502 -  fact expressions may involve attributes that modify either the
   4.503 -  theorem or the background context.  For example, the expression
   4.504 -  ``@{text "a [OF b]"}'' refers to the composition of two facts
   4.505 -  according to the @{inference resolution} inference of
   4.506 -  \secref{sec:framework-resolution}, while ``@{text "a [intro]"}''
   4.507 -  declares a fact as introduction rule in the context.
   4.508 -
   4.509 -  The special fact called ``@{fact this}'' always refers to the last
   4.510 -  result, as produced by @{command note}, @{command assume}, @{command
   4.511 -  have}, or @{command show}.  Since @{command note} occurs
   4.512 -  frequently together with @{command then} we provide some
   4.513 -  abbreviations:
   4.514 -
   4.515 -  \medskip
   4.516 -  \begin{tabular}{rcl}
   4.517 -    @{command from}~@{text a} & @{text "\<equiv>"} & @{command note}~@{text a}~@{command then} \\
   4.518 -    @{command with}~@{text a} & @{text "\<equiv>"} & @{command from}~@{text "a \<AND> this"} \\
   4.519 -  \end{tabular}
   4.520 -  \medskip
   4.521 -
   4.522 -  The @{text "method"} category is essentially a parameter and may be
   4.523 -  populated later.  Methods use the facts indicated by @{command
   4.524 -  "then"} or @{command using}, and then operate on the goal state.
   4.525 -  Some basic methods are predefined: ``@{method "-"}'' leaves the goal
   4.526 -  unchanged, ``@{method this}'' applies the facts as rules to the
   4.527 -  goal, ``@{method (Pure) "rule"}'' applies the facts to another rule and the
   4.528 -  result to the goal (both ``@{method this}'' and ``@{method (Pure) rule}''
   4.529 -  refer to @{inference resolution} of
   4.530 -  \secref{sec:framework-resolution}).  The secondary arguments to
   4.531 -  ``@{method (Pure) rule}'' may be specified explicitly as in ``@{text "(rule
   4.532 -  a)"}'', or picked from the context.  In the latter case, the system
   4.533 -  first tries rules declared as @{attribute (Pure) elim} or
   4.534 -  @{attribute (Pure) dest}, followed by those declared as @{attribute
   4.535 -  (Pure) intro}.
   4.536 -
   4.537 -  The default method for @{command proof} is ``@{method (Pure) rule}''
   4.538 -  (arguments picked from the context), for @{command qed} it is
   4.539 -  ``@{method "-"}''.  Further abbreviations for terminal proof steps
   4.540 -  are ``@{command "by"}~@{text "method\<^sub>1 method\<^sub>2"}'' for
   4.541 -  ``@{command proof}~@{text "method\<^sub>1"}~@{command qed}~@{text
   4.542 -  "method\<^sub>2"}'', and ``@{command ".."}'' for ``@{command
   4.543 -  "by"}~@{method (Pure) rule}, and ``@{command "."}'' for ``@{command
   4.544 -  "by"}~@{method this}''.  The @{command unfolding} element operates
   4.545 -  directly on the current facts and goal by applying equalities.
   4.546 -
   4.547 -  \medskip Block structure can be indicated explicitly by ``@{command
   4.548 -  "{"}~@{text "\<dots>"}~@{command "}"}'', although the body of a sub-proof
   4.549 -  already involves implicit nesting.  In any case, @{command next}
   4.550 -  jumps into the next section of a block, i.e.\ it acts like closing
   4.551 -  an implicit block scope and opening another one; there is no direct
   4.552 -  correspondence to subgoals here.
   4.553 -
   4.554 -  The remaining elements @{command fix} and @{command assume} build up
   4.555 -  a local context (see \secref{sec:framework-context}), while
   4.556 -  @{command show} refines a pending sub-goal by the rule resulting
   4.557 -  from a nested sub-proof (see \secref{sec:framework-subproof}).
   4.558 -  Further derived concepts will support calculational reasoning (see
   4.559 -  \secref{sec:framework-calc}).
   4.560 -*}
   4.561 -
   4.562 -
   4.563 -subsection {* Context elements \label{sec:framework-context} *}
   4.564 -
   4.565 -text {*
   4.566 -  In judgments @{text "\<Gamma> \<turnstile> \<phi>"} of the primitive framework, @{text "\<Gamma>"}
   4.567 -  essentially acts like a proof context.  Isar elaborates this idea
   4.568 -  towards a higher-level notion, with additional information for
   4.569 -  type-inference, term abbreviations, local facts, hypotheses etc.
   4.570 -
   4.571 -  The element @{command fix}~@{text "x :: \<alpha>"} declares a local
   4.572 -  parameter, i.e.\ an arbitrary-but-fixed entity of a given type; in
   4.573 -  results exported from the context, @{text "x"} may become anything.
   4.574 -  The @{command assume}~@{text "\<guillemotleft>inference\<guillemotright>"} element provides a
   4.575 -  general interface to hypotheses: ``@{command assume}~@{text
   4.576 -  "\<guillemotleft>inference\<guillemotright> A"}'' produces @{text "A \<turnstile> A"} locally, while the
   4.577 -  included inference tells how to discharge @{text A} from results
   4.578 -  @{text "A \<turnstile> B"} later on.  There is no user-syntax for @{text
   4.579 -  "\<guillemotleft>inference\<guillemotright>"}, i.e.\ it may only occur internally when derived
   4.580 -  commands are defined in ML.
   4.581 -
   4.582 -  At the user-level, the default inference for @{command assume} is
   4.583 -  @{inference discharge} as given below.  The additional variants
   4.584 -  @{command presume} and @{command def} are defined as follows:
   4.585 -
   4.586 -  \medskip
   4.587 -  \begin{tabular}{rcl}
   4.588 -    @{command presume}~@{text A} & @{text "\<equiv>"} & @{command assume}~@{text "\<guillemotleft>weak\<hyphen>discharge\<guillemotright> A"} \\
   4.589 -    @{command def}~@{text "x \<equiv> a"} & @{text "\<equiv>"} & @{command fix}~@{text x}~@{command assume}~@{text "\<guillemotleft>expansion\<guillemotright> x \<equiv> a"} \\
   4.590 -  \end{tabular}
   4.591 -  \medskip
   4.592 -
   4.593 -  \[
   4.594 -  \infer[(@{inference_def discharge})]{@{text "\<strut>\<Gamma> - A \<turnstile> #A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
   4.595 -  \]
   4.596 -  \[
   4.597 -  \infer[(@{inference_def "weak\<hyphen>discharge"})]{@{text "\<strut>\<Gamma> - A \<turnstile> A \<Longrightarrow> B"}}{@{text "\<strut>\<Gamma> \<turnstile> B"}}
   4.598 -  \]
   4.599 -  \[
   4.600 -  \infer[(@{inference_def expansion})]{@{text "\<strut>\<Gamma> - (x \<equiv> a) \<turnstile> B a"}}{@{text "\<strut>\<Gamma> \<turnstile> B x"}}
   4.601 -  \]
   4.602 -
   4.603 -  \medskip Note that @{inference discharge} and @{inference
   4.604 -  "weak\<hyphen>discharge"} differ in the marker for @{prop A}, which is
   4.605 -  relevant when the result of a @{command fix}-@{command
   4.606 -  assume}-@{command show} outline is composed with a pending goal,
   4.607 -  cf.\ \secref{sec:framework-subproof}.
   4.608 -
   4.609 -  The most interesting derived context element in Isar is @{command
   4.610 -  obtain} \cite[\S5.3]{Wenzel-PhD}, which supports generalized
   4.611 -  elimination steps in a purely forward manner.  The @{command obtain}
   4.612 -  command takes a specification of parameters @{text "\<^vec>x"} and
   4.613 -  assumptions @{text "\<^vec>A"} to be added to the context, together
   4.614 -  with a proof of a case rule stating that this extension is
   4.615 -  conservative (i.e.\ may be removed from closed results later on):
   4.616 -
   4.617 -  \medskip
   4.618 -  \begin{tabular}{l}
   4.619 -  @{text "\<langle>facts\<rangle>"}~~@{command obtain}~@{text "\<^vec>x \<WHERE> \<^vec>A \<^vec>x  \<langle>proof\<rangle> \<equiv>"} \\[0.5ex]
   4.620 -  \quad @{command have}~@{text "case: \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis\<rangle>"} \\
   4.621 -  \quad @{command proof}~@{method "-"} \\
   4.622 -  \qquad @{command fix}~@{text thesis} \\
   4.623 -  \qquad @{command assume}~@{text "[intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis"} \\
   4.624 -  \qquad @{command show}~@{text thesis}~@{command using}~@{text "\<langle>facts\<rangle> \<langle>proof\<rangle>"} \\
   4.625 -  \quad @{command qed} \\
   4.626 -  \quad @{command fix}~@{text "\<^vec>x"}~@{command assume}~@{text "\<guillemotleft>elimination case\<guillemotright> \<^vec>A \<^vec>x"} \\
   4.627 -  \end{tabular}
   4.628 -  \medskip
   4.629 -
   4.630 -  \[
   4.631 -  \infer[(@{inference elimination})]{@{text "\<Gamma> \<turnstile> B"}}{
   4.632 -    \begin{tabular}{rl}
   4.633 -    @{text "case:"} &
   4.634 -    @{text "\<Gamma> \<turnstile> \<And>thesis. (\<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis) \<Longrightarrow> thesis"} \\[0.2ex]
   4.635 -    @{text "result:"} &
   4.636 -    @{text "\<Gamma> \<union> \<^vec>A \<^vec>y \<turnstile> B"} \\[0.2ex]
   4.637 -    \end{tabular}}
   4.638 -  \]
   4.639 -
   4.640 -  \noindent Here the name ``@{text thesis}'' is a specific convention
   4.641 -  for an arbitrary-but-fixed proposition; in the primitive natural
   4.642 -  deduction rules shown before we have occasionally used @{text C}.
   4.643 -  The whole statement of ``@{command obtain}~@{text x}~@{keyword
   4.644 -  "where"}~@{text "A x"}'' may be read as a claim that @{text "A x"}
   4.645 -  may be assumed for some arbitrary-but-fixed @{text "x"}.  Also note
   4.646 -  that ``@{command obtain}~@{text "A \<AND> B"}'' without parameters
   4.647 -  is similar to ``@{command have}~@{text "A \<AND> B"}'', but the
   4.648 -  latter involves multiple sub-goals.
   4.649 -
   4.650 -  \medskip The subsequent Isar proof texts explain all context
   4.651 -  elements introduced above using the formal proof language itself.
   4.652 -  After finishing a local proof within a block, we indicate the
   4.653 -  exported result via @{command note}.
   4.654 -*}
   4.655 -
   4.656 -(*<*)
   4.657 -theorem True
   4.658 -proof
   4.659 -(*>*)
   4.660 -  txt_raw {* \begin{minipage}[t]{0.45\textwidth} *}
   4.661 -  {
   4.662 -    fix x
   4.663 -    have "B x" sorry %noproof
   4.664 -  }
   4.665 -  note `\<And>x. B x`
   4.666 -  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
   4.667 -  {
   4.668 -    assume A
   4.669 -    have B sorry %noproof
   4.670 -  }
   4.671 -  note `A \<Longrightarrow> B`
   4.672 -  txt_raw {* \end{minipage}\\[3ex]\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
   4.673 -  {
   4.674 -    def x \<equiv> a
   4.675 -    have "B x" sorry %noproof
   4.676 -  }
   4.677 -  note `B a`
   4.678 -  txt_raw {* \end{minipage}\quad\begin{minipage}[t]{0.45\textwidth} *}(*<*)next(*>*)
   4.679 -  {
   4.680 -    obtain x where "A x" sorry %noproof
   4.681 -    have B sorry %noproof
   4.682 -  }
   4.683 -  note `B`
   4.684 -  txt_raw {* \end{minipage} *}
   4.685 -(*<*)
   4.686 -qed
   4.687 -(*>*)
   4.688 -
   4.689 -text {*
   4.690 -  \bigskip\noindent This illustrates the meaning of Isar context
   4.691 -  elements without goals getting in between.
   4.692 -*}
   4.693 -
   4.694 -subsection {* Structured statements \label{sec:framework-stmt} *}
   4.695 -
   4.696 -text {*
   4.697 -  The category @{text "statement"} of top-level theorem specifications
   4.698 -  is defined as follows:
   4.699 -
   4.700 -  \medskip
   4.701 -  \begin{tabular}{rcl}
   4.702 -  @{text "statement"} & @{text "\<equiv>"} & @{text "name: props \<AND> \<dots>"} \\
   4.703 -  & @{text "|"} & @{text "context\<^sup>* conclusion"} \\[0.5ex]
   4.704 -
   4.705 -  @{text "context"} & @{text "\<equiv>"} & @{text "\<FIXES> vars \<AND> \<dots>"} \\
   4.706 -  & @{text "|"} & @{text "\<ASSUMES> name: props \<AND> \<dots>"} \\
   4.707 -
   4.708 -  @{text "conclusion"} & @{text "\<equiv>"} & @{text "\<SHOWS> name: props \<AND> \<dots>"} \\
   4.709 -  & @{text "|"} & @{text "\<OBTAINS> vars \<AND> \<dots> \<WHERE> name: props \<AND> \<dots>"} \\
   4.710 -  & & \quad @{text "\<BBAR> \<dots>"} \\
   4.711 -  \end{tabular}
   4.712 -
   4.713 -  \medskip\noindent A simple @{text "statement"} consists of named
   4.714 -  propositions.  The full form admits local context elements followed
   4.715 -  by the actual conclusions, such as ``@{keyword "fixes"}~@{text
   4.716 -  x}~@{keyword "assumes"}~@{text "A x"}~@{keyword "shows"}~@{text "B
   4.717 -  x"}''.  The final result emerges as a Pure rule after discharging
   4.718 -  the context: @{prop "\<And>x. A x \<Longrightarrow> B x"}.
   4.719 -
   4.720 -  The @{keyword "obtains"} variant is another abbreviation defined
   4.721 -  below; unlike @{command obtain} (cf.\
   4.722 -  \secref{sec:framework-context}) there may be several ``cases''
   4.723 -  separated by ``@{text "\<BBAR>"}'', each consisting of several
   4.724 -  parameters (@{text "vars"}) and several premises (@{text "props"}).
   4.725 -  This specifies multi-branch elimination rules.
   4.726 -
   4.727 -  \medskip
   4.728 -  \begin{tabular}{l}
   4.729 -  @{text "\<OBTAINS> \<^vec>x \<WHERE> \<^vec>A \<^vec>x   \<BBAR>   \<dots>   \<equiv>"} \\[0.5ex]
   4.730 -  \quad @{text "\<FIXES> thesis"} \\
   4.731 -  \quad @{text "\<ASSUMES> [intro]: \<And>\<^vec>x. \<^vec>A \<^vec>x \<Longrightarrow> thesis  \<AND>  \<dots>"} \\
   4.732 -  \quad @{text "\<SHOWS> thesis"} \\
   4.733 -  \end{tabular}
   4.734 -  \medskip
   4.735 -
   4.736 -  Presenting structured statements in such an ``open'' format usually
   4.737 -  simplifies the subsequent proof, because the outer structure of the
   4.738 -  problem is already laid out directly.  E.g.\ consider the following
   4.739 -  canonical patterns for @{text "\<SHOWS>"} and @{text "\<OBTAINS>"},
   4.740 -  respectively:
   4.741 -*}
   4.742 -
   4.743 -text_raw {*\begin{minipage}{0.5\textwidth}*}
   4.744 -
   4.745 -theorem
   4.746 -  fixes x and y
   4.747 -  assumes "A x" and "B y"
   4.748 -  shows "C x y"
   4.749 -proof -
   4.750 -  from `A x` and `B y`
   4.751 -  show "C x y" sorry %noproof
   4.752 -qed
   4.753 -
   4.754 -text_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
   4.755 -
   4.756 -theorem
   4.757 -  obtains x and y
   4.758 -  where "A x" and "B y"
   4.759 -proof -
   4.760 -  have "A a" and "B b" sorry %noproof
   4.761 -  then show thesis ..
   4.762 -qed
   4.763 -
   4.764 -text_raw {*\end{minipage}*}
   4.765 -
   4.766 -text {*
   4.767 -  \medskip\noindent Here local facts \isacharbackquoteopen@{text "A
   4.768 -  x"}\isacharbackquoteclose\ and \isacharbackquoteopen@{text "B
   4.769 -  y"}\isacharbackquoteclose\ are referenced immediately; there is no
   4.770 -  need to decompose the logical rule structure again.  In the second
   4.771 -  proof the final ``@{command then}~@{command show}~@{text
   4.772 -  thesis}~@{command ".."}''  involves the local rule case @{text "\<And>x
   4.773 -  y. A x \<Longrightarrow> B y \<Longrightarrow> thesis"} for the particular instance of terms @{text
   4.774 -  "a"} and @{text "b"} produced in the body.
   4.775 -*}
   4.776 -
   4.777 -
   4.778 -subsection {* Structured proof refinement \label{sec:framework-subproof} *}
   4.779 -
   4.780 -text {*
   4.781 -  By breaking up the grammar for the Isar proof language, we may
   4.782 -  understand a proof text as a linear sequence of individual proof
   4.783 -  commands.  These are interpreted as transitions of the Isar virtual
   4.784 -  machine (Isar/VM), which operates on a block-structured
   4.785 -  configuration in single steps.  This allows users to write proof
   4.786 -  texts in an incremental manner, and inspect intermediate
   4.787 -  configurations for debugging.
   4.788 -
   4.789 -  The basic idea is analogous to evaluating algebraic expressions on a
   4.790 -  stack machine: @{text "(a + b) \<cdot> c"} then corresponds to a sequence
   4.791 -  of single transitions for each symbol @{text "(, a, +, b, ), \<cdot>, c"}.
   4.792 -  In Isar the algebraic values are facts or goals, and the operations
   4.793 -  are inferences.
   4.794 -
   4.795 -  \medskip The Isar/VM state maintains a stack of nodes, each node
   4.796 -  contains the local proof context, the linguistic mode, and a pending
   4.797 -  goal (optional).  The mode determines the type of transition that
   4.798 -  may be performed next, it essentially alternates between forward and
   4.799 -  backward reasoning, with an intermediate stage for chained facts
   4.800 -  (see \figref{fig:isar-vm}).
   4.801 -
   4.802 -  \begin{figure}[htb]
   4.803 -  \begin{center}
   4.804 -  \includegraphics[width=0.8\textwidth]{isar-vm}
   4.805 -  \end{center}
   4.806 -  \caption{Isar/VM modes}\label{fig:isar-vm}
   4.807 -  \end{figure}
   4.808 -
   4.809 -  For example, in @{text "state"} mode Isar acts like a mathematical
   4.810 -  scratch-pad, accepting declarations like @{command fix}, @{command
   4.811 -  assume}, and claims like @{command have}, @{command show}.  A goal
   4.812 -  statement changes the mode to @{text "prove"}, which means that we
   4.813 -  may now refine the problem via @{command unfolding} or @{command
   4.814 -  proof}.  Then we are again in @{text "state"} mode of a proof body,
   4.815 -  which may issue @{command show} statements to solve pending
   4.816 -  sub-goals.  A concluding @{command qed} will return to the original
   4.817 -  @{text "state"} mode one level upwards.  The subsequent Isar/VM
   4.818 -  trace indicates block structure, linguistic mode, goal state, and
   4.819 -  inferences:
   4.820 -*}
   4.821 -
   4.822 -text_raw {* \begingroup\footnotesize *}
   4.823 -(*<*)notepad begin
   4.824 -(*>*)
   4.825 -  txt_raw {* \begin{minipage}[t]{0.18\textwidth} *}
   4.826 -  have "A \<longrightarrow> B"
   4.827 -  proof
   4.828 -    assume A
   4.829 -    show B
   4.830 -      sorry %noproof
   4.831 -  qed
   4.832 -  txt_raw {* \end{minipage}\quad
   4.833 -\begin{minipage}[t]{0.06\textwidth}
   4.834 -@{text "begin"} \\
   4.835 -\\
   4.836 -\\
   4.837 -@{text "begin"} \\
   4.838 -@{text "end"} \\
   4.839 -@{text "end"} \\
   4.840 -\end{minipage}
   4.841 -\begin{minipage}[t]{0.08\textwidth}
   4.842 -@{text "prove"} \\
   4.843 -@{text "state"} \\
   4.844 -@{text "state"} \\
   4.845 -@{text "prove"} \\
   4.846 -@{text "state"} \\
   4.847 -@{text "state"} \\
   4.848 -\end{minipage}\begin{minipage}[t]{0.35\textwidth}
   4.849 -@{text "(A \<longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
   4.850 -@{text "(A \<Longrightarrow> B) \<Longrightarrow> #(A \<longrightarrow> B)"} \\
   4.851 -\\
   4.852 -\\
   4.853 -@{text "#(A \<longrightarrow> B)"} \\
   4.854 -@{text "A \<longrightarrow> B"} \\
   4.855 -\end{minipage}\begin{minipage}[t]{0.4\textwidth}
   4.856 -@{text "(init)"} \\
   4.857 -@{text "(resolution impI)"} \\
   4.858 -\\
   4.859 -\\
   4.860 -@{text "(refinement #A \<Longrightarrow> B)"} \\
   4.861 -@{text "(finish)"} \\
   4.862 -\end{minipage} *}
   4.863 -(*<*)
   4.864 -end
   4.865 -(*>*)
   4.866 -text_raw {* \endgroup *}
   4.867 -
   4.868 -text {*
   4.869 -  \noindent Here the @{inference refinement} inference from
   4.870 -  \secref{sec:framework-resolution} mediates composition of Isar
   4.871 -  sub-proofs nicely.  Observe that this principle incorporates some
   4.872 -  degree of freedom in proof composition.  In particular, the proof
   4.873 -  body allows parameters and assumptions to be re-ordered, or commuted
   4.874 -  according to Hereditary Harrop Form.  Moreover, context elements
   4.875 -  that are not used in a sub-proof may be omitted altogether.  For
   4.876 -  example:
   4.877 -*}
   4.878 -
   4.879 -text_raw {*\begin{minipage}{0.5\textwidth}*}
   4.880 -
   4.881 -(*<*)
   4.882 -notepad
   4.883 -begin
   4.884 -(*>*)
   4.885 -  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
   4.886 -  proof -
   4.887 -    fix x and y
   4.888 -    assume "A x" and "B y"
   4.889 -    show "C x y" sorry %noproof
   4.890 -  qed
   4.891 -
   4.892 -txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
   4.893 -
   4.894 -(*<*)
   4.895 -next
   4.896 -(*>*)
   4.897 -  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
   4.898 -  proof -
   4.899 -    fix x assume "A x"
   4.900 -    fix y assume "B y"
   4.901 -    show "C x y" sorry %noproof
   4.902 -  qed
   4.903 -
   4.904 -txt_raw {*\end{minipage}\\[3ex]\begin{minipage}{0.5\textwidth}*}
   4.905 -
   4.906 -(*<*)
   4.907 -next
   4.908 -(*>*)
   4.909 -  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
   4.910 -  proof -
   4.911 -    fix y assume "B y"
   4.912 -    fix x assume "A x"
   4.913 -    show "C x y" sorry
   4.914 -  qed
   4.915 -
   4.916 -txt_raw {*\end{minipage}\begin{minipage}{0.5\textwidth}*}
   4.917 -(*<*)
   4.918 -next
   4.919 -(*>*)
   4.920 -  have "\<And>x y. A x \<Longrightarrow> B y \<Longrightarrow> C x y"
   4.921 -  proof -
   4.922 -    fix y assume "B y"
   4.923 -    fix x
   4.924 -    show "C x y" sorry
   4.925 -  qed
   4.926 -(*<*)
   4.927 -end
   4.928 -(*>*)
   4.929 -
   4.930 -text_raw {*\end{minipage}*}
   4.931 -
   4.932 -text {*
   4.933 -  \medskip\noindent Such ``peephole optimizations'' of Isar texts are
   4.934 -  practically important to improve readability, by rearranging
   4.935 -  contexts elements according to the natural flow of reasoning in the
   4.936 -  body, while still observing the overall scoping rules.
   4.937 -
   4.938 -  \medskip This illustrates the basic idea of structured proof
   4.939 -  processing in Isar.  The main mechanisms are based on natural
   4.940 -  deduction rule composition within the Pure framework.  In
   4.941 -  particular, there are no direct operations on goal states within the
   4.942 -  proof body.  Moreover, there is no hidden automated reasoning
   4.943 -  involved, just plain unification.
   4.944 -*}
   4.945 -
   4.946 -
   4.947 -subsection {* Calculational reasoning \label{sec:framework-calc} *}
   4.948 -
   4.949 -text {*
   4.950 -  The existing Isar infrastructure is sufficiently flexible to support
   4.951 -  calculational reasoning (chains of transitivity steps) as derived
   4.952 -  concept.  The generic proof elements introduced below depend on
   4.953 -  rules declared as @{attribute trans} in the context.  It is left to
   4.954 -  the object-logic to provide a suitable rule collection for mixed
   4.955 -  relations of @{text "="}, @{text "<"}, @{text "\<le>"}, @{text "\<subset>"},
   4.956 -  @{text "\<subseteq>"} etc.  Due to the flexibility of rule composition
   4.957 -  (\secref{sec:framework-resolution}), substitution of equals by
   4.958 -  equals is covered as well, even substitution of inequalities
   4.959 -  involving monotonicity conditions; see also \cite[\S6]{Wenzel-PhD}
   4.960 -  and \cite{Bauer-Wenzel:2001}.
   4.961 -
   4.962 -  The generic calculational mechanism is based on the observation that
   4.963 -  rules such as @{text "trans:"}~@{prop "x = y \<Longrightarrow> y = z \<Longrightarrow> x = z"}
   4.964 -  proceed from the premises towards the conclusion in a deterministic
   4.965 -  fashion.  Thus we may reason in forward mode, feeding intermediate
   4.966 -  results into rules selected from the context.  The course of
   4.967 -  reasoning is organized by maintaining a secondary fact called
   4.968 -  ``@{fact calculation}'', apart from the primary ``@{fact this}''
   4.969 -  already provided by the Isar primitives.  In the definitions below,
   4.970 -  @{attribute OF} refers to @{inference resolution}
   4.971 -  (\secref{sec:framework-resolution}) with multiple rule arguments,
   4.972 -  and @{text "trans"} represents to a suitable rule from the context:
   4.973 -
   4.974 -  \begin{matharray}{rcl}
   4.975 -    @{command "also"}@{text "\<^sub>0"} & \equiv & @{command "note"}~@{text "calculation = this"} \\
   4.976 -    @{command "also"}@{text "\<^sub>n\<^sub>+\<^sub>1"} & \equiv & @{command "note"}~@{text "calculation = trans [OF calculation this]"} \\[0.5ex]
   4.977 -    @{command "finally"} & \equiv & @{command "also"}~@{command "from"}~@{text calculation} \\
   4.978 -  \end{matharray}
   4.979 -
   4.980 -  \noindent The start of a calculation is determined implicitly in the
   4.981 -  text: here @{command also} sets @{fact calculation} to the current
   4.982 -  result; any subsequent occurrence will update @{fact calculation} by
   4.983 -  combination with the next result and a transitivity rule.  The
   4.984 -  calculational sequence is concluded via @{command finally}, where
   4.985 -  the final result is exposed for use in a concluding claim.
   4.986 -
   4.987 -  Here is a canonical proof pattern, using @{command have} to
   4.988 -  establish the intermediate results:
   4.989 -*}
   4.990 -
   4.991 -(*<*)
   4.992 -notepad
   4.993 -begin
   4.994 -(*>*)
   4.995 -  have "a = b" sorry
   4.996 -  also have "\<dots> = c" sorry
   4.997 -  also have "\<dots> = d" sorry
   4.998 -  finally have "a = d" .
   4.999 -(*<*)
  4.1000 -end
  4.1001 -(*>*)
  4.1002 -
  4.1003 -text {*
  4.1004 -  \noindent The term ``@{text "\<dots>"}'' above is a special abbreviation
  4.1005 -  provided by the Isabelle/Isar syntax layer: it statically refers to
  4.1006 -  the right-hand side argument of the previous statement given in the
  4.1007 -  text.  Thus it happens to coincide with relevant sub-expressions in
  4.1008 -  the calculational chain, but the exact correspondence is dependent
  4.1009 -  on the transitivity rules being involved.
  4.1010 -
  4.1011 -  \medskip Symmetry rules such as @{prop "x = y \<Longrightarrow> y = x"} are like
  4.1012 -  transitivities with only one premise.  Isar maintains a separate
  4.1013 -  rule collection declared via the @{attribute sym} attribute, to be
  4.1014 -  used in fact expressions ``@{text "a [symmetric]"}'', or single-step
  4.1015 -  proofs ``@{command assume}~@{text "x = y"}~@{command then}~@{command
  4.1016 -  have}~@{text "y = x"}~@{command ".."}''.
  4.1017 -*}
  4.1018 -
  4.1019 -end
  4.1020 \ No newline at end of file
     5.1 --- a/src/Doc/Isar-Ref/Generic.thy	Mon Apr 07 16:37:57 2014 +0200
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,2016 +0,0 @@
     5.4 -theory Generic
     5.5 -imports Base Main
     5.6 -begin
     5.7 -
     5.8 -chapter {* Generic tools and packages \label{ch:gen-tools} *}
     5.9 -
    5.10 -section {* Configuration options \label{sec:config} *}
    5.11 -
    5.12 -text {* Isabelle/Pure maintains a record of named configuration
    5.13 -  options within the theory or proof context, with values of type
    5.14 -  @{ML_type bool}, @{ML_type int}, @{ML_type real}, or @{ML_type
    5.15 -  string}.  Tools may declare options in ML, and then refer to these
    5.16 -  values (relative to the context).  Thus global reference variables
    5.17 -  are easily avoided.  The user may change the value of a
    5.18 -  configuration option by means of an associated attribute of the same
    5.19 -  name.  This form of context declaration works particularly well with
    5.20 -  commands such as @{command "declare"} or @{command "using"} like
    5.21 -  this:
    5.22 -*}
    5.23 -
    5.24 -declare [[show_main_goal = false]]
    5.25 -
    5.26 -notepad
    5.27 -begin
    5.28 -  note [[show_main_goal = true]]
    5.29 -end
    5.30 -
    5.31 -text {* For historical reasons, some tools cannot take the full proof
    5.32 -  context into account and merely refer to the background theory.
    5.33 -  This is accommodated by configuration options being declared as
    5.34 -  ``global'', which may not be changed within a local context.
    5.35 -
    5.36 -  \begin{matharray}{rcll}
    5.37 -    @{command_def "print_options"} & : & @{text "context \<rightarrow>"} \\
    5.38 -  \end{matharray}
    5.39 -
    5.40 -  @{rail \<open>
    5.41 -    @{syntax name} ('=' ('true' | 'false' | @{syntax int} | @{syntax float} | @{syntax name}))?
    5.42 -  \<close>}
    5.43 -
    5.44 -  \begin{description}
    5.45 -  
    5.46 -  \item @{command "print_options"} prints the available configuration
    5.47 -  options, with names, types, and current values.
    5.48 -  
    5.49 -  \item @{text "name = value"} as an attribute expression modifies the
    5.50 -  named option, with the syntax of the value depending on the option's
    5.51 -  type.  For @{ML_type bool} the default value is @{text true}.  Any
    5.52 -  attempt to change a global option in a local context is ignored.
    5.53 -
    5.54 -  \end{description}
    5.55 -*}
    5.56 -
    5.57 -
    5.58 -section {* Basic proof tools *}
    5.59 -
    5.60 -subsection {* Miscellaneous methods and attributes \label{sec:misc-meth-att} *}
    5.61 -
    5.62 -text {*
    5.63 -  \begin{matharray}{rcl}
    5.64 -    @{method_def unfold} & : & @{text method} \\
    5.65 -    @{method_def fold} & : & @{text method} \\
    5.66 -    @{method_def insert} & : & @{text method} \\[0.5ex]
    5.67 -    @{method_def erule}@{text "\<^sup>*"} & : & @{text method} \\
    5.68 -    @{method_def drule}@{text "\<^sup>*"} & : & @{text method} \\
    5.69 -    @{method_def frule}@{text "\<^sup>*"} & : & @{text method} \\
    5.70 -    @{method_def intro} & : & @{text method} \\
    5.71 -    @{method_def elim} & : & @{text method} \\
    5.72 -    @{method_def succeed} & : & @{text method} \\
    5.73 -    @{method_def fail} & : & @{text method} \\
    5.74 -  \end{matharray}
    5.75 -
    5.76 -  @{rail \<open>
    5.77 -    (@@{method fold} | @@{method unfold} | @@{method insert}) @{syntax thmrefs}
    5.78 -    ;
    5.79 -    (@@{method erule} | @@{method drule} | @@{method frule})
    5.80 -      ('(' @{syntax nat} ')')? @{syntax thmrefs}
    5.81 -    ;
    5.82 -    (@@{method intro} | @@{method elim}) @{syntax thmrefs}?
    5.83 -  \<close>}
    5.84 -
    5.85 -  \begin{description}
    5.86 -  
    5.87 -  \item @{method unfold}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{method fold}~@{text
    5.88 -  "a\<^sub>1 \<dots> a\<^sub>n"} expand (or fold back) the given definitions throughout
    5.89 -  all goals; any chained facts provided are inserted into the goal and
    5.90 -  subject to rewriting as well.
    5.91 -
    5.92 -  \item @{method insert}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} inserts theorems as facts
    5.93 -  into all goals of the proof state.  Note that current facts
    5.94 -  indicated for forward chaining are ignored.
    5.95 -
    5.96 -  \item @{method erule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, @{method
    5.97 -  drule}~@{text "a\<^sub>1 \<dots> a\<^sub>n"}, and @{method frule}~@{text
    5.98 -  "a\<^sub>1 \<dots> a\<^sub>n"} are similar to the basic @{method rule}
    5.99 -  method (see \secref{sec:pure-meth-att}), but apply rules by
   5.100 -  elim-resolution, destruct-resolution, and forward-resolution,
   5.101 -  respectively \cite{isabelle-implementation}.  The optional natural
   5.102 -  number argument (default 0) specifies additional assumption steps to
   5.103 -  be performed here.
   5.104 -
   5.105 -  Note that these methods are improper ones, mainly serving for
   5.106 -  experimentation and tactic script emulation.  Different modes of
   5.107 -  basic rule application are usually expressed in Isar at the proof
   5.108 -  language level, rather than via implicit proof state manipulations.
   5.109 -  For example, a proper single-step elimination would be done using
   5.110 -  the plain @{method rule} method, with forward chaining of current
   5.111 -  facts.
   5.112 -
   5.113 -  \item @{method intro} and @{method elim} repeatedly refine some goal
   5.114 -  by intro- or elim-resolution, after having inserted any chained
   5.115 -  facts.  Exactly the rules given as arguments are taken into account;
   5.116 -  this allows fine-tuned decomposition of a proof problem, in contrast
   5.117 -  to common automated tools.
   5.118 -
   5.119 -  \item @{method succeed} yields a single (unchanged) result; it is
   5.120 -  the identity of the ``@{text ","}'' method combinator (cf.\
   5.121 -  \secref{sec:proof-meth}).
   5.122 -
   5.123 -  \item @{method fail} yields an empty result sequence; it is the
   5.124 -  identity of the ``@{text "|"}'' method combinator (cf.\
   5.125 -  \secref{sec:proof-meth}).
   5.126 -
   5.127 -  \end{description}
   5.128 -
   5.129 -  \begin{matharray}{rcl}
   5.130 -    @{attribute_def tagged} & : & @{text attribute} \\
   5.131 -    @{attribute_def untagged} & : & @{text attribute} \\[0.5ex]
   5.132 -    @{attribute_def THEN} & : & @{text attribute} \\
   5.133 -    @{attribute_def unfolded} & : & @{text attribute} \\
   5.134 -    @{attribute_def folded} & : & @{text attribute} \\
   5.135 -    @{attribute_def abs_def} & : & @{text attribute} \\[0.5ex]
   5.136 -    @{attribute_def rotated} & : & @{text attribute} \\
   5.137 -    @{attribute_def (Pure) elim_format} & : & @{text attribute} \\
   5.138 -    @{attribute_def no_vars}@{text "\<^sup>*"} & : & @{text attribute} \\
   5.139 -  \end{matharray}
   5.140 -
   5.141 -  @{rail \<open>
   5.142 -    @@{attribute tagged} @{syntax name} @{syntax name}
   5.143 -    ;
   5.144 -    @@{attribute untagged} @{syntax name}
   5.145 -    ;
   5.146 -    @@{attribute THEN} ('[' @{syntax nat} ']')? @{syntax thmref}
   5.147 -    ;
   5.148 -    (@@{attribute unfolded} | @@{attribute folded}) @{syntax thmrefs}
   5.149 -    ;
   5.150 -    @@{attribute rotated} @{syntax int}?
   5.151 -  \<close>}
   5.152 -
   5.153 -  \begin{description}
   5.154 -
   5.155 -  \item @{attribute tagged}~@{text "name value"} and @{attribute
   5.156 -  untagged}~@{text name} add and remove \emph{tags} of some theorem.
   5.157 -  Tags may be any list of string pairs that serve as formal comment.
   5.158 -  The first string is considered the tag name, the second its value.
   5.159 -  Note that @{attribute untagged} removes any tags of the same name.
   5.160 -
   5.161 -  \item @{attribute THEN}~@{text a} composes rules by resolution; it
   5.162 -  resolves with the first premise of @{text a} (an alternative
   5.163 -  position may be also specified).  See also @{ML_op "RS"} in
   5.164 -  \cite{isabelle-implementation}.
   5.165 -  
   5.166 -  \item @{attribute unfolded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} and @{attribute
   5.167 -  folded}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} expand and fold back again the given
   5.168 -  definitions throughout a rule.
   5.169 -
   5.170 -  \item @{attribute abs_def} turns an equation of the form @{prop "f x
   5.171 -  y \<equiv> t"} into @{prop "f \<equiv> \<lambda>x y. t"}, which ensures that @{method
   5.172 -  simp} or @{method unfold} steps always expand it.  This also works
   5.173 -  for object-logic equality.
   5.174 -
   5.175 -  \item @{attribute rotated}~@{text n} rotate the premises of a
   5.176 -  theorem by @{text n} (default 1).
   5.177 -
   5.178 -  \item @{attribute (Pure) elim_format} turns a destruction rule into
   5.179 -  elimination rule format, by resolving with the rule @{prop "PROP A \<Longrightarrow>
   5.180 -  (PROP A \<Longrightarrow> PROP B) \<Longrightarrow> PROP B"}.
   5.181 -  
   5.182 -  Note that the Classical Reasoner (\secref{sec:classical}) provides
   5.183 -  its own version of this operation.
   5.184 -
   5.185 -  \item @{attribute no_vars} replaces schematic variables by free
   5.186 -  ones; this is mainly for tuning output of pretty printed theorems.
   5.187 -
   5.188 -  \end{description}
   5.189 -*}
   5.190 -
   5.191 -
   5.192 -subsection {* Low-level equational reasoning *}
   5.193 -
   5.194 -text {*
   5.195 -  \begin{matharray}{rcl}
   5.196 -    @{method_def subst} & : & @{text method} \\
   5.197 -    @{method_def hypsubst} & : & @{text method} \\
   5.198 -    @{method_def split} & : & @{text method} \\
   5.199 -  \end{matharray}
   5.200 -
   5.201 -  @{rail \<open>
   5.202 -    @@{method subst} ('(' 'asm' ')')? \<newline> ('(' (@{syntax nat}+) ')')? @{syntax thmref}
   5.203 -    ;
   5.204 -    @@{method split} @{syntax thmrefs}
   5.205 -  \<close>}
   5.206 -
   5.207 -  These methods provide low-level facilities for equational reasoning
   5.208 -  that are intended for specialized applications only.  Normally,
   5.209 -  single step calculations would be performed in a structured text
   5.210 -  (see also \secref{sec:calculation}), while the Simplifier methods
   5.211 -  provide the canonical way for automated normalization (see
   5.212 -  \secref{sec:simplifier}).
   5.213 -
   5.214 -  \begin{description}
   5.215 -
   5.216 -  \item @{method subst}~@{text eq} performs a single substitution step
   5.217 -  using rule @{text eq}, which may be either a meta or object
   5.218 -  equality.
   5.219 -
   5.220 -  \item @{method subst}~@{text "(asm) eq"} substitutes in an
   5.221 -  assumption.
   5.222 -
   5.223 -  \item @{method subst}~@{text "(i \<dots> j) eq"} performs several
   5.224 -  substitutions in the conclusion. The numbers @{text i} to @{text j}
   5.225 -  indicate the positions to substitute at.  Positions are ordered from
   5.226 -  the top of the term tree moving down from left to right. For
   5.227 -  example, in @{text "(a + b) + (c + d)"} there are three positions
   5.228 -  where commutativity of @{text "+"} is applicable: 1 refers to @{text
   5.229 -  "a + b"}, 2 to the whole term, and 3 to @{text "c + d"}.
   5.230 -
   5.231 -  If the positions in the list @{text "(i \<dots> j)"} are non-overlapping
   5.232 -  (e.g.\ @{text "(2 3)"} in @{text "(a + b) + (c + d)"}) you may
   5.233 -  assume all substitutions are performed simultaneously.  Otherwise
   5.234 -  the behaviour of @{text subst} is not specified.
   5.235 -
   5.236 -  \item @{method subst}~@{text "(asm) (i \<dots> j) eq"} performs the
   5.237 -  substitutions in the assumptions. The positions refer to the
   5.238 -  assumptions in order from left to right.  For example, given in a
   5.239 -  goal of the form @{text "P (a + b) \<Longrightarrow> P (c + d) \<Longrightarrow> \<dots>"}, position 1 of
   5.240 -  commutativity of @{text "+"} is the subterm @{text "a + b"} and
   5.241 -  position 2 is the subterm @{text "c + d"}.
   5.242 -
   5.243 -  \item @{method hypsubst} performs substitution using some
   5.244 -  assumption; this only works for equations of the form @{text "x =
   5.245 -  t"} where @{text x} is a free or bound variable.
   5.246 -
   5.247 -  \item @{method split}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} performs single-step case
   5.248 -  splitting using the given rules.  Splitting is performed in the
   5.249 -  conclusion or some assumption of the subgoal, depending of the
   5.250 -  structure of the rule.
   5.251 -  
   5.252 -  Note that the @{method simp} method already involves repeated
   5.253 -  application of split rules as declared in the current context, using
   5.254 -  @{attribute split}, for example.
   5.255 -
   5.256 -  \end{description}
   5.257 -*}
   5.258 -
   5.259 -
   5.260 -subsection {* Further tactic emulations \label{sec:tactics} *}
   5.261 -
   5.262 -text {*
   5.263 -  The following improper proof methods emulate traditional tactics.
   5.264 -  These admit direct access to the goal state, which is normally
   5.265 -  considered harmful!  In particular, this may involve both numbered
   5.266 -  goal addressing (default 1), and dynamic instantiation within the
   5.267 -  scope of some subgoal.
   5.268 -
   5.269 -  \begin{warn}
   5.270 -    Dynamic instantiations refer to universally quantified parameters
   5.271 -    of a subgoal (the dynamic context) rather than fixed variables and
   5.272 -    term abbreviations of a (static) Isar context.
   5.273 -  \end{warn}
   5.274 -
   5.275 -  Tactic emulation methods, unlike their ML counterparts, admit
   5.276 -  simultaneous instantiation from both dynamic and static contexts.
   5.277 -  If names occur in both contexts goal parameters hide locally fixed
   5.278 -  variables.  Likewise, schematic variables refer to term
   5.279 -  abbreviations, if present in the static context.  Otherwise the
   5.280 -  schematic variable is interpreted as a schematic variable and left
   5.281 -  to be solved by unification with certain parts of the subgoal.
   5.282 -
   5.283 -  Note that the tactic emulation proof methods in Isabelle/Isar are
   5.284 -  consistently named @{text foo_tac}.  Note also that variable names
   5.285 -  occurring on left hand sides of instantiations must be preceded by a
   5.286 -  question mark if they coincide with a keyword or contain dots.  This
   5.287 -  is consistent with the attribute @{attribute "where"} (see
   5.288 -  \secref{sec:pure-meth-att}).
   5.289 -
   5.290 -  \begin{matharray}{rcl}
   5.291 -    @{method_def rule_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.292 -    @{method_def erule_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.293 -    @{method_def drule_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.294 -    @{method_def frule_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.295 -    @{method_def cut_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.296 -    @{method_def thin_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.297 -    @{method_def subgoal_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.298 -    @{method_def rename_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.299 -    @{method_def rotate_tac}@{text "\<^sup>*"} & : & @{text method} \\
   5.300 -    @{method_def tactic}@{text "\<^sup>*"} & : & @{text method} \\
   5.301 -    @{method_def raw_tactic}@{text "\<^sup>*"} & : & @{text method} \\
   5.302 -  \end{matharray}
   5.303 -
   5.304 -  @{rail \<open>
   5.305 -    (@@{method rule_tac} | @@{method erule_tac} | @@{method drule_tac} |
   5.306 -      @@{method frule_tac} | @@{method cut_tac} | @@{method thin_tac}) @{syntax goal_spec}? \<newline>
   5.307 -    ( dynamic_insts @'in' @{syntax thmref} | @{syntax thmrefs} )
   5.308 -    ;
   5.309 -    @@{method subgoal_tac} @{syntax goal_spec}? (@{syntax prop} +)
   5.310 -    ;
   5.311 -    @@{method rename_tac} @{syntax goal_spec}? (@{syntax name} +)
   5.312 -    ;
   5.313 -    @@{method rotate_tac} @{syntax goal_spec}? @{syntax int}?
   5.314 -    ;
   5.315 -    (@@{method tactic} | @@{method raw_tactic}) @{syntax text}
   5.316 -    ;
   5.317 -
   5.318 -    dynamic_insts: ((@{syntax name} '=' @{syntax term}) + @'and')
   5.319 -  \<close>}
   5.320 -
   5.321 -\begin{description}
   5.322 -
   5.323 -  \item @{method rule_tac} etc. do resolution of rules with explicit
   5.324 -  instantiation.  This works the same way as the ML tactics @{ML
   5.325 -  res_inst_tac} etc. (see \cite{isabelle-implementation})
   5.326 -
   5.327 -  Multiple rules may be only given if there is no instantiation; then
   5.328 -  @{method rule_tac} is the same as @{ML resolve_tac} in ML (see
   5.329 -  \cite{isabelle-implementation}).
   5.330 -
   5.331 -  \item @{method cut_tac} inserts facts into the proof state as
   5.332 -  assumption of a subgoal; instantiations may be given as well.  Note
   5.333 -  that the scope of schematic variables is spread over the main goal
   5.334 -  statement and rule premises are turned into new subgoals.  This is
   5.335 -  in contrast to the regular method @{method insert} which inserts
   5.336 -  closed rule statements.
   5.337 -
   5.338 -  \item @{method thin_tac}~@{text \<phi>} deletes the specified premise
   5.339 -  from a subgoal.  Note that @{text \<phi>} may contain schematic
   5.340 -  variables, to abbreviate the intended proposition; the first
   5.341 -  matching subgoal premise will be deleted.  Removing useless premises
   5.342 -  from a subgoal increases its readability and can make search tactics
   5.343 -  run faster.
   5.344 -
   5.345 -  \item @{method subgoal_tac}~@{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} adds the propositions
   5.346 -  @{text "\<phi>\<^sub>1 \<dots> \<phi>\<^sub>n"} as local premises to a subgoal, and poses the same
   5.347 -  as new subgoals (in the original context).
   5.348 -
   5.349 -  \item @{method rename_tac}~@{text "x\<^sub>1 \<dots> x\<^sub>n"} renames parameters of a
   5.350 -  goal according to the list @{text "x\<^sub>1, \<dots>, x\<^sub>n"}, which refers to the
   5.351 -  \emph{suffix} of variables.
   5.352 -
   5.353 -  \item @{method rotate_tac}~@{text n} rotates the premises of a
   5.354 -  subgoal by @{text n} positions: from right to left if @{text n} is
   5.355 -  positive, and from left to right if @{text n} is negative; the
   5.356 -  default value is 1.
   5.357 -
   5.358 -  \item @{method tactic}~@{text "text"} produces a proof method from
   5.359 -  any ML text of type @{ML_type tactic}.  Apart from the usual ML
   5.360 -  environment and the current proof context, the ML code may refer to
   5.361 -  the locally bound values @{ML_text facts}, which indicates any
   5.362 -  current facts used for forward-chaining.
   5.363 -
   5.364 -  \item @{method raw_tactic} is similar to @{method tactic}, but
   5.365 -  presents the goal state in its raw internal form, where simultaneous
   5.366 -  subgoals appear as conjunction of the logical framework instead of
   5.367 -  the usual split into several subgoals.  While feature this is useful
   5.368 -  for debugging of complex method definitions, it should not never
   5.369 -  appear in production theories.
   5.370 -
   5.371 -  \end{description}
   5.372 -*}
   5.373 -
   5.374 -
   5.375 -section {* The Simplifier \label{sec:simplifier} *}
   5.376 -
   5.377 -text {* The Simplifier performs conditional and unconditional
   5.378 -  rewriting and uses contextual information: rule declarations in the
   5.379 -  background theory or local proof context are taken into account, as
   5.380 -  well as chained facts and subgoal premises (``local assumptions'').
   5.381 -  There are several general hooks that allow to modify the
   5.382 -  simplification strategy, or incorporate other proof tools that solve
   5.383 -  sub-problems, produce rewrite rules on demand etc.
   5.384 -
   5.385 -  The rewriting strategy is always strictly bottom up, except for
   5.386 -  congruence rules, which are applied while descending into a term.
   5.387 -  Conditions in conditional rewrite rules are solved recursively
   5.388 -  before the rewrite rule is applied.
   5.389 -
   5.390 -  The default Simplifier setup of major object logics (HOL, HOLCF,
   5.391 -  FOL, ZF) makes the Simplifier ready for immediate use, without
   5.392 -  engaging into the internal structures.  Thus it serves as
   5.393 -  general-purpose proof tool with the main focus on equational
   5.394 -  reasoning, and a bit more than that.
   5.395 -*}
   5.396 -
   5.397 -
   5.398 -subsection {* Simplification methods \label{sec:simp-meth} *}
   5.399 -
   5.400 -text {*
   5.401 -  \begin{matharray}{rcl}
   5.402 -    @{method_def simp} & : & @{text method} \\
   5.403 -    @{method_def simp_all} & : & @{text method} \\
   5.404 -  \end{matharray}
   5.405 -
   5.406 -  @{rail \<open>
   5.407 -    (@@{method simp} | @@{method simp_all}) opt? (@{syntax simpmod} * )
   5.408 -    ;
   5.409 -
   5.410 -    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use' | 'asm_lr' ) ')'
   5.411 -    ;
   5.412 -    @{syntax_def simpmod}: ('add' | 'del' | 'only' | 'split' (() | 'add' | 'del') |
   5.413 -      'cong' (() | 'add' | 'del')) ':' @{syntax thmrefs}
   5.414 -  \<close>}
   5.415 -
   5.416 -  \begin{description}
   5.417 -
   5.418 -  \item @{method simp} invokes the Simplifier on the first subgoal,
   5.419 -  after inserting chained facts as additional goal premises; further
   5.420 -  rule declarations may be included via @{text "(simp add: facts)"}.
   5.421 -  The proof method fails if the subgoal remains unchanged after
   5.422 -  simplification.
   5.423 -
   5.424 -  Note that the original goal premises and chained facts are subject
   5.425 -  to simplification themselves, while declarations via @{text
   5.426 -  "add"}/@{text "del"} merely follow the policies of the object-logic
   5.427 -  to extract rewrite rules from theorems, without further
   5.428 -  simplification.  This may lead to slightly different behavior in
   5.429 -  either case, which might be required precisely like that in some
   5.430 -  boundary situations to perform the intended simplification step!
   5.431 -
   5.432 -  \medskip The @{text only} modifier first removes all other rewrite
   5.433 -  rules, looper tactics (including split rules), congruence rules, and
   5.434 -  then behaves like @{text add}.  Implicit solvers remain, which means
   5.435 -  that trivial rules like reflexivity or introduction of @{text
   5.436 -  "True"} are available to solve the simplified subgoals, but also
   5.437 -  non-trivial tools like linear arithmetic in HOL.  The latter may
   5.438 -  lead to some surprise of the meaning of ``only'' in Isabelle/HOL
   5.439 -  compared to English!
   5.440 -
   5.441 -  \medskip The @{text split} modifiers add or delete rules for the
   5.442 -  Splitter (see also \secref{sec:simp-strategies} on the looper).
   5.443 -  This works only if the Simplifier method has been properly setup to
    5.444 -  include the Splitter (all major object logics such as HOL, HOLCF, FOL,
   5.445 -  ZF do this already).
   5.446 -
   5.447 -  There is also a separate @{method_ref split} method available for
   5.448 -  single-step case splitting.  The effect of repeatedly applying
   5.449 -  @{text "(split thms)"} can be imitated by ``@{text "(simp only:
   5.450 -  split: thms)"}''.
   5.451 -
   5.452 -  \medskip The @{text cong} modifiers add or delete Simplifier
   5.453 -  congruence rules (see also \secref{sec:simp-rules}); the default is
   5.454 -  to add.
   5.455 -
   5.456 -  \item @{method simp_all} is similar to @{method simp}, but acts on
   5.457 -  all goals, working backwards from the last to the first one as usual
   5.458 -  in Isabelle.\footnote{The order is irrelevant for goals without
   5.459 -  schematic variables, so simplification might actually be performed
   5.460 -  in parallel here.}
   5.461 -
   5.462 -  Chained facts are inserted into all subgoals, before the
   5.463 -  simplification process starts.  Further rule declarations are the
   5.464 -  same as for @{method simp}.
   5.465 -
   5.466 -  The proof method fails if all subgoals remain unchanged after
   5.467 -  simplification.
   5.468 -
   5.469 -  \end{description}
   5.470 -
   5.471 -  By default the Simplifier methods above take local assumptions fully
   5.472 -  into account, using equational assumptions in the subsequent
   5.473 -  normalization process, or simplifying assumptions themselves.
   5.474 -  Further options allow to fine-tune the behavior of the Simplifier
   5.475 -  in this respect, corresponding to a variety of ML tactics as
   5.476 -  follows.\footnote{Unlike the corresponding Isar proof methods, the
   5.477 -  ML tactics do not insist in changing the goal state.}
   5.478 -
   5.479 -  \begin{center}
   5.480 -  \small
   5.481 -  \begin{supertabular}{|l|l|p{0.3\textwidth}|}
   5.482 -  \hline
   5.483 -  Isar method & ML tactic & behavior \\\hline
   5.484 -
   5.485 -  @{text "(simp (no_asm))"} & @{ML simp_tac} & assumptions are ignored
   5.486 -  completely \\\hline
   5.487 -
   5.488 -  @{text "(simp (no_asm_simp))"} & @{ML asm_simp_tac} & assumptions
   5.489 -  are used in the simplification of the conclusion but are not
   5.490 -  themselves simplified \\\hline
   5.491 -
   5.492 -  @{text "(simp (no_asm_use))"} & @{ML full_simp_tac} & assumptions
   5.493 -  are simplified but are not used in the simplification of each other
   5.494 -  or the conclusion \\\hline
   5.495 -
   5.496 -  @{text "(simp)"} & @{ML asm_full_simp_tac} & assumptions are used in
   5.497 -  the simplification of the conclusion and to simplify other
   5.498 -  assumptions \\\hline
   5.499 -
   5.500 -  @{text "(simp (asm_lr))"} & @{ML asm_lr_simp_tac} & compatibility
   5.501 -  mode: an assumption is only used for simplifying assumptions which
   5.502 -  are to the right of it \\\hline
   5.503 -
   5.504 -  \end{supertabular}
   5.505 -  \end{center}
   5.506 -*}
   5.507 -
   5.508 -
   5.509 -subsubsection {* Examples *}
   5.510 -
   5.511 -text {* We consider basic algebraic simplifications in Isabelle/HOL.
   5.512 -  The rather trivial goal @{prop "0 + (x + 0) = x + 0 + 0"} looks like
   5.513 -  a good candidate to be solved by a single call of @{method simp}:
   5.514 -*}
   5.515 -
   5.516 -lemma "0 + (x + 0) = x + 0 + 0" apply simp? oops
   5.517 -
   5.518 -text {* The above attempt \emph{fails}, because @{term "0"} and @{term
   5.519 -  "op +"} in the HOL library are declared as generic type class
   5.520 -  operations, without stating any algebraic laws yet.  More specific
   5.521 -  types are required to get access to certain standard simplifications
   5.522 -  of the theory context, e.g.\ like this: *}
   5.523 -
   5.524 -lemma fixes x :: nat shows "0 + (x + 0) = x + 0 + 0" by simp
   5.525 -lemma fixes x :: int shows "0 + (x + 0) = x + 0 + 0" by simp
   5.526 -lemma fixes x :: "'a :: monoid_add" shows "0 + (x + 0) = x + 0 + 0" by simp
   5.527 -
   5.528 -text {*
   5.529 -  \medskip In many cases, assumptions of a subgoal are also needed in
   5.530 -  the simplification process.  For example:
   5.531 -*}
   5.532 -
   5.533 -lemma fixes x :: nat shows "x = 0 \<Longrightarrow> x + x = 0" by simp
   5.534 -lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" apply simp oops
   5.535 -lemma fixes x :: nat assumes "x = 0" shows "x + x = 0" using assms by simp
   5.536 -
   5.537 -text {* As seen above, local assumptions that shall contribute to
   5.538 -  simplification need to be part of the subgoal already, or indicated
   5.539 -  explicitly for use by the subsequent method invocation.  Both too
   5.540 -  little or too much information can make simplification fail, for
   5.541 -  different reasons.
   5.542 -
   5.543 -  In the next example the malicious assumption @{prop "\<And>x::nat. f x =
   5.544 -  g (f (g x))"} does not contribute to solve the problem, but makes
   5.545 -  the default @{method simp} method loop: the rewrite rule @{text "f
   5.546 -  ?x \<equiv> g (f (g ?x))"} extracted from the assumption does not
   5.547 -  terminate.  The Simplifier notices certain simple forms of
   5.548 -  nontermination, but not this one.  The problem can be solved
   5.549 -  nonetheless, by ignoring assumptions via special options as
   5.550 -  explained before:
   5.551 -*}
   5.552 -
   5.553 -lemma "(\<And>x::nat. f x = g (f (g x))) \<Longrightarrow> f 0 = f 0 + 0"
   5.554 -  by (simp (no_asm))
   5.555 -
   5.556 -text {* The latter form is typical for long unstructured proof
   5.557 -  scripts, where the control over the goal content is limited.  In
   5.558 -  structured proofs it is usually better to avoid pushing too many
   5.559 -  facts into the goal state in the first place.  Assumptions in the
   5.560 -  Isar proof context do not intrude the reasoning if not used
   5.561 -  explicitly.  This is illustrated for a toplevel statement and a
   5.562 -  local proof body as follows:
   5.563 -*}
   5.564 -
   5.565 -lemma
   5.566 -  assumes "\<And>x::nat. f x = g (f (g x))"
   5.567 -  shows "f 0 = f 0 + 0" by simp
   5.568 -
   5.569 -notepad
   5.570 -begin
   5.571 -  assume "\<And>x::nat. f x = g (f (g x))"
   5.572 -  have "f 0 = f 0 + 0" by simp
   5.573 -end
   5.574 -
   5.575 -text {* \medskip Because assumptions may simplify each other, there
   5.576 -  can be very subtle cases of nontermination. For example, the regular
   5.577 -  @{method simp} method applied to @{prop "P (f x) \<Longrightarrow> y = x \<Longrightarrow> f x = f y
   5.578 -  \<Longrightarrow> Q"} gives rise to the infinite reduction sequence
   5.579 -  \[
   5.580 -  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto}
   5.581 -  @{text "P (f y)"} \stackrel{@{text "y \<equiv> x"}}{\longmapsto}
   5.582 -  @{text "P (f x)"} \stackrel{@{text "f x \<equiv> f y"}}{\longmapsto} \cdots
   5.583 -  \]
   5.584 -  whereas applying the same to @{prop "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow>
   5.585 -  Q"} terminates (without solving the goal):
   5.586 -*}
   5.587 -
   5.588 -lemma "y = x \<Longrightarrow> f x = f y \<Longrightarrow> P (f x) \<Longrightarrow> Q"
   5.589 -  apply simp
   5.590 -  oops
   5.591 -
   5.592 -text {* See also \secref{sec:simp-config} for options to enable
   5.593 -  Simplifier trace mode, which often helps to diagnose problems with
   5.594 -  rewrite systems.
   5.595 -*}
   5.596 -
   5.597 -
   5.598 -subsection {* Declaring rules \label{sec:simp-rules} *}
   5.599 -
   5.600 -text {*
   5.601 -  \begin{matharray}{rcl}
   5.602 -    @{attribute_def simp} & : & @{text attribute} \\
   5.603 -    @{attribute_def split} & : & @{text attribute} \\
   5.604 -    @{attribute_def cong} & : & @{text attribute} \\
   5.605 -    @{command_def "print_simpset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
   5.606 -  \end{matharray}
   5.607 -
   5.608 -  @{rail \<open>
   5.609 -    (@@{attribute simp} | @@{attribute split} | @@{attribute cong})
   5.610 -      (() | 'add' | 'del')
   5.611 -  \<close>}
   5.612 -
   5.613 -  \begin{description}
   5.614 -
   5.615 -  \item @{attribute simp} declares rewrite rules, by adding or
   5.616 -  deleting them from the simpset within the theory or proof context.
   5.617 -  Rewrite rules are theorems expressing some form of equality, for
   5.618 -  example:
   5.619 -
   5.620 -  @{text "Suc ?m + ?n = ?m + Suc ?n"} \\
   5.621 -  @{text "?P \<and> ?P \<longleftrightarrow> ?P"} \\
   5.622 -  @{text "?A \<union> ?B \<equiv> {x. x \<in> ?A \<or> x \<in> ?B}"}
   5.623 -
   5.624 -  \smallskip
   5.625 -  Conditional rewrites such as @{text "?m < ?n \<Longrightarrow> ?m div ?n = 0"} are
   5.626 -  also permitted; the conditions can be arbitrary formulas.
   5.627 -
   5.628 -  \medskip Internally, all rewrite rules are translated into Pure
   5.629 -  equalities, theorems with conclusion @{text "lhs \<equiv> rhs"}. The
   5.630 -  simpset contains a function for extracting equalities from arbitrary
   5.631 -  theorems, which is usually installed when the object-logic is
   5.632 -  configured initially. For example, @{text "\<not> ?x \<in> {}"} could be
   5.633 -  turned into @{text "?x \<in> {} \<equiv> False"}. Theorems that are declared as
   5.634 -  @{attribute simp} and local assumptions within a goal are treated
   5.635 -  uniformly in this respect.
   5.636 -
   5.637 -  The Simplifier accepts the following formats for the @{text "lhs"}
   5.638 -  term:
   5.639 -
   5.640 -  \begin{enumerate}
   5.641 -
   5.642 -  \item First-order patterns, considering the sublanguage of
   5.643 -  application of constant operators to variable operands, without
   5.644 -  @{text "\<lambda>"}-abstractions or functional variables.
   5.645 -  For example:
   5.646 -
   5.647 -  @{text "(?x + ?y) + ?z \<equiv> ?x + (?y + ?z)"} \\
   5.648 -  @{text "f (f ?x ?y) ?z \<equiv> f ?x (f ?y ?z)"}
   5.649 -
   5.650 -  \item Higher-order patterns in the sense of \cite{nipkow-patterns}.
   5.651 -  These are terms in @{text "\<beta>"}-normal form (this will always be the
   5.652 -  case unless you have done something strange) where each occurrence
   5.653 -  of an unknown is of the form @{text "?F x\<^sub>1 \<dots> x\<^sub>n"}, where the
   5.654 -  @{text "x\<^sub>i"} are distinct bound variables.
   5.655 -
   5.656 -  For example, @{text "(\<forall>x. ?P x \<and> ?Q x) \<equiv> (\<forall>x. ?P x) \<and> (\<forall>x. ?Q x)"}
   5.657 -  or its symmetric form, since the @{text "rhs"} is also a
   5.658 -  higher-order pattern.
   5.659 -
   5.660 -  \item Physical first-order patterns over raw @{text "\<lambda>"}-term
   5.661 -  structure without @{text "\<alpha>\<beta>\<eta>"}-equality; abstractions and bound
   5.662 -  variables are treated like quasi-constant term material.
   5.663 -
   5.664 -  For example, the rule @{text "?f ?x \<in> range ?f = True"} rewrites the
   5.665 -  term @{text "g a \<in> range g"} to @{text "True"}, but will fail to
   5.666 -  match @{text "g (h b) \<in> range (\<lambda>x. g (h x))"}. However, offending
   5.667 -  subterms (in our case @{text "?f ?x"}, which is not a pattern) can
   5.668 -  be replaced by adding new variables and conditions like this: @{text
   5.669 -  "?y = ?f ?x \<Longrightarrow> ?y \<in> range ?f = True"} is acceptable as a conditional
   5.670 -  rewrite rule of the second category since conditions can be
   5.671 -  arbitrary terms.
   5.672 -
   5.673 -  \end{enumerate}
   5.674 -
   5.675 -  \item @{attribute split} declares case split rules.
   5.676 -
   5.677 -  \item @{attribute cong} declares congruence rules to the Simplifier
   5.678 -  context.
   5.679 -
   5.680 -  Congruence rules are equalities of the form @{text [display]
   5.681 -  "\<dots> \<Longrightarrow> f ?x\<^sub>1 \<dots> ?x\<^sub>n = f ?y\<^sub>1 \<dots> ?y\<^sub>n"}
   5.682 -
   5.683 -  This controls the simplification of the arguments of @{text f}.  For
   5.684 -  example, some arguments can be simplified under additional
   5.685 -  assumptions: @{text [display] "?P\<^sub>1 \<longleftrightarrow> ?Q\<^sub>1 \<Longrightarrow> (?Q\<^sub>1 \<Longrightarrow> ?P\<^sub>2 \<longleftrightarrow> ?Q\<^sub>2) \<Longrightarrow>
   5.686 -  (?P\<^sub>1 \<longrightarrow> ?P\<^sub>2) \<longleftrightarrow> (?Q\<^sub>1 \<longrightarrow> ?Q\<^sub>2)"}
   5.687 -
   5.688 -  Given this rule, the simplifier assumes @{text "?Q\<^sub>1"} and extracts
   5.689 -  rewrite rules from it when simplifying @{text "?P\<^sub>2"}.  Such local
   5.690 -  assumptions are effective for rewriting formulae such as @{text "x =
   5.691 -  0 \<longrightarrow> y + x = y"}.
   5.692 -
   5.693 -  %FIXME
   5.694 -  %The local assumptions are also provided as theorems to the solver;
   5.695 -  %see \secref{sec:simp-solver} below.
   5.696 -
   5.697 -  \medskip The following congruence rule for bounded quantifiers also
   5.698 -  supplies contextual information --- about the bound variable:
   5.699 -  @{text [display] "(?A = ?B) \<Longrightarrow> (\<And>x. x \<in> ?B \<Longrightarrow> ?P x \<longleftrightarrow> ?Q x) \<Longrightarrow>
   5.700 -    (\<forall>x \<in> ?A. ?P x) \<longleftrightarrow> (\<forall>x \<in> ?B. ?Q x)"}
   5.701 -
   5.702 -  \medskip This congruence rule for conditional expressions can
   5.703 -  supply contextual information for simplifying the arms:
   5.704 -  @{text [display] "?p = ?q \<Longrightarrow> (?q \<Longrightarrow> ?a = ?c) \<Longrightarrow> (\<not> ?q \<Longrightarrow> ?b = ?d) \<Longrightarrow>
   5.705 -    (if ?p then ?a else ?b) = (if ?q then ?c else ?d)"}
   5.706 -
   5.707 -  A congruence rule can also \emph{prevent} simplification of some
   5.708 -  arguments.  Here is an alternative congruence rule for conditional
   5.709 -  expressions that conforms to non-strict functional evaluation:
   5.710 -  @{text [display] "?p = ?q \<Longrightarrow> (if ?p then ?a else ?b) = (if ?q then ?a else ?b)"}
   5.711 -
   5.712 -  Only the first argument is simplified; the others remain unchanged.
   5.713 -  This can make simplification much faster, but may require an extra
   5.714 -  case split over the condition @{text "?q"} to prove the goal.
   5.715 -
   5.716 -  \item @{command "print_simpset"} prints the collection of rules
   5.717 -  declared to the Simplifier, which is also known as ``simpset''
   5.718 -  internally.
   5.719 -
   5.720 -  For historical reasons, simpsets may occur independently from the
   5.721 -  current context, but are conceptually dependent on it.  When the
   5.722 -  Simplifier is invoked via one of its main entry points in the Isar
   5.723 -  source language (as proof method \secref{sec:simp-meth} or rule
   5.724 -  attribute \secref{sec:simp-meth}), its simpset is derived from the
   5.725 -  current proof context, and carries a back-reference to that for
   5.726 -  other tools that might get invoked internally (e.g.\ simplification
   5.727 -  procedures \secref{sec:simproc}).  A mismatch of the context of the
   5.728 -  simpset and the context of the problem being simplified may lead to
   5.729 -  unexpected results.
   5.730 -
   5.731 -  \end{description}
   5.732 -
   5.733 -  The implicit simpset of the theory context is propagated
   5.734 -  monotonically through the theory hierarchy: forming a new theory,
   5.735 -  the union of the simpsets of its imports are taken as starting
   5.736 -  point.  Also note that definitional packages like @{command
   5.737 -  "datatype"}, @{command "primrec"}, @{command "fun"} routinely
   5.738 -  declare Simplifier rules to the target context, while plain
   5.739 -  @{command "definition"} is an exception in \emph{not} declaring
   5.740 -  anything.
   5.741 -
    5.742 -  \medskip It is up to the user to manipulate the current simpset further
   5.743 -  by explicitly adding or deleting theorems as simplification rules,
   5.744 -  or installing other tools via simplification procedures
   5.745 -  (\secref{sec:simproc}).  Good simpsets are hard to design.  Rules
   5.746 -  that obviously simplify, like @{text "?n + 0 \<equiv> ?n"} are good
   5.747 -  candidates for the implicit simpset, unless a special
   5.748 -  non-normalizing behavior of certain operations is intended.  More
   5.749 -  specific rules (such as distributive laws, which duplicate subterms)
   5.750 -  should be added only for specific proof steps.  Conversely,
   5.751 -  sometimes a rule needs to be deleted just for some part of a proof.
   5.752 -  The need of frequent additions or deletions may indicate a poorly
   5.753 -  designed simpset.
   5.754 -
   5.755 -  \begin{warn}
   5.756 -  The union of simpsets from theory imports (as described above) is
   5.757 -  not always a good starting point for the new theory.  If some
   5.758 -  ancestors have deleted simplification rules because they are no
   5.759 -  longer wanted, while others have left those rules in, then the union
   5.760 -  will contain the unwanted rules, and thus have to be deleted again
   5.761 -  in the theory body.
   5.762 -  \end{warn}
   5.763 -*}
   5.764 -
   5.765 -
   5.766 -subsection {* Ordered rewriting with permutative rules *}
   5.767 -
   5.768 -text {* A rewrite rule is \emph{permutative} if the left-hand side and
    5.769 -  right-hand side are equal up to renaming of variables.  The most
   5.770 -  common permutative rule is commutativity: @{text "?x + ?y = ?y +
   5.771 -  ?x"}.  Other examples include @{text "(?x - ?y) - ?z = (?x - ?z) -
   5.772 -  ?y"} in arithmetic and @{text "insert ?x (insert ?y ?A) = insert ?y
   5.773 -  (insert ?x ?A)"} for sets.  Such rules are common enough to merit
   5.774 -  special attention.
   5.775 -
   5.776 -  Because ordinary rewriting loops given such rules, the Simplifier
   5.777 -  employs a special strategy, called \emph{ordered rewriting}.
   5.778 -  Permutative rules are detected and only applied if the rewriting
   5.779 -  step decreases the redex wrt.\ a given term ordering.  For example,
   5.780 -  commutativity rewrites @{text "b + a"} to @{text "a + b"}, but then
   5.781 -  stops, because the redex cannot be decreased further in the sense of
   5.782 -  the term ordering.
   5.783 -
   5.784 -  The default is lexicographic ordering of term structure, but this
   5.785 -  could be also changed locally for special applications via
   5.786 -  @{index_ML Simplifier.set_termless} in Isabelle/ML.
   5.787 -
   5.788 -  \medskip Permutative rewrite rules are declared to the Simplifier
   5.789 -  just like other rewrite rules.  Their special status is recognized
   5.790 -  automatically, and their application is guarded by the term ordering
   5.791 -  accordingly. *}
   5.792 -
   5.793 -
   5.794 -subsubsection {* Rewriting with AC operators *}
   5.795 -
   5.796 -text {* Ordered rewriting is particularly effective in the case of
   5.797 -  associative-commutative operators.  (Associativity by itself is not
   5.798 -  permutative.)  When dealing with an AC-operator @{text "f"}, keep
   5.799 -  the following points in mind:
   5.800 -
   5.801 -  \begin{itemize}
   5.802 -
   5.803 -  \item The associative law must always be oriented from left to
   5.804 -  right, namely @{text "f (f x y) z = f x (f y z)"}.  The opposite
   5.805 -  orientation, if used with commutativity, leads to looping in
   5.806 -  conjunction with the standard term order.
   5.807 -
   5.808 -  \item To complete your set of rewrite rules, you must add not just
   5.809 -  associativity (A) and commutativity (C) but also a derived rule
   5.810 -  \emph{left-commutativity} (LC): @{text "f x (f y z) = f y (f x z)"}.
   5.811 -
   5.812 -  \end{itemize}
   5.813 -
   5.814 -  Ordered rewriting with the combination of A, C, and LC sorts a term
   5.815 -  lexicographically --- the rewriting engine imitates bubble-sort.
   5.816 -*}
   5.817 -
   5.818 -locale AC_example =
   5.819 -  fixes f :: "'a \<Rightarrow> 'a \<Rightarrow> 'a"  (infix "\<bullet>" 60)
   5.820 -  assumes assoc: "(x \<bullet> y) \<bullet> z = x \<bullet> (y \<bullet> z)"
   5.821 -  assumes commute: "x \<bullet> y = y \<bullet> x"
   5.822 -begin
   5.823 -
   5.824 -lemma left_commute: "x \<bullet> (y \<bullet> z) = y \<bullet> (x \<bullet> z)"
   5.825 -proof -
   5.826 -  have "(x \<bullet> y) \<bullet> z = (y \<bullet> x) \<bullet> z" by (simp only: commute)
   5.827 -  then show ?thesis by (simp only: assoc)
   5.828 -qed
   5.829 -
   5.830 -lemmas AC_rules = assoc commute left_commute
   5.831 -
   5.832 -text {* Thus the Simplifier is able to establish equalities with
   5.833 -  arbitrary permutations of subterms, by normalizing to a common
   5.834 -  standard form.  For example: *}
   5.835 -
   5.836 -lemma "(b \<bullet> c) \<bullet> a = xxx"
   5.837 -  apply (simp only: AC_rules)
   5.838 -  txt {* @{subgoals} *}
   5.839 -  oops
   5.840 -
   5.841 -lemma "(b \<bullet> c) \<bullet> a = a \<bullet> (b \<bullet> c)" by (simp only: AC_rules)
   5.842 -lemma "(b \<bullet> c) \<bullet> a = c \<bullet> (b \<bullet> a)" by (simp only: AC_rules)
   5.843 -lemma "(b \<bullet> c) \<bullet> a = (c \<bullet> b) \<bullet> a" by (simp only: AC_rules)
   5.844 -
   5.845 -end
   5.846 -
   5.847 -text {* Martin and Nipkow \cite{martin-nipkow} discuss the theory and
   5.848 -  give many examples; other algebraic structures are amenable to
   5.849 -  ordered rewriting, such as boolean rings.  The Boyer-Moore theorem
   5.850 -  prover \cite{bm88book} also employs ordered rewriting.
   5.851 -*}
   5.852 -
   5.853 -
   5.854 -subsubsection {* Re-orienting equalities *}
   5.855 -
   5.856 -text {* Another application of ordered rewriting uses the derived rule
   5.857 -  @{thm [source] eq_commute}: @{thm [source = false] eq_commute} to
   5.858 -  reverse equations.
   5.859 -
   5.860 -  This is occasionally useful to re-orient local assumptions according
   5.861 -  to the term ordering, when other built-in mechanisms of
   5.862 -  reorientation and mutual simplification fail to apply.  *}
   5.863 -
   5.864 -
   5.865 -subsection {* Configuration options \label{sec:simp-config} *}
   5.866 -
   5.867 -text {*
   5.868 -  \begin{tabular}{rcll}
   5.869 -    @{attribute_def simp_depth_limit} & : & @{text attribute} & default @{text 100} \\
   5.870 -    @{attribute_def simp_trace} & : & @{text attribute} & default @{text false} \\
   5.871 -    @{attribute_def simp_trace_depth_limit} & : & @{text attribute} & default @{text 1} \\
   5.872 -    @{attribute_def simp_debug} & : & @{text attribute} & default @{text false} \\
   5.873 -  \end{tabular}
   5.874 -  \medskip
   5.875 -
    5.876 -  These configuration options control further aspects of the Simplifier.
   5.877 -  See also \secref{sec:config}.
   5.878 -
   5.879 -  \begin{description}
   5.880 -
   5.881 -  \item @{attribute simp_depth_limit} limits the number of recursive
   5.882 -  invocations of the Simplifier during conditional rewriting.
   5.883 -
   5.884 -  \item @{attribute simp_trace} makes the Simplifier output internal
   5.885 -  operations.  This includes rewrite steps, but also bookkeeping like
   5.886 -  modifications of the simpset.
   5.887 -
   5.888 -  \item @{attribute simp_trace_depth_limit} limits the effect of
   5.889 -  @{attribute simp_trace} to the given depth of recursive Simplifier
   5.890 -  invocations (when solving conditions of rewrite rules).
   5.891 -
   5.892 -  \item @{attribute simp_debug} makes the Simplifier output some extra
   5.893 -  information about internal operations.  This includes any attempted
   5.894 -  invocation of simplification procedures.
   5.895 -
   5.896 -  \end{description}
   5.897 -*}
   5.898 -
   5.899 -
   5.900 -subsection {* Simplification procedures \label{sec:simproc} *}
   5.901 -
   5.902 -text {* Simplification procedures are ML functions that produce proven
   5.903 -  rewrite rules on demand.  They are associated with higher-order
   5.904 -  patterns that approximate the left-hand sides of equations.  The
   5.905 -  Simplifier first matches the current redex against one of the LHS
   5.906 -  patterns; if this succeeds, the corresponding ML function is
   5.907 -  invoked, passing the Simplifier context and redex term.  Thus rules
   5.908 -  may be specifically fashioned for particular situations, resulting
   5.909 -  in a more powerful mechanism than term rewriting by a fixed set of
   5.910 -  rules.
   5.911 -
   5.912 -  Any successful result needs to be a (possibly conditional) rewrite
   5.913 -  rule @{text "t \<equiv> u"} that is applicable to the current redex.  The
   5.914 -  rule will be applied just as any ordinary rewrite rule.  It is
   5.915 -  expected to be already in \emph{internal form}, bypassing the
   5.916 -  automatic preprocessing of object-level equivalences.
   5.917 -
   5.918 -  \begin{matharray}{rcl}
   5.919 -    @{command_def "simproc_setup"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   5.920 -    simproc & : & @{text attribute} \\
   5.921 -  \end{matharray}
   5.922 -
   5.923 -  @{rail \<open>
   5.924 -    @@{command simproc_setup} @{syntax name} '(' (@{syntax term} + '|') ')' '='
   5.925 -      @{syntax text} \<newline> (@'identifier' (@{syntax nameref}+))?
   5.926 -    ;
   5.927 -
   5.928 -    @@{attribute simproc} (('add' ':')? | 'del' ':') (@{syntax name}+)
   5.929 -  \<close>}
   5.930 -
   5.931 -  \begin{description}
   5.932 -
   5.933 -  \item @{command "simproc_setup"} defines a named simplification
   5.934 -  procedure that is invoked by the Simplifier whenever any of the
   5.935 -  given term patterns match the current redex.  The implementation,
   5.936 -  which is provided as ML source text, needs to be of type @{ML_type
   5.937 -  "morphism -> simpset -> cterm -> thm option"}, where the @{ML_type
   5.938 -  cterm} represents the current redex @{text r} and the result is
   5.939 -  supposed to be some proven rewrite rule @{text "r \<equiv> r'"} (or a
   5.940 -  generalized version), or @{ML NONE} to indicate failure.  The
   5.941 -  @{ML_type simpset} argument holds the full context of the current
   5.942 -  Simplifier invocation, including the actual Isar proof context.  The
   5.943 -  @{ML_type morphism} informs about the difference of the original
   5.944 -  compilation context wrt.\ the one of the actual application later
   5.945 -  on.  The optional @{keyword "identifier"} specifies theorems that
   5.946 -  represent the logical content of the abstract theory of this
   5.947 -  simproc.
   5.948 -
   5.949 -  Morphisms and identifiers are only relevant for simprocs that are
   5.950 -  defined within a local target context, e.g.\ in a locale.
   5.951 -
   5.952 -  \item @{text "simproc add: name"} and @{text "simproc del: name"}
   5.953 -  add or delete named simprocs to the current Simplifier context.  The
   5.954 -  default is to add a simproc.  Note that @{command "simproc_setup"}
   5.955 -  already adds the new simproc to the subsequent context.
   5.956 -
   5.957 -  \end{description}
   5.958 -*}
   5.959 -
   5.960 -
   5.961 -subsubsection {* Example *}
   5.962 -
   5.963 -text {* The following simplification procedure for @{thm
   5.964 -  [source=false, show_types] unit_eq} in HOL performs fine-grained
   5.965 -  control over rule application, beyond higher-order pattern matching.
   5.966 -  Declaring @{thm unit_eq} as @{attribute simp} directly would make
   5.967 -  the simplifier loop!  Note that a version of this simplification
   5.968 -  procedure is already active in Isabelle/HOL.  *}
   5.969 -
   5.970 -simproc_setup unit ("x::unit") = {*
   5.971 -  fn _ => fn _ => fn ct =>
   5.972 -    if HOLogic.is_unit (term_of ct) then NONE
   5.973 -    else SOME (mk_meta_eq @{thm unit_eq})
   5.974 -*}
   5.975 -
   5.976 -text {* Since the Simplifier applies simplification procedures
   5.977 -  frequently, it is important to make the failure check in ML
   5.978 -  reasonably fast. *}
   5.979 -
   5.980 -
   5.981 -subsection {* Configurable Simplifier strategies \label{sec:simp-strategies} *}
   5.982 -
   5.983 -text {* The core term-rewriting engine of the Simplifier is normally
   5.984 -  used in combination with some add-on components that modify the
   5.985 -  strategy and allow to integrate other non-Simplifier proof tools.
   5.986 -  These may be reconfigured in ML as explained below.  Even if the
   5.987 -  default strategies of object-logics like Isabelle/HOL are used
   5.988 -  unchanged, it helps to understand how the standard Simplifier
   5.989 -  strategies work. *}
   5.990 -
   5.991 -
   5.992 -subsubsection {* The subgoaler *}
   5.993 -
   5.994 -text {*
   5.995 -  \begin{mldecls}
   5.996 -  @{index_ML Simplifier.set_subgoaler: "(Proof.context -> int -> tactic) ->
   5.997 -  Proof.context -> Proof.context"} \\
   5.998 -  @{index_ML Simplifier.prems_of: "Proof.context -> thm list"} \\
   5.999 -  \end{mldecls}
  5.1000 -
  5.1001 -  The subgoaler is the tactic used to solve subgoals arising out of
  5.1002 -  conditional rewrite rules or congruence rules.  The default should
  5.1003 -  be simplification itself.  In rare situations, this strategy may
  5.1004 -  need to be changed.  For example, if the premise of a conditional
  5.1005 -  rule is an instance of its conclusion, as in @{text "Suc ?m < ?n \<Longrightarrow>
  5.1006 -  ?m < ?n"}, the default strategy could loop.  % FIXME !??
  5.1007 -
  5.1008 -  \begin{description}
  5.1009 -
  5.1010 -  \item @{ML Simplifier.set_subgoaler}~@{text "tac ctxt"} sets the
  5.1011 -  subgoaler of the context to @{text "tac"}.  The tactic will
  5.1012 -  be applied to the context of the running Simplifier instance.
  5.1013 -
  5.1014 -  \item @{ML Simplifier.prems_of}~@{text "ctxt"} retrieves the current
  5.1015 -  set of premises from the context.  This may be non-empty only if
  5.1016 -  the Simplifier has been told to utilize local assumptions in the
  5.1017 -  first place (cf.\ the options in \secref{sec:simp-meth}).
  5.1018 -
  5.1019 -  \end{description}
  5.1020 -
  5.1021 -  As an example, consider the following alternative subgoaler:
  5.1022 -*}
  5.1023 -
  5.1024 -ML {*
  5.1025 -  fun subgoaler_tac ctxt =
  5.1026 -    assume_tac ORELSE'
  5.1027 -    resolve_tac (Simplifier.prems_of ctxt) ORELSE'
  5.1028 -    asm_simp_tac ctxt
  5.1029 -*}
  5.1030 -
  5.1031 -text {* This tactic first tries to solve the subgoal by assumption or
   5.1032 -  by resolving with one of the premises, calling simplification
  5.1033 -  only if that fails. *}
  5.1034 -
  5.1035 -
  5.1036 -subsubsection {* The solver *}
  5.1037 -
  5.1038 -text {*
  5.1039 -  \begin{mldecls}
  5.1040 -  @{index_ML_type solver} \\
  5.1041 -  @{index_ML Simplifier.mk_solver: "string ->
  5.1042 -  (Proof.context -> int -> tactic) -> solver"} \\
  5.1043 -  @{index_ML_op setSolver: "Proof.context * solver -> Proof.context"} \\
  5.1044 -  @{index_ML_op addSolver: "Proof.context * solver -> Proof.context"} \\
  5.1045 -  @{index_ML_op setSSolver: "Proof.context * solver -> Proof.context"} \\
  5.1046 -  @{index_ML_op addSSolver: "Proof.context * solver -> Proof.context"} \\
  5.1047 -  \end{mldecls}
  5.1048 -
  5.1049 -  A solver is a tactic that attempts to solve a subgoal after
  5.1050 -  simplification.  Its core functionality is to prove trivial subgoals
  5.1051 -  such as @{prop "True"} and @{text "t = t"}, but object-logics might
  5.1052 -  be more ambitious.  For example, Isabelle/HOL performs a restricted
  5.1053 -  version of linear arithmetic here.
  5.1054 -
  5.1055 -  Solvers are packaged up in abstract type @{ML_type solver}, with
  5.1056 -  @{ML Simplifier.mk_solver} as the only operation to create a solver.
  5.1057 -
  5.1058 -  \medskip Rewriting does not instantiate unknowns.  For example,
  5.1059 -  rewriting alone cannot prove @{text "a \<in> ?A"} since this requires
  5.1060 -  instantiating @{text "?A"}.  The solver, however, is an arbitrary
  5.1061 -  tactic and may instantiate unknowns as it pleases.  This is the only
  5.1062 -  way the Simplifier can handle a conditional rewrite rule whose
  5.1063 -  condition contains extra variables.  When a simplification tactic is
  5.1064 -  to be combined with other provers, especially with the Classical
  5.1065 -  Reasoner, it is important whether it can be considered safe or not.
  5.1066 -  For this reason a simpset contains two solvers: safe and unsafe.
  5.1067 -
  5.1068 -  The standard simplification strategy solely uses the unsafe solver,
  5.1069 -  which is appropriate in most cases.  For special applications where
  5.1070 -  the simplification process is not allowed to instantiate unknowns
  5.1071 -  within the goal, simplification starts with the safe solver, but may
  5.1072 -  still apply the ordinary unsafe one in nested simplifications for
  5.1073 -  conditional rules or congruences. Note that in this way the overall
  5.1074 -  tactic is not totally safe: it may instantiate unknowns that appear
  5.1075 -  also in other subgoals.
  5.1076 -
  5.1077 -  \begin{description}
  5.1078 -
  5.1079 -  \item @{ML Simplifier.mk_solver}~@{text "name tac"} turns @{text
  5.1080 -  "tac"} into a solver; the @{text "name"} is only attached as a
  5.1081 -  comment and has no further significance.
  5.1082 -
  5.1083 -  \item @{text "ctxt setSSolver solver"} installs @{text "solver"} as
  5.1084 -  the safe solver of @{text "ctxt"}.
  5.1085 -
  5.1086 -  \item @{text "ctxt addSSolver solver"} adds @{text "solver"} as an
  5.1087 -  additional safe solver; it will be tried after the solvers which had
  5.1088 -  already been present in @{text "ctxt"}.
  5.1089 -
  5.1090 -  \item @{text "ctxt setSolver solver"} installs @{text "solver"} as the
  5.1091 -  unsafe solver of @{text "ctxt"}.
  5.1092 -
  5.1093 -  \item @{text "ctxt addSolver solver"} adds @{text "solver"} as an
  5.1094 -  additional unsafe solver; it will be tried after the solvers which
  5.1095 -  had already been present in @{text "ctxt"}.
  5.1096 -
  5.1097 -  \end{description}
  5.1098 -
  5.1099 -  \medskip The solver tactic is invoked with the context of the
  5.1100 -  running Simplifier.  Further operations
  5.1101 -  may be used to retrieve relevant information, such as the list of
  5.1102 -  local Simplifier premises via @{ML Simplifier.prems_of} --- this
  5.1103 -  list may be non-empty only if the Simplifier runs in a mode that
  5.1104 -  utilizes local assumptions (see also \secref{sec:simp-meth}).  The
  5.1105 -  solver is also presented the full goal including its assumptions in
  5.1106 -  any case.  Thus it can use these (e.g.\ by calling @{ML
  5.1107 -  assume_tac}), even if the Simplifier proper happens to ignore local
  5.1108 -  premises at the moment.
  5.1109 -
  5.1110 -  \medskip As explained before, the subgoaler is also used to solve
  5.1111 -  the premises of congruence rules.  These are usually of the form
  5.1112 -  @{text "s = ?x"}, where @{text "s"} needs to be simplified and
  5.1113 -  @{text "?x"} needs to be instantiated with the result.  Typically,
  5.1114 -  the subgoaler will invoke the Simplifier at some point, which will
  5.1115 -  eventually call the solver.  For this reason, solver tactics must be
  5.1116 -  prepared to solve goals of the form @{text "t = ?x"}, usually by
  5.1117 -  reflexivity.  In particular, reflexivity should be tried before any
  5.1118 -  of the fancy automated proof tools.
  5.1119 -
  5.1120 -  It may even happen that due to simplification the subgoal is no
  5.1121 -  longer an equality.  For example, @{text "False \<longleftrightarrow> ?Q"} could be
  5.1122 -  rewritten to @{text "\<not> ?Q"}.  To cover this case, the solver could
  5.1123 -  try resolving with the theorem @{text "\<not> False"} of the
  5.1124 -  object-logic.
  5.1125 -
  5.1126 -  \medskip
  5.1127 -
  5.1128 -  \begin{warn}
  5.1129 -  If a premise of a congruence rule cannot be proved, then the
  5.1130 -  congruence is ignored.  This should only happen if the rule is
  5.1131 -  \emph{conditional} --- that is, contains premises not of the form
  5.1132 -  @{text "t = ?x"}.  Otherwise it indicates that some congruence rule,
  5.1133 -  or possibly the subgoaler or solver, is faulty.
  5.1134 -  \end{warn}
  5.1135 -*}
  5.1136 -
  5.1137 -
  5.1138 -subsubsection {* The looper *}
  5.1139 -
  5.1140 -text {*
  5.1141 -  \begin{mldecls}
  5.1142 -  @{index_ML_op setloop: "Proof.context *
  5.1143 -  (Proof.context -> int -> tactic) -> Proof.context"} \\
  5.1144 -  @{index_ML_op addloop: "Proof.context *
  5.1145 -  (string * (Proof.context -> int -> tactic))
  5.1146 -  -> Proof.context"} \\
  5.1147 -  @{index_ML_op delloop: "Proof.context * string -> Proof.context"} \\
  5.1148 -  @{index_ML Splitter.add_split: "thm -> Proof.context -> Proof.context"} \\
  5.1149 -  @{index_ML Splitter.del_split: "thm -> Proof.context -> Proof.context"} \\
  5.1150 -  \end{mldecls}
  5.1151 -
  5.1152 -  The looper is a list of tactics that are applied after
  5.1153 -  simplification, in case the solver failed to solve the simplified
  5.1154 -  goal.  If the looper succeeds, the simplification process is started
  5.1155 -  all over again.  Each of the subgoals generated by the looper is
  5.1156 -  attacked in turn, in reverse order.
  5.1157 -
  5.1158 -  A typical looper is \emph{case splitting}: the expansion of a
  5.1159 -  conditional.  Another possibility is to apply an elimination rule on
  5.1160 -  the assumptions.  More adventurous loopers could start an induction.
  5.1161 -
  5.1162 -  \begin{description}
  5.1163 -
  5.1164 -  \item @{text "ctxt setloop tac"} installs @{text "tac"} as the only
  5.1165 -  looper tactic of @{text "ctxt"}.
  5.1166 -
  5.1167 -  \item @{text "ctxt addloop (name, tac)"} adds @{text "tac"} as an
  5.1168 -  additional looper tactic with name @{text "name"}, which is
  5.1169 -  significant for managing the collection of loopers.  The tactic will
  5.1170 -  be tried after the looper tactics that had already been present in
  5.1171 -  @{text "ctxt"}.
  5.1172 -
  5.1173 -  \item @{text "ctxt delloop name"} deletes the looper tactic that was
  5.1174 -  associated with @{text "name"} from @{text "ctxt"}.
  5.1175 -
  5.1176 -  \item @{ML Splitter.add_split}~@{text "thm ctxt"} adds split tactics
  5.1177 -  for @{text "thm"} as additional looper tactics of @{text "ctxt"}.
  5.1178 -
  5.1179 -  \item @{ML Splitter.del_split}~@{text "thm ctxt"} deletes the split
  5.1180 -  tactic corresponding to @{text thm} from the looper tactics of
  5.1181 -  @{text "ctxt"}.
  5.1182 -
  5.1183 -  \end{description}
  5.1184 -
  5.1185 -  The splitter replaces applications of a given function; the
  5.1186 -  right-hand side of the replacement can be anything.  For example,
  5.1187 -  here is a splitting rule for conditional expressions:
  5.1188 -
  5.1189 -  @{text [display] "?P (if ?Q ?x ?y) \<longleftrightarrow> (?Q \<longrightarrow> ?P ?x) \<and> (\<not> ?Q \<longrightarrow> ?P ?y)"}
  5.1190 -
  5.1191 -  Another example is the elimination operator for Cartesian products
   5.1192 -  (which happens to be called @{text split} in Isabelle/HOL):
  5.1193 -
   5.1194 -  @{text [display] "?P (split ?f ?p) \<longleftrightarrow> (\<forall>a b. ?p = (a, b) \<longrightarrow> ?P (?f a b))"}
  5.1195 -
  5.1196 -  For technical reasons, there is a distinction between case splitting
  5.1197 -  in the conclusion and in the premises of a subgoal.  The former is
  5.1198 -  done by @{ML Splitter.split_tac} with rules like @{thm [source]
  5.1199 -  split_if} or @{thm [source] option.split}, which do not split the
  5.1200 -  subgoal, while the latter is done by @{ML Splitter.split_asm_tac}
  5.1201 -  with rules like @{thm [source] split_if_asm} or @{thm [source]
  5.1202 -  option.split_asm}, which split the subgoal.  The function @{ML
  5.1203 -  Splitter.add_split} automatically takes care of which tactic to
  5.1204 -  call, analyzing the form of the rules given as argument; it is the
  5.1205 -  same operation behind @{text "split"} attribute or method modifier
  5.1206 -  syntax in the Isar source language.
  5.1207 -
  5.1208 -  Case splits should be allowed only when necessary; they are
  5.1209 -  expensive and hard to control.  Case-splitting on if-expressions in
  5.1210 -  the conclusion is usually beneficial, so it is enabled by default in
  5.1211 -  Isabelle/HOL and Isabelle/FOL/ZF.
  5.1212 -
  5.1213 -  \begin{warn}
  5.1214 -  With @{ML Splitter.split_asm_tac} as looper component, the
  5.1215 -  Simplifier may split subgoals!  This might cause unexpected problems
  5.1216 -  in tactic expressions that silently assume 0 or 1 subgoals after
  5.1217 -  simplification.
  5.1218 -  \end{warn}
  5.1219 -*}
  5.1220 -
  5.1221 -
  5.1222 -subsection {* Forward simplification \label{sec:simp-forward} *}
  5.1223 -
  5.1224 -text {*
  5.1225 -  \begin{matharray}{rcl}
  5.1226 -    @{attribute_def simplified} & : & @{text attribute} \\
  5.1227 -  \end{matharray}
  5.1228 -
  5.1229 -  @{rail \<open>
  5.1230 -    @@{attribute simplified} opt? @{syntax thmrefs}?
  5.1231 -    ;
  5.1232 -
  5.1233 -    opt: '(' ('no_asm' | 'no_asm_simp' | 'no_asm_use') ')'
  5.1234 -  \<close>}
  5.1235 -
  5.1236 -  \begin{description}
  5.1237 -  
  5.1238 -  \item @{attribute simplified}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} causes a theorem to
  5.1239 -  be simplified, either by exactly the specified rules @{text "a\<^sub>1, \<dots>,
  5.1240 -  a\<^sub>n"}, or the implicit Simplifier context if no arguments are given.
  5.1241 -  The result is fully simplified by default, including assumptions and
  5.1242 -  conclusion; the options @{text no_asm} etc.\ tune the Simplifier in
   5.1243 -  the same way as for the @{text simp} method.
  5.1244 -
  5.1245 -  Note that forward simplification restricts the simplifier to its
  5.1246 -  most basic operation of term rewriting; solver and looper tactics
  5.1247 -  (\secref{sec:simp-strategies}) are \emph{not} involved here.  The
  5.1248 -  @{attribute simplified} attribute should be only rarely required
  5.1249 -  under normal circumstances.
  5.1250 -
  5.1251 -  \end{description}
  5.1252 -*}
  5.1253 -
  5.1254 -
  5.1255 -section {* The Classical Reasoner \label{sec:classical} *}
  5.1256 -
  5.1257 -subsection {* Basic concepts *}
  5.1258 -
  5.1259 -text {* Although Isabelle is generic, many users will be working in
  5.1260 -  some extension of classical first-order logic.  Isabelle/ZF is built
  5.1261 -  upon theory FOL, while Isabelle/HOL conceptually contains
  5.1262 -  first-order logic as a fragment.  Theorem-proving in predicate logic
  5.1263 -  is undecidable, but many automated strategies have been developed to
  5.1264 -  assist in this task.
  5.1265 -
  5.1266 -  Isabelle's classical reasoner is a generic package that accepts
  5.1267 -  certain information about a logic and delivers a suite of automatic
  5.1268 -  proof tools, based on rules that are classified and declared in the
  5.1269 -  context.  These proof procedures are slow and simplistic compared
  5.1270 -  with high-end automated theorem provers, but they can save
  5.1271 -  considerable time and effort in practice.  They can prove theorems
  5.1272 -  such as Pelletier's \cite{pelletier86} problems 40 and 41 in a few
  5.1273 -  milliseconds (including full proof reconstruction): *}
  5.1274 -
  5.1275 -lemma "(\<exists>y. \<forall>x. F x y \<longleftrightarrow> F x x) \<longrightarrow> \<not> (\<forall>x. \<exists>y. \<forall>z. F z y \<longleftrightarrow> \<not> F z x)"
  5.1276 -  by blast
  5.1277 -
  5.1278 -lemma "(\<forall>z. \<exists>y. \<forall>x. f x y \<longleftrightarrow> f x z \<and> \<not> f x x) \<longrightarrow> \<not> (\<exists>z. \<forall>x. f x z)"
  5.1279 -  by blast
  5.1280 -
  5.1281 -text {* The proof tools are generic.  They are not restricted to
  5.1282 -  first-order logic, and have been heavily used in the development of
  5.1283 -  the Isabelle/HOL library and applications.  The tactics can be
  5.1284 -  traced, and their components can be called directly; in this manner,
  5.1285 -  any proof can be viewed interactively.  *}
  5.1286 -
  5.1287 -
  5.1288 -subsubsection {* The sequent calculus *}
  5.1289 -
  5.1290 -text {* Isabelle supports natural deduction, which is easy to use for
  5.1291 -  interactive proof.  But natural deduction does not easily lend
  5.1292 -  itself to automation, and has a bias towards intuitionism.  For
  5.1293 -  certain proofs in classical logic, it can not be called natural.
  5.1294 -  The \emph{sequent calculus}, a generalization of natural deduction,
  5.1295 -  is easier to automate.
  5.1296 -
  5.1297 -  A \textbf{sequent} has the form @{text "\<Gamma> \<turnstile> \<Delta>"}, where @{text "\<Gamma>"}
  5.1298 -  and @{text "\<Delta>"} are sets of formulae.\footnote{For first-order
  5.1299 -  logic, sequents can equivalently be made from lists or multisets of
  5.1300 -  formulae.} The sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} is
  5.1301 -  \textbf{valid} if @{text "P\<^sub>1 \<and> \<dots> \<and> P\<^sub>m"} implies @{text "Q\<^sub>1 \<or> \<dots> \<or>
  5.1302 -  Q\<^sub>n"}.  Thus @{text "P\<^sub>1, \<dots>, P\<^sub>m"} represent assumptions, each of which
  5.1303 -  is true, while @{text "Q\<^sub>1, \<dots>, Q\<^sub>n"} represent alternative goals.  A
  5.1304 -  sequent is \textbf{basic} if its left and right sides have a common
  5.1305 -  formula, as in @{text "P, Q \<turnstile> Q, R"}; basic sequents are trivially
  5.1306 -  valid.
  5.1307 -
  5.1308 -  Sequent rules are classified as \textbf{right} or \textbf{left},
  5.1309 -  indicating which side of the @{text "\<turnstile>"} symbol they operate on.
  5.1310 -  Rules that operate on the right side are analogous to natural
  5.1311 -  deduction's introduction rules, and left rules are analogous to
  5.1312 -  elimination rules.  The sequent calculus analogue of @{text "(\<longrightarrow>I)"}
  5.1313 -  is the rule
  5.1314 -  \[
  5.1315 -  \infer[@{text "(\<longrightarrow>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<longrightarrow> Q"}}{@{text "P, \<Gamma> \<turnstile> \<Delta>, Q"}}
  5.1316 -  \]
  5.1317 -  Applying the rule backwards, this breaks down some implication on
  5.1318 -  the right side of a sequent; @{text "\<Gamma>"} and @{text "\<Delta>"} stand for
  5.1319 -  the sets of formulae that are unaffected by the inference.  The
  5.1320 -  analogue of the pair @{text "(\<or>I1)"} and @{text "(\<or>I2)"} is the
  5.1321 -  single rule
  5.1322 -  \[
  5.1323 -  \infer[@{text "(\<or>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, P \<or> Q"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P, Q"}}
  5.1324 -  \]
  5.1325 -  This breaks down some disjunction on the right side, replacing it by
  5.1326 -  both disjuncts.  Thus, the sequent calculus is a kind of
  5.1327 -  multiple-conclusion logic.
  5.1328 -
  5.1329 -  To illustrate the use of multiple formulae on the right, let us
  5.1330 -  prove the classical theorem @{text "(P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}.  Working
  5.1331 -  backwards, we reduce this formula to a basic sequent:
  5.1332 -  \[
  5.1333 -  \infer[@{text "(\<or>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q) \<or> (Q \<longrightarrow> P)"}}
  5.1334 -    {\infer[@{text "(\<longrightarrow>R)"}]{@{text "\<turnstile> (P \<longrightarrow> Q), (Q \<longrightarrow> P)"}}
  5.1335 -      {\infer[@{text "(\<longrightarrow>R)"}]{@{text "P \<turnstile> Q, (Q \<longrightarrow> P)"}}
  5.1336 -        {@{text "P, Q \<turnstile> Q, P"}}}}
  5.1337 -  \]
  5.1338 -
  5.1339 -  This example is typical of the sequent calculus: start with the
  5.1340 -  desired theorem and apply rules backwards in a fairly arbitrary
  5.1341 -  manner.  This yields a surprisingly effective proof procedure.
  5.1342 -  Quantifiers add only few complications, since Isabelle handles
  5.1343 -  parameters and schematic variables.  See \cite[Chapter
  5.1344 -  10]{paulson-ml2} for further discussion.  *}
  5.1345 -
  5.1346 -
  5.1347 -subsubsection {* Simulating sequents by natural deduction *}
  5.1348 -
  5.1349 -text {* Isabelle can represent sequents directly, as in the
  5.1350 -  object-logic LK.  But natural deduction is easier to work with, and
  5.1351 -  most object-logics employ it.  Fortunately, we can simulate the
  5.1352 -  sequent @{text "P\<^sub>1, \<dots>, P\<^sub>m \<turnstile> Q\<^sub>1, \<dots>, Q\<^sub>n"} by the Isabelle formula
  5.1353 -  @{text "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>2 \<Longrightarrow> ... \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> Q\<^sub>1"} where the order of
  5.1354 -  the assumptions and the choice of @{text "Q\<^sub>1"} are arbitrary.
  5.1355 -  Elim-resolution plays a key role in simulating sequent proofs.
  5.1356 -
  5.1357 -  We can easily handle reasoning on the left.  Elim-resolution with
  5.1358 -  the rules @{text "(\<or>E)"}, @{text "(\<bottom>E)"} and @{text "(\<exists>E)"} achieves
  5.1359 -  a similar effect as the corresponding sequent rules.  For the other
  5.1360 -  connectives, we use sequent-style elimination rules instead of
  5.1361 -  destruction rules such as @{text "(\<and>E1, 2)"} and @{text "(\<forall>E)"}.
  5.1362 -  But note that the rule @{text "(\<not>L)"} has no effect under our
  5.1363 -  representation of sequents!
  5.1364 -  \[
  5.1365 -  \infer[@{text "(\<not>L)"}]{@{text "\<not> P, \<Gamma> \<turnstile> \<Delta>"}}{@{text "\<Gamma> \<turnstile> \<Delta>, P"}}
  5.1366 -  \]
  5.1367 -
  5.1368 -  What about reasoning on the right?  Introduction rules can only
  5.1369 -  affect the formula in the conclusion, namely @{text "Q\<^sub>1"}.  The
  5.1370 -  other right-side formulae are represented as negated assumptions,
  5.1371 -  @{text "\<not> Q\<^sub>2, \<dots>, \<not> Q\<^sub>n"}.  In order to operate on one of these, it
  5.1372 -  must first be exchanged with @{text "Q\<^sub>1"}.  Elim-resolution with the
  5.1373 -  @{text swap} rule has this effect: @{text "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"}
  5.1374 -
  5.1375 -  To ensure that swaps occur only when necessary, each introduction
  5.1376 -  rule is converted into a swapped form: it is resolved with the
  5.1377 -  second premise of @{text "(swap)"}.  The swapped form of @{text
  5.1378 -  "(\<and>I)"}, which might be called @{text "(\<not>\<and>E)"}, is
  5.1379 -  @{text [display] "\<not> (P \<and> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> (\<not> R \<Longrightarrow> Q) \<Longrightarrow> R"}
  5.1380 -
  5.1381 -  Similarly, the swapped form of @{text "(\<longrightarrow>I)"} is
  5.1382 -  @{text [display] "\<not> (P \<longrightarrow> Q) \<Longrightarrow> (\<not> R \<Longrightarrow> P \<Longrightarrow> Q) \<Longrightarrow> R"}
  5.1383 -
  5.1384 -  Swapped introduction rules are applied using elim-resolution, which
  5.1385 -  deletes the negated formula.  Our representation of sequents also
  5.1386 -  requires the use of ordinary introduction rules.  If we had no
  5.1387 -  regard for readability of intermediate goal states, we could treat
  5.1388 -  the right side more uniformly by representing sequents as @{text
  5.1389 -  [display] "P\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> P\<^sub>m \<Longrightarrow> \<not> Q\<^sub>1 \<Longrightarrow> \<dots> \<Longrightarrow> \<not> Q\<^sub>n \<Longrightarrow> \<bottom>"}
  5.1390 -*}
  5.1391 -
  5.1392 -
  5.1393 -subsubsection {* Extra rules for the sequent calculus *}
  5.1394 -
  5.1395 -text {* As mentioned, destruction rules such as @{text "(\<and>E1, 2)"} and
  5.1396 -  @{text "(\<forall>E)"} must be replaced by sequent-style elimination rules.
  5.1397 -  In addition, we need rules to embody the classical equivalence
  5.1398 -  between @{text "P \<longrightarrow> Q"} and @{text "\<not> P \<or> Q"}.  The introduction
  5.1399 -  rules @{text "(\<or>I1, 2)"} are replaced by a rule that simulates
  5.1400 -  @{text "(\<or>R)"}: @{text [display] "(\<not> Q \<Longrightarrow> P) \<Longrightarrow> P \<or> Q"}
  5.1401 -
  5.1402 -  The destruction rule @{text "(\<longrightarrow>E)"} is replaced by @{text [display]
  5.1403 -  "(P \<longrightarrow> Q) \<Longrightarrow> (\<not> P \<Longrightarrow> R) \<Longrightarrow> (Q \<Longrightarrow> R) \<Longrightarrow> R"}
  5.1404 -
  5.1405 -  Quantifier replication also requires special rules.  In classical
  5.1406 -  logic, @{text "\<exists>x. P x"} is equivalent to @{text "\<not> (\<forall>x. \<not> P x)"};
  5.1407 -  the rules @{text "(\<exists>R)"} and @{text "(\<forall>L)"} are dual:
  5.1408 -  \[
  5.1409 -  \infer[@{text "(\<exists>R)"}]{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x"}}{@{text "\<Gamma> \<turnstile> \<Delta>, \<exists>x. P x, P t"}}
  5.1410 -  \qquad
  5.1411 -  \infer[@{text "(\<forall>L)"}]{@{text "\<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}{@{text "P t, \<forall>x. P x, \<Gamma> \<turnstile> \<Delta>"}}
  5.1412 -  \]
  5.1413 -  Thus both kinds of quantifier may be replicated.  Theorems requiring
  5.1414 -  multiple uses of a universal formula are easy to invent; consider
  5.1415 -  @{text [display] "(\<forall>x. P x \<longrightarrow> P (f x)) \<and> P a \<longrightarrow> P (f\<^sup>n a)"} for any
  5.1416 -  @{text "n > 1"}.  Natural examples of the multiple use of an
  5.1417 -  existential formula are rare; a standard one is @{text "\<exists>x. \<forall>y. P x
  5.1418 -  \<longrightarrow> P y"}.
  5.1419 -
  5.1420 -  Forgoing quantifier replication loses completeness, but gains
  5.1421 -  decidability, since the search space becomes finite.  Many useful
  5.1422 -  theorems can be proved without replication, and the search generally
  5.1423 -  delivers its verdict in a reasonable time.  To adopt this approach,
  5.1424 -  represent the sequent rules @{text "(\<exists>R)"}, @{text "(\<exists>L)"} and
  5.1425 -  @{text "(\<forall>R)"} by @{text "(\<exists>I)"}, @{text "(\<exists>E)"} and @{text "(\<forall>I)"},
  5.1426 -  respectively, and put @{text "(\<forall>E)"} into elimination form: @{text
  5.1427 -  [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"}
  5.1428 -
  5.1429 -  Elim-resolution with this rule will delete the universal formula
  5.1430 -  after a single use.  To replicate universal quantifiers, replace the
  5.1431 -  rule by @{text [display] "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"}
  5.1432 -
  5.1433 -  To replicate existential quantifiers, replace @{text "(\<exists>I)"} by
  5.1434 -  @{text [display] "(\<not> (\<exists>x. P x) \<Longrightarrow> P t) \<Longrightarrow> \<exists>x. P x"}
  5.1435 -
  5.1436 -  All introduction rules mentioned above are also useful in swapped
  5.1437 -  form.
  5.1438 -
  5.1439 -  Replication makes the search space infinite; we must apply the rules
  5.1440 -  with care.  The classical reasoner distinguishes between safe and
  5.1441 -  unsafe rules, applying the latter only when there is no alternative.
  5.1442 -  Depth-first search may well go down a blind alley; best-first search
  5.1443 -  is better behaved in an infinite search space.  However, quantifier
  5.1444 -  replication is too expensive to prove any but the simplest theorems.
  5.1445 -*}
  5.1446 -
  5.1447 -
  5.1448 -subsection {* Rule declarations *}
  5.1449 -
  5.1450 -text {* The proof tools of the Classical Reasoner depend on
  5.1451 -  collections of rules declared in the context, which are classified
  5.1452 -  as introduction, elimination or destruction and as \emph{safe} or
  5.1453 -  \emph{unsafe}.  In general, safe rules can be attempted blindly,
  5.1454 -  while unsafe rules must be used with care.  A safe rule must never
  5.1455 -  reduce a provable goal to an unprovable set of subgoals.
  5.1456 -
  5.1457 -  The rule @{text "P \<Longrightarrow> P \<or> Q"} is unsafe because it reduces @{text "P
  5.1458 -  \<or> Q"} to @{text "P"}, which might turn out as premature choice of an
  5.1459 -  unprovable subgoal.  Any rule is unsafe whose premises contain new
  5.1460 -  unknowns.  The elimination rule @{text "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> Q) \<Longrightarrow> Q"} is
  5.1461 -  unsafe, since it is applied via elim-resolution, which discards the
  5.1462 -  assumption @{text "\<forall>x. P x"} and replaces it by the weaker
  5.1463 -  assumption @{text "P t"}.  The rule @{text "P t \<Longrightarrow> \<exists>x. P x"} is
  5.1464 -  unsafe for similar reasons.  The quantifier duplication rule @{text
  5.1465 -  "\<forall>x. P x \<Longrightarrow> (P t \<Longrightarrow> \<forall>x. P x \<Longrightarrow> Q) \<Longrightarrow> Q"} is unsafe in a different sense:
  5.1466 -  since it keeps the assumption @{text "\<forall>x. P x"}, it is prone to
  5.1467 -  looping.  In classical first-order logic, all rules are safe except
  5.1468 -  those mentioned above.
  5.1469 -
  5.1470 -  The safe~/ unsafe distinction is vague, and may be regarded merely
  5.1471 -  as a way of giving some rules priority over others.  One could argue
  5.1472 -  that @{text "(\<or>E)"} is unsafe, because repeated application of it
  5.1473 -  could generate exponentially many subgoals.  Induction rules are
  5.1474 -  unsafe because inductive proofs are difficult to set up
  5.1475 -  automatically.  Any inference is unsafe that instantiates an unknown
  5.1476 -  in the proof state --- thus matching must be used, rather than
  5.1477 -  unification.  Even proof by assumption is unsafe if it instantiates
  5.1478 -  unknowns shared with other subgoals.
  5.1479 -
  5.1480 -  \begin{matharray}{rcl}
  5.1481 -    @{command_def "print_claset"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  5.1482 -    @{attribute_def intro} & : & @{text attribute} \\
  5.1483 -    @{attribute_def elim} & : & @{text attribute} \\
  5.1484 -    @{attribute_def dest} & : & @{text attribute} \\
  5.1485 -    @{attribute_def rule} & : & @{text attribute} \\
  5.1486 -    @{attribute_def iff} & : & @{text attribute} \\
  5.1487 -    @{attribute_def swapped} & : & @{text attribute} \\
  5.1488 -  \end{matharray}
  5.1489 -
  5.1490 -  @{rail \<open>
  5.1491 -    (@@{attribute intro} | @@{attribute elim} | @@{attribute dest}) ('!' | () | '?') @{syntax nat}?
  5.1492 -    ;
  5.1493 -    @@{attribute rule} 'del'
  5.1494 -    ;
  5.1495 -    @@{attribute iff} (((() | 'add') '?'?) | 'del')
  5.1496 -  \<close>}
  5.1497 -
  5.1498 -  \begin{description}
  5.1499 -
  5.1500 -  \item @{command "print_claset"} prints the collection of rules
  5.1501 -  declared to the Classical Reasoner, i.e.\ the @{ML_type claset}
  5.1502 -  within the context.
  5.1503 -
  5.1504 -  \item @{attribute intro}, @{attribute elim}, and @{attribute dest}
  5.1505 -  declare introduction, elimination, and destruction rules,
  5.1506 -  respectively.  By default, rules are considered as \emph{unsafe}
  5.1507 -  (i.e.\ not applied blindly without backtracking), while ``@{text
  5.1508 -  "!"}'' classifies as \emph{safe}.  Rule declarations marked by
  5.1509 -  ``@{text "?"}'' coincide with those of Isabelle/Pure, cf.\
  5.1510 -  \secref{sec:pure-meth-att} (i.e.\ are only applied in single steps
  5.1511 -  of the @{method rule} method).  The optional natural number
  5.1512 -  specifies an explicit weight argument, which is ignored by the
  5.1513 -  automated reasoning tools, but determines the search order of single
  5.1514 -  rule steps.
  5.1515 -
  5.1516 -  Introduction rules are those that can be applied using ordinary
  5.1517 -  resolution.  Their swapped forms are generated internally, which
  5.1518 -  will be applied using elim-resolution.  Elimination rules are
  5.1519 -  applied using elim-resolution.  Rules are sorted by the number of
  5.1520 -  new subgoals they will yield; rules that generate the fewest
  5.1521 -  subgoals will be tried first.  Otherwise, later declarations take
  5.1522 -  precedence over earlier ones.
  5.1523 -
  5.1524 -  Rules already present in the context with the same classification
  5.1525 -  are ignored.  A warning is printed if the rule has already been
  5.1526 -  added with some other classification, but the rule is added anyway
  5.1527 -  as requested.
  5.1528 -
  5.1529 -  \item @{attribute rule}~@{text del} deletes all occurrences of a
  5.1530 -  rule from the classical context, regardless of its classification as
  5.1531 -  introduction~/ elimination~/ destruction and safe~/ unsafe.
  5.1532 -
  5.1533 -  \item @{attribute iff} declares logical equivalences to the
  5.1534 -  Simplifier and the Classical reasoner at the same time.
  5.1535 -  Non-conditional rules result in a safe introduction and elimination
  5.1536 -  pair; conditional ones are considered unsafe.  Rules with negative
  5.1537 -  conclusion are automatically inverted (using @{text "\<not>"}-elimination
  5.1538 -  internally).
  5.1539 -
  5.1540 -  The ``@{text "?"}'' version of @{attribute iff} declares rules to
  5.1541 -  the Isabelle/Pure context only, and omits the Simplifier
  5.1542 -  declaration.
  5.1543 -
  5.1544 -  \item @{attribute swapped} turns an introduction rule into an
  5.1545 -  elimination, by resolving with the classical swap principle @{text
  5.1546 -  "\<not> P \<Longrightarrow> (\<not> R \<Longrightarrow> P) \<Longrightarrow> R"} in the second position.  This is mainly for
  5.1547 -  illustrative purposes: the Classical Reasoner already swaps rules
  5.1548 -  internally as explained above.
  5.1549 -
  5.1550 -  \end{description}
  5.1551 -*}
  5.1552 -
  5.1553 -
  5.1554 -subsection {* Structured methods *}
  5.1555 -
  5.1556 -text {*
  5.1557 -  \begin{matharray}{rcl}
  5.1558 -    @{method_def rule} & : & @{text method} \\
  5.1559 -    @{method_def contradiction} & : & @{text method} \\
  5.1560 -  \end{matharray}
  5.1561 -
  5.1562 -  @{rail \<open>
  5.1563 -    @@{method rule} @{syntax thmrefs}?
  5.1564 -  \<close>}
  5.1565 -
  5.1566 -  \begin{description}
  5.1567 -
  5.1568 -  \item @{method rule} as offered by the Classical Reasoner is a
  5.1569 -  refinement over the Pure one (see \secref{sec:pure-meth-att}).  Both
  5.1570 -  versions work the same, but the classical version observes the
  5.1571 -  classical rule context in addition to that of Isabelle/Pure.
  5.1572 -
  5.1573 -  Common object logics (HOL, ZF, etc.) declare a rich collection of
  5.1574 -  classical rules (even if these would qualify as intuitionistic
  5.1575 -  ones), but only few declarations to the rule context of
  5.1576 -  Isabelle/Pure (\secref{sec:pure-meth-att}).
  5.1577 -
  5.1578 -  \item @{method contradiction} solves some goal by contradiction,
  5.1579 -  deriving any result from both @{text "\<not> A"} and @{text A}.  Chained
  5.1580 -  facts, which are guaranteed to participate, may appear in either
  5.1581 -  order.
  5.1582 -
  5.1583 -  \end{description}
  5.1584 -*}
  5.1585 -
  5.1586 -
  5.1587 -subsection {* Fully automated methods *}
  5.1588 -
  5.1589 -text {*
  5.1590 -  \begin{matharray}{rcl}
  5.1591 -    @{method_def blast} & : & @{text method} \\
  5.1592 -    @{method_def auto} & : & @{text method} \\
  5.1593 -    @{method_def force} & : & @{text method} \\
  5.1594 -    @{method_def fast} & : & @{text method} \\
  5.1595 -    @{method_def slow} & : & @{text method} \\
  5.1596 -    @{method_def best} & : & @{text method} \\
  5.1597 -    @{method_def fastforce} & : & @{text method} \\
  5.1598 -    @{method_def slowsimp} & : & @{text method} \\
  5.1599 -    @{method_def bestsimp} & : & @{text method} \\
  5.1600 -    @{method_def deepen} & : & @{text method} \\
  5.1601 -  \end{matharray}
  5.1602 -
  5.1603 -  @{rail \<open>
  5.1604 -    @@{method blast} @{syntax nat}? (@{syntax clamod} * )
  5.1605 -    ;
  5.1606 -    @@{method auto} (@{syntax nat} @{syntax nat})? (@{syntax clasimpmod} * )
  5.1607 -    ;
  5.1608 -    @@{method force} (@{syntax clasimpmod} * )
  5.1609 -    ;
  5.1610 -    (@@{method fast} | @@{method slow} | @@{method best}) (@{syntax clamod} * )
  5.1611 -    ;
  5.1612 -    (@@{method fastforce} | @@{method slowsimp} | @@{method bestsimp})
  5.1613 -      (@{syntax clasimpmod} * )
  5.1614 -    ;
  5.1615 -    @@{method deepen} (@{syntax nat} ?) (@{syntax clamod} * )
  5.1616 -    ;
  5.1617 -    @{syntax_def clamod}:
  5.1618 -      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del') ':' @{syntax thmrefs}
  5.1619 -    ;
  5.1620 -    @{syntax_def clasimpmod}: ('simp' (() | 'add' | 'del' | 'only') |
  5.1621 -      ('cong' | 'split') (() | 'add' | 'del') |
  5.1622 -      'iff' (((() | 'add') '?'?) | 'del') |
  5.1623 -      (('intro' | 'elim' | 'dest') ('!' | () | '?') | 'del')) ':' @{syntax thmrefs}
  5.1624 -  \<close>}
  5.1625 -
  5.1626 -  \begin{description}
  5.1627 -
  5.1628 -  \item @{method blast} is a separate classical tableau prover that
  5.1629 -  uses the same classical rule declarations as explained before.
  5.1630 -
  5.1631 -  Proof search is coded directly in ML using special data structures.
  5.1632 -  A successful proof is then reconstructed using regular Isabelle
  5.1633 -  inferences.  It is faster and more powerful than the other classical
  5.1634 -  reasoning tools, but has major limitations too.
  5.1635 -
  5.1636 -  \begin{itemize}
  5.1637 -
  5.1638 -  \item It does not use the classical wrapper tacticals, such as the
  5.1639 -  integration with the Simplifier of @{method fastforce}.
  5.1640 -
  5.1641 -  \item It does not perform higher-order unification, as needed by the
  5.1642 -  rule @{thm [source=false] rangeI} in HOL.  There are often
  5.1643 -  alternatives to such rules, for example @{thm [source=false]
  5.1644 -  range_eqI}.
  5.1645 -
  5.1646 -  \item Function variables may only be applied to parameters of the
  5.1647 -  subgoal.  (This restriction arises because the prover does not use
  5.1648 -  higher-order unification.)  If other function variables are present
  5.1649 -  then the prover will fail with the message \texttt{Function Var's
  5.1650 -  argument not a bound variable}.
  5.1651 -
  5.1652 -  \item Its proof strategy is more general than @{method fast} but can
  5.1653 -  be slower.  If @{method blast} fails or seems to be running forever,
  5.1654 -  try @{method fast} and the other proof tools described below.
  5.1655 -
  5.1656 -  \end{itemize}
  5.1657 -
  5.1658 -  The optional integer argument specifies a bound for the number of
  5.1659 -  unsafe steps used in a proof.  By default, @{method blast} starts
  5.1660 -  with a bound of 0 and increases it successively to 20.  In contrast,
  5.1661 -  @{text "(blast lim)"} tries to prove the goal using a search bound
  5.1662 -  of @{text "lim"}.  Sometimes a slow proof using @{method blast} can
  5.1663 -  be made much faster by supplying the successful search bound to this
  5.1664 -  proof method instead.
  5.1665 -
  5.1666 -  \item @{method auto} combines classical reasoning with
  5.1667 -  simplification.  It is intended for situations where there are a lot
  5.1668 -  of mostly trivial subgoals; it proves all the easy ones, leaving the
  5.1669 -  ones it cannot prove.  Occasionally, attempting to prove the hard
  5.1670 -  ones may take a long time.
  5.1671 -
  5.1672 -  The optional depth arguments in @{text "(auto m n)"} refer to its
  5.1673 -  builtin classical reasoning procedures: @{text m} (default 4) is for
  5.1674 -  @{method blast}, which is tried first, and @{text n} (default 2) is
  5.1675 -  for a slower but more general alternative that also takes wrappers
  5.1676 -  into account.
  5.1677 -
  5.1678 -  \item @{method force} is intended to prove the first subgoal
  5.1679 -  completely, using many fancy proof tools and performing a rather
  5.1680 -  exhaustive search.  As a result, proof attempts may take rather long
  5.1681 -  or diverge easily.
  5.1682 -
  5.1683 -  \item @{method fast}, @{method best}, @{method slow} attempt to
  5.1684 -  prove the first subgoal using sequent-style reasoning as explained
  5.1685 -  before.  Unlike @{method blast}, they construct proofs directly in
  5.1686 -  Isabelle.
  5.1687 -
  5.1688 -  There is a difference in search strategy and back-tracking: @{method
  5.1689 -  fast} uses depth-first search and @{method best} uses best-first
  5.1690 -  search (guided by a heuristic function: normally the total size of
  5.1691 -  the proof state).
  5.1692 -
  5.1693 -  Method @{method slow} is like @{method fast}, but conducts a broader
  5.1694 -  search: it may, when backtracking from a failed proof attempt, undo
  5.1695 -  even the step of proving a subgoal by assumption.
  5.1696 -
  5.1697 -  \item @{method fastforce}, @{method slowsimp}, @{method bestsimp}
  5.1698 -  are like @{method fast}, @{method slow}, @{method best},
  5.1699 -  respectively, but use the Simplifier as additional wrapper. The name
  5.1700 -  @{method fastforce}, reflects the behaviour of this popular method
  5.1701 -  better without requiring an understanding of its implementation.
  5.1702 -
  5.1703 -  \item @{method deepen} works by exhaustive search up to a certain
  5.1704 -  depth.  The start depth is 4 (unless specified explicitly), and the
  5.1705 -  depth is increased iteratively up to 10.  Unsafe rules are modified
  5.1706 -  to preserve the formula they act on, so that it be used repeatedly.
  5.1707 -  This method can prove more goals than @{method fast}, but is much
  5.1708 -  slower, for example if the assumptions have many universal
  5.1709 -  quantifiers.
  5.1710 -
  5.1711 -  \end{description}
  5.1712 -
  5.1713 -  Any of the above methods support additional modifiers of the context
  5.1714 -  of classical (and simplifier) rules, but the ones related to the
  5.1715 -  Simplifier are explicitly prefixed by @{text simp} here.  The
  5.1716 -  semantics of these ad-hoc rule declarations is analogous to the
  5.1717 -  attributes given before.  Facts provided by forward chaining are
  5.1718 -  inserted into the goal before commencing proof search.
  5.1719 -*}
  5.1720 -
  5.1721 -
  5.1722 -subsection {* Partially automated methods *}
  5.1723 -
  5.1724 -text {* These proof methods may help in situations when the
  5.1725 -  fully-automated tools fail.  The result is a simpler subgoal that
  5.1726 -  can be tackled by other means, such as by manual instantiation of
  5.1727 -  quantifiers.
  5.1728 -
  5.1729 -  \begin{matharray}{rcl}
  5.1730 -    @{method_def safe} & : & @{text method} \\
  5.1731 -    @{method_def clarify} & : & @{text method} \\
  5.1732 -    @{method_def clarsimp} & : & @{text method} \\
  5.1733 -  \end{matharray}
  5.1734 -
  5.1735 -  @{rail \<open>
  5.1736 -    (@@{method safe} | @@{method clarify}) (@{syntax clamod} * )
  5.1737 -    ;
  5.1738 -    @@{method clarsimp} (@{syntax clasimpmod} * )
  5.1739 -  \<close>}
  5.1740 -
  5.1741 -  \begin{description}
  5.1742 -
  5.1743 -  \item @{method safe} repeatedly performs safe steps on all subgoals.
  5.1744 -  It is deterministic, with at most one outcome.
  5.1745 -
  5.1746 -  \item @{method clarify} performs a series of safe steps without
  5.1747 -  splitting subgoals; see also @{method clarify_step}.
  5.1748 -
  5.1749 -  \item @{method clarsimp} acts like @{method clarify}, but also does
  5.1750 -  simplification.  Note that if the Simplifier context includes a
  5.1751 -  splitter for the premises, the subgoal may still be split.
  5.1752 -
  5.1753 -  \end{description}
  5.1754 -*}
  5.1755 -
  5.1756 -
  5.1757 -subsection {* Single-step tactics *}
  5.1758 -
  5.1759 -text {*
  5.1760 -  \begin{matharray}{rcl}
  5.1761 -    @{method_def safe_step} & : & @{text method} \\
  5.1762 -    @{method_def inst_step} & : & @{text method} \\
  5.1763 -    @{method_def step} & : & @{text method} \\
  5.1764 -    @{method_def slow_step} & : & @{text method} \\
  5.1765 -    @{method_def clarify_step} & : &  @{text method} \\
  5.1766 -  \end{matharray}
  5.1767 -
  5.1768 -  These are the primitive tactics behind the automated proof methods
  5.1769 -  of the Classical Reasoner.  By calling them yourself, you can
  5.1770 -  execute these procedures one step at a time.
  5.1771 -
  5.1772 -  \begin{description}
  5.1773 -
  5.1774 -  \item @{method safe_step} performs a safe step on the first subgoal.
  5.1775 -  The safe wrapper tacticals are applied to a tactic that may include
  5.1776 -  proof by assumption or Modus Ponens (taking care not to instantiate
  5.1777 -  unknowns), or substitution.
  5.1778 -
  5.1779 -  \item @{method inst_step} is like @{method safe_step}, but allows
  5.1780 -  unknowns to be instantiated.
  5.1781 -
  5.1782 -  \item @{method step} is the basic step of the proof procedure, it
  5.1783 -  operates on the first subgoal.  The unsafe wrapper tacticals are
  5.1784 -  applied to a tactic that tries @{method safe}, @{method inst_step},
  5.1785 -  or applies an unsafe rule from the context.
  5.1786 -
  5.1787 -  \item @{method slow_step} resembles @{method step}, but allows
  5.1788 -  backtracking between using safe rules with instantiation (@{method
  5.1789 -  inst_step}) and using unsafe rules.  The resulting search space is
  5.1790 -  larger.
  5.1791 -
  5.1792 -  \item @{method clarify_step} performs a safe step on the first
  5.1793 -  subgoal; no splitting step is applied.  For example, the subgoal
  5.1794 -  @{text "A \<and> B"} is left as a conjunction.  Proof by assumption,
  5.1795 -  Modus Ponens, etc., may be performed provided they do not
  5.1796 -  instantiate unknowns.  Assumptions of the form @{text "x = t"} may
  5.1797 -  be eliminated.  The safe wrapper tactical is applied.
  5.1798 -
  5.1799 -  \end{description}
  5.1800 -*}
  5.1801 -
  5.1802 -
  5.1803 -subsection {* Modifying the search step *}
  5.1804 -
  5.1805 -text {*
  5.1806 -  \begin{mldecls}
  5.1807 -    @{index_ML_type wrapper: "(int -> tactic) -> (int -> tactic)"} \\[0.5ex]
  5.1808 -    @{index_ML_op addSWrapper: "Proof.context *
  5.1809 -  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
  5.1810 -    @{index_ML_op addSbefore: "Proof.context *
  5.1811 -  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
  5.1812 -    @{index_ML_op addSafter: "Proof.context *
  5.1813 -  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
  5.1814 -    @{index_ML_op delSWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
  5.1815 -    @{index_ML_op addWrapper: "Proof.context *
  5.1816 -  (string * (Proof.context -> wrapper)) -> Proof.context"} \\
  5.1817 -    @{index_ML_op addbefore: "Proof.context *
  5.1818 -  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
  5.1819 -    @{index_ML_op addafter: "Proof.context *
  5.1820 -  (string * (Proof.context -> int -> tactic)) -> Proof.context"} \\
  5.1821 -    @{index_ML_op delWrapper: "Proof.context * string -> Proof.context"} \\[0.5ex]
  5.1822 -    @{index_ML addSss: "Proof.context -> Proof.context"} \\
  5.1823 -    @{index_ML addss: "Proof.context -> Proof.context"} \\
  5.1824 -  \end{mldecls}
  5.1825 -
  5.1826 -  The proof strategy of the Classical Reasoner is simple.  Perform as
  5.1827 -  many safe inferences as possible; or else, apply certain safe rules,
  5.1828 -  allowing instantiation of unknowns; or else, apply an unsafe rule.
  5.1829 -  The tactics also eliminate assumptions of the form @{text "x = t"}
  5.1830 -  by substitution if they have been set up to do so.  They may perform
  5.1831 -  a form of Modus Ponens: if there are assumptions @{text "P \<longrightarrow> Q"} and
  5.1832 -  @{text "P"}, then replace @{text "P \<longrightarrow> Q"} by @{text "Q"}.
  5.1833 -
  5.1834 -  The classical reasoning tools --- except @{method blast} --- allow
  5.1835 -  to modify this basic proof strategy by applying two lists of
  5.1836 -  arbitrary \emph{wrapper tacticals} to it.  The first wrapper list,
  5.1837 -  which is considered to contain safe wrappers only, affects @{method
  5.1838 -  safe_step} and all the tactics that call it.  The second one, which
  5.1839 -  may contain unsafe wrappers, affects the unsafe parts of @{method
  5.1840 -  step}, @{method slow_step}, and the tactics that call them.  A
  5.1841 -  wrapper transforms each step of the search, for example by
  5.1842 -  attempting other tactics before or after the original step tactic.
  5.1843 -  All members of a wrapper list are applied in turn to the respective
  5.1844 -  step tactic.
  5.1845 -
  5.1846 -  Initially the two wrapper lists are empty, which means no
  5.1847 -  modification of the step tactics. Safe and unsafe wrappers are added
  5.1848 -  to a claset with the functions given below, supplying them with
  5.1849 -  wrapper names.  These names may be used to selectively delete
  5.1850 -  wrappers.
  5.1851 -
  5.1852 -  \begin{description}
  5.1853 -
  5.1854 -  \item @{text "ctxt addSWrapper (name, wrapper)"} adds a new wrapper,
  5.1855 -  which should yield a safe tactic, to modify the existing safe step
  5.1856 -  tactic.
  5.1857 -
  5.1858 -  \item @{text "ctxt addSbefore (name, tac)"} adds the given tactic as a
  5.1859 -  safe wrapper, such that it is tried \emph{before} each safe step of
  5.1860 -  the search.
  5.1861 -
  5.1862 -  \item @{text "ctxt addSafter (name, tac)"} adds the given tactic as a
  5.1863 -  safe wrapper, such that it is tried when a safe step of the search
  5.1864 -  would fail.
  5.1865 -
  5.1866 -  \item @{text "ctxt delSWrapper name"} deletes the safe wrapper with
  5.1867 -  the given name.
  5.1868 -
  5.1869 -  \item @{text "ctxt addWrapper (name, wrapper)"} adds a new wrapper to
  5.1870 -  modify the existing (unsafe) step tactic.
  5.1871 -
  5.1872 -  \item @{text "ctxt addbefore (name, tac)"} adds the given tactic as an
  5.1873 -  unsafe wrapper, such that it its result is concatenated
  5.1874 -  \emph{before} the result of each unsafe step.
  5.1875 -
  5.1876 -  \item @{text "ctxt addafter (name, tac)"} adds the given tactic as an
  5.1877 -  unsafe wrapper, such that it its result is concatenated \emph{after}
  5.1878 -  the result of each unsafe step.
  5.1879 -
  5.1880 -  \item @{text "ctxt delWrapper name"} deletes the unsafe wrapper with
  5.1881 -  the given name.
  5.1882 -
  5.1883 -  \item @{text "addSss"} adds the simpset of the context to its
  5.1884 -  classical set. The assumptions and goal will be simplified, in a
  5.1885 -  rather safe way, after each safe step of the search.
  5.1886 -
  5.1887 -  \item @{text "addss"} adds the simpset of the context to its
  5.1888 -  classical set. The assumptions and goal will be simplified, before
  5.1889 -  the each unsafe step of the search.
  5.1890 -
  5.1891 -  \end{description}
  5.1892 -*}
  5.1893 -
  5.1894 -
  5.1895 -section {* Object-logic setup \label{sec:object-logic} *}
  5.1896 -
  5.1897 -text {*
  5.1898 -  \begin{matharray}{rcl}
  5.1899 -    @{command_def "judgment"} & : & @{text "theory \<rightarrow> theory"} \\
  5.1900 -    @{method_def atomize} & : & @{text method} \\
  5.1901 -    @{attribute_def atomize} & : & @{text attribute} \\
  5.1902 -    @{attribute_def rule_format} & : & @{text attribute} \\
  5.1903 -    @{attribute_def rulify} & : & @{text attribute} \\
  5.1904 -  \end{matharray}
  5.1905 -
  5.1906 -  The very starting point for any Isabelle object-logic is a ``truth
  5.1907 -  judgment'' that links object-level statements to the meta-logic
  5.1908 -  (with its minimal language of @{text prop} that covers universal
  5.1909 -  quantification @{text "\<And>"} and implication @{text "\<Longrightarrow>"}).
  5.1910 -
  5.1911 -  Common object-logics are sufficiently expressive to internalize rule
  5.1912 -  statements over @{text "\<And>"} and @{text "\<Longrightarrow>"} within their own
  5.1913 -  language.  This is useful in certain situations where a rule needs
  5.1914 -  to be viewed as an atomic statement from the meta-level perspective,
  5.1915 -  e.g.\ @{text "\<And>x. x \<in> A \<Longrightarrow> P x"} versus @{text "\<forall>x \<in> A. P x"}.
  5.1916 -
  5.1917 -  From the following language elements, only the @{method atomize}
  5.1918 -  method and @{attribute rule_format} attribute are occasionally
  5.1919 -  required by end-users, the rest is for those who need to setup their
  5.1920 -  own object-logic.  In the latter case existing formulations of
  5.1921 -  Isabelle/FOL or Isabelle/HOL may be taken as realistic examples.
  5.1922 -
  5.1923 -  Generic tools may refer to the information provided by object-logic
  5.1924 -  declarations internally.
  5.1925 -
  5.1926 -  @{rail \<open>
  5.1927 -    @@{command judgment} @{syntax name} '::' @{syntax type} @{syntax mixfix}?
  5.1928 -    ;
  5.1929 -    @@{attribute atomize} ('(' 'full' ')')?
  5.1930 -    ;
  5.1931 -    @@{attribute rule_format} ('(' 'noasm' ')')?
  5.1932 -  \<close>}
  5.1933 -
  5.1934 -  \begin{description}
  5.1935 -  
  5.1936 -  \item @{command "judgment"}~@{text "c :: \<sigma> (mx)"} declares constant
  5.1937 -  @{text c} as the truth judgment of the current object-logic.  Its
  5.1938 -  type @{text \<sigma>} should specify a coercion of the category of
  5.1939 -  object-level propositions to @{text prop} of the Pure meta-logic;
  5.1940 -  the mixfix annotation @{text "(mx)"} would typically just link the
  5.1941 -  object language (internally of syntactic category @{text logic})
  5.1942 -  with that of @{text prop}.  Only one @{command "judgment"}
  5.1943 -  declaration may be given in any theory development.
  5.1944 -  
  5.1945 -  \item @{method atomize} (as a method) rewrites any non-atomic
  5.1946 -  premises of a sub-goal, using the meta-level equations declared via
  5.1947 -  @{attribute atomize} (as an attribute) beforehand.  As a result,
  5.1948 -  heavily nested goals become amenable to fundamental operations such
  5.1949 -  as resolution (cf.\ the @{method (Pure) rule} method).  Giving the ``@{text
  5.1950 -  "(full)"}'' option here means to turn the whole subgoal into an
  5.1951 -  object-statement (if possible), including the outermost parameters
  5.1952 -  and assumptions as well.
  5.1953 -
  5.1954 -  A typical collection of @{attribute atomize} rules for a particular
  5.1955 -  object-logic would provide an internalization for each of the
  5.1956 -  connectives of @{text "\<And>"}, @{text "\<Longrightarrow>"}, and @{text "\<equiv>"}.
  5.1957 -  Meta-level conjunction should be covered as well (this is
  5.1958 -  particularly important for locales, see \secref{sec:locale}).
  5.1959 -
  5.1960 -  \item @{attribute rule_format} rewrites a theorem by the equalities
  5.1961 -  declared as @{attribute rulify} rules in the current object-logic.
  5.1962 -  By default, the result is fully normalized, including assumptions
  5.1963 -  and conclusions at any depth.  The @{text "(no_asm)"} option
  5.1964 -  restricts the transformation to the conclusion of a rule.
  5.1965 -
  5.1966 -  In common object-logics (HOL, FOL, ZF), the effect of @{attribute
  5.1967 -  rule_format} is to replace (bounded) universal quantification
  5.1968 -  (@{text "\<forall>"}) and implication (@{text "\<longrightarrow>"}) by the corresponding
  5.1969 -  rule statements over @{text "\<And>"} and @{text "\<Longrightarrow>"}.
  5.1970 -
  5.1971 -  \end{description}
  5.1972 -*}
  5.1973 -
  5.1974 -
  5.1975 -section {* Tracing higher-order unification *}
  5.1976 -
  5.1977 -text {*
  5.1978 -  \begin{tabular}{rcll}
  5.1979 -    @{attribute_def unify_trace_simp} & : & @{text "attribute"} & default @{text "false"} \\
  5.1980 -    @{attribute_def unify_trace_types} & : & @{text "attribute"} & default @{text "false"} \\
  5.1981 -    @{attribute_def unify_trace_bound} & : & @{text "attribute"} & default @{text "50"} \\
  5.1982 -    @{attribute_def unify_search_bound} & : & @{text "attribute"} & default @{text "60"} \\
  5.1983 -  \end{tabular}
  5.1984 -  \medskip
  5.1985 -
  5.1986 -  Higher-order unification works well in most practical situations,
  5.1987 -  but sometimes needs extra care to identify problems.  These tracing
  5.1988 -  options may help.
  5.1989 -
  5.1990 -  \begin{description}
  5.1991 -
  5.1992 -  \item @{attribute unify_trace_simp} controls tracing of the
  5.1993 -  simplification phase of higher-order unification.
  5.1994 -
  5.1995 -  \item @{attribute unify_trace_types} controls warnings of
  5.1996 -  incompleteness, when unification is not considering all possible
  5.1997 -  instantiations of schematic type variables.
  5.1998 -
  5.1999 -  \item @{attribute unify_trace_bound} determines the depth where
  5.2000 -  unification starts to print tracing information once it reaches
  5.2001 -  depth; 0 for full tracing.  At the default value, tracing
  5.2002 -  information is almost never printed in practice.
  5.2003 -
  5.2004 -  \item @{attribute unify_search_bound} prevents unification from
  5.2005 -  searching past the given depth.  Because of this bound, higher-order
  5.2006 -  unification cannot return an infinite sequence, though it can return
  5.2007 -  an exponentially long one.  The search rarely approaches the default
  5.2008 -  value in practice.  If the search is cut off, unification prints a
  5.2009 -  warning ``Unification bound exceeded''.
  5.2010 -
  5.2011 -  \end{description}
  5.2012 -
  5.2013 -  \begin{warn}
  5.2014 -  Options for unification cannot be modified in a local context.  Only
  5.2015 -  the global theory content is taken into account.
  5.2016 -  \end{warn}
  5.2017 -*}
  5.2018 -
  5.2019 -end
     6.1 --- a/src/Doc/Isar-Ref/HOL_Specific.thy	Mon Apr 07 16:37:57 2014 +0200
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,2677 +0,0 @@
     6.4 -theory HOL_Specific
     6.5 -imports Base Main "~~/src/HOL/Library/Old_Recdef" "~~/src/Tools/Adhoc_Overloading"
     6.6 -begin
     6.7 -
     6.8 -chapter {* Higher-Order Logic *}
     6.9 -
    6.10 -text {* Isabelle/HOL is based on Higher-Order Logic, a polymorphic
    6.11 -  version of Church's Simple Theory of Types.  HOL can be best
    6.12 -  understood as a simply-typed version of classical set theory.  The
    6.13 -  logic was first implemented in Gordon's HOL system
    6.14 -  \cite{mgordon-hol}.  It extends Church's original logic
    6.15 -  \cite{church40} by explicit type variables (naive polymorphism) and
    6.16 -  a sound axiomatization scheme for new types based on subsets of
    6.17 -  existing types.
    6.18 -
    6.19 -  Andrews's book \cite{andrews86} is a full description of the
    6.20 -  original Church-style higher-order logic, with proofs of correctness
    6.21 -  and completeness wrt.\ certain set-theoretic interpretations.  The
    6.22 -  particular extensions of Gordon-style HOL are explained semantically
    6.23 -  in two chapters of the 1993 HOL book \cite{pitts93}.
    6.24 -
    6.25 -  Experience with HOL over decades has demonstrated that higher-order
    6.26 -  logic is widely applicable in many areas of mathematics and computer
    6.27 -  science.  In a sense, Higher-Order Logic is simpler than First-Order
    6.28 -  Logic, because there are fewer restrictions and special cases.  Note
    6.29 -  that HOL is \emph{weaker} than FOL with axioms for ZF set theory,
    6.30 -  which is traditionally considered the standard foundation of regular
    6.31 -  mathematics, but for most applications this does not matter.  If you
    6.32 -  prefer ML to Lisp, you will probably prefer HOL to ZF.
    6.33 -
    6.34 -  \medskip The syntax of HOL follows @{text "\<lambda>"}-calculus and
    6.35 -  functional programming.  Function application is curried.  To apply
    6.36 -  the function @{text f} of type @{text "\<tau>\<^sub>1 \<Rightarrow> \<tau>\<^sub>2 \<Rightarrow> \<tau>\<^sub>3"} to the
    6.37 -  arguments @{text a} and @{text b} in HOL, you simply write @{text "f
    6.38 -  a b"} (as in ML or Haskell).  There is no ``apply'' operator; the
    6.39 -  existing application of the Pure @{text "\<lambda>"}-calculus is re-used.
    6.40 -  Note that in HOL @{text "f (a, b)"} means ``@{text "f"} applied to
    6.41 -  the pair @{text "(a, b)"} (which is notation for @{text "Pair a
    6.42 -  b"}).  The latter typically introduces extra formal efforts that can
    6.43 -  be avoided by currying functions by default.  Explicit tuples are as
    6.44 -  infrequent in HOL formalizations as in good ML or Haskell programs.
    6.45 -
    6.46 -  \medskip Isabelle/HOL has a distinct feel, compared to other
    6.47 -  object-logics like Isabelle/ZF.  It identifies object-level types
    6.48 -  with meta-level types, taking advantage of the default
    6.49 -  type-inference mechanism of Isabelle/Pure.  HOL fully identifies
    6.50 -  object-level functions with meta-level functions, with native
    6.51 -  abstraction and application.
    6.52 -
    6.53 -  These identifications allow Isabelle to support HOL particularly
    6.54 -  nicely, but they also mean that HOL requires some sophistication
    6.55 -  from the user.  In particular, an understanding of Hindley-Milner
    6.56 -  type-inference with type-classes, which are both used extensively in
    6.57 -  the standard libraries and applications.  Beginners can set
    6.58 -  @{attribute show_types} or even @{attribute show_sorts} to get more
    6.59 -  explicit information about the result of type-inference.  *}
    6.60 -
    6.61 -
    6.62 -chapter {* Derived specification elements *}
    6.63 -
    6.64 -section {* Inductive and coinductive definitions \label{sec:hol-inductive} *}
    6.65 -
    6.66 -text {*
    6.67 -  \begin{matharray}{rcl}
    6.68 -    @{command_def (HOL) "inductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    6.69 -    @{command_def (HOL) "inductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    6.70 -    @{command_def (HOL) "coinductive"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    6.71 -    @{command_def (HOL) "coinductive_set"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
    6.72 -    @{command_def "print_inductives"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    6.73 -    @{attribute_def (HOL) mono} & : & @{text attribute} \\
    6.74 -  \end{matharray}
    6.75 -
    6.76 -  An \emph{inductive definition} specifies the least predicate or set
    6.77 -  @{text R} closed under given rules: applying a rule to elements of
    6.78 -  @{text R} yields a result within @{text R}.  For example, a
    6.79 -  structural operational semantics is an inductive definition of an
    6.80 -  evaluation relation.
    6.81 -
    6.82 -  Dually, a \emph{coinductive definition} specifies the greatest
    6.83 -  predicate or set @{text R} that is consistent with given rules:
    6.84 -  every element of @{text R} can be seen as arising by applying a rule
    6.85 -  to elements of @{text R}.  An important example is using
    6.86 -  bisimulation relations to formalise equivalence of processes and
    6.87 -  infinite data structures.
    6.88 -
    6.89 -  Both inductive and coinductive definitions are based on the
    6.90 -  Knaster-Tarski fixed-point theorem for complete lattices.  The
    6.91 -  collection of introduction rules given by the user determines a
    6.92 -  functor on subsets of set-theoretic relations.  The required
    6.93 -  monotonicity of the recursion scheme is proven as a prerequisite to
    6.94 -  the fixed-point definition and the resulting consequences.  This
    6.95 -  works by pushing inclusion through logical connectives and any other
    6.96 -  operator that might be wrapped around recursive occurrences of the
    6.97 -  defined relation: there must be a monotonicity theorem of the form
    6.98 -  @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for each premise @{text "\<M> R t"} in an
    6.99 -  introduction rule.  The default rule declarations of Isabelle/HOL
   6.100 -  already take care of most common situations.
   6.101 -
   6.102 -  @{rail \<open>
   6.103 -    (@@{command (HOL) inductive} | @@{command (HOL) inductive_set} |
   6.104 -      @@{command (HOL) coinductive} | @@{command (HOL) coinductive_set})
   6.105 -    @{syntax target}? \<newline>
   6.106 -    @{syntax "fixes"} (@'for' @{syntax "fixes"})? (@'where' clauses)? \<newline>
   6.107 -    (@'monos' @{syntax thmrefs})?
   6.108 -    ;
   6.109 -    clauses: (@{syntax thmdecl}? @{syntax prop} + '|')
   6.110 -    ;
   6.111 -    @@{attribute (HOL) mono} (() | 'add' | 'del')
   6.112 -  \<close>}
   6.113 -
   6.114 -  \begin{description}
   6.115 -
   6.116 -  \item @{command (HOL) "inductive"} and @{command (HOL)
   6.117 -  "coinductive"} define (co)inductive predicates from the introduction
   6.118 -  rules.
   6.119 -
   6.120 -  The propositions given as @{text "clauses"} in the @{keyword
   6.121 -  "where"} part are either rules of the usual @{text "\<And>/\<Longrightarrow>"} format
   6.122 -  (with arbitrary nesting), or equalities using @{text "\<equiv>"}.  The
   6.123 -  latter specifies extra-logical abbreviations in the sense of
   6.124 -  @{command_ref abbreviation}.  Introducing abstract syntax
   6.125 -  simultaneously with the actual introduction rules is occasionally
   6.126 -  useful for complex specifications.
   6.127 -
   6.128 -  The optional @{keyword "for"} part contains a list of parameters of
   6.129 -  the (co)inductive predicates that remain fixed throughout the
   6.130 -  definition, in contrast to arguments of the relation that may vary
   6.131 -  in each occurrence within the given @{text "clauses"}.
   6.132 -
   6.133 -  The optional @{keyword "monos"} declaration contains additional
   6.134 -  \emph{monotonicity theorems}, which are required for each operator
   6.135 -  applied to a recursive set in the introduction rules.
   6.136 -
   6.137 -  \item @{command (HOL) "inductive_set"} and @{command (HOL)
   6.138 -  "coinductive_set"} are wrappers for to the previous commands for
   6.139 -  native HOL predicates.  This allows to define (co)inductive sets,
   6.140 -  where multiple arguments are simulated via tuples.
   6.141 -
   6.142 -  \item @{command "print_inductives"} prints (co)inductive definitions and
   6.143 -  monotonicity rules.
   6.144 -
   6.145 -  \item @{attribute (HOL) mono} declares monotonicity rules in the
   6.146 -  context.  These rule are involved in the automated monotonicity
   6.147 -  proof of the above inductive and coinductive definitions.
   6.148 -
   6.149 -  \end{description}
   6.150 -*}
   6.151 -
   6.152 -
   6.153 -subsection {* Derived rules *}
   6.154 -
   6.155 -text {* A (co)inductive definition of @{text R} provides the following
   6.156 -  main theorems:
   6.157 -
   6.158 -  \begin{description}
   6.159 -
   6.160 -  \item @{text R.intros} is the list of introduction rules as proven
   6.161 -  theorems, for the recursive predicates (or sets).  The rules are
   6.162 -  also available individually, using the names given them in the
   6.163 -  theory file;
   6.164 -
   6.165 -  \item @{text R.cases} is the case analysis (or elimination) rule;
   6.166 -
   6.167 -  \item @{text R.induct} or @{text R.coinduct} is the (co)induction
   6.168 -  rule;
   6.169 -
   6.170 -  \item @{text R.simps} is the equation unrolling the fixpoint of the
   6.171 -  predicate one step.
   6.172 -
   6.173 -  \end{description}
   6.174 -
   6.175 -  When several predicates @{text "R\<^sub>1, \<dots>, R\<^sub>n"} are
   6.176 -  defined simultaneously, the list of introduction rules is called
   6.177 -  @{text "R\<^sub>1_\<dots>_R\<^sub>n.intros"}, the case analysis rules are
   6.178 -  called @{text "R\<^sub>1.cases, \<dots>, R\<^sub>n.cases"}, and the list
   6.179 -  of mutual induction rules is called @{text
   6.180 -  "R\<^sub>1_\<dots>_R\<^sub>n.inducts"}.
   6.181 -*}
   6.182 -
   6.183 -
   6.184 -subsection {* Monotonicity theorems *}
   6.185 -
   6.186 -text {* The context maintains a default set of theorems that are used
   6.187 -  in monotonicity proofs.  New rules can be declared via the
   6.188 -  @{attribute (HOL) mono} attribute.  See the main Isabelle/HOL
   6.189 -  sources for some examples.  The general format of such monotonicity
   6.190 -  theorems is as follows:
   6.191 -
   6.192 -  \begin{itemize}
   6.193 -
   6.194 -  \item Theorems of the form @{text "A \<le> B \<Longrightarrow> \<M> A \<le> \<M> B"}, for proving
   6.195 -  monotonicity of inductive definitions whose introduction rules have
   6.196 -  premises involving terms such as @{text "\<M> R t"}.
   6.197 -
   6.198 -  \item Monotonicity theorems for logical operators, which are of the
   6.199 -  general form @{text "(\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> (\<dots> \<longrightarrow> \<dots>) \<Longrightarrow> \<dots> \<longrightarrow> \<dots>"}.  For example, in
   6.200 -  the case of the operator @{text "\<or>"}, the corresponding theorem is
   6.201 -  \[
   6.202 -  \infer{@{text "P\<^sub>1 \<or> P\<^sub>2 \<longrightarrow> Q\<^sub>1 \<or> Q\<^sub>2"}}{@{text "P\<^sub>1 \<longrightarrow> Q\<^sub>1"} & @{text "P\<^sub>2 \<longrightarrow> Q\<^sub>2"}}
   6.203 -  \]
   6.204 -
   6.205 -  \item De Morgan style equations for reasoning about the ``polarity''
   6.206 -  of expressions, e.g.
   6.207 -  \[
   6.208 -  @{prop "\<not> \<not> P \<longleftrightarrow> P"} \qquad\qquad
   6.209 -  @{prop "\<not> (P \<and> Q) \<longleftrightarrow> \<not> P \<or> \<not> Q"}
   6.210 -  \]
   6.211 -
   6.212 -  \item Equations for reducing complex operators to more primitive
   6.213 -  ones whose monotonicity can easily be proved, e.g.
   6.214 -  \[
   6.215 -  @{prop "(P \<longrightarrow> Q) \<longleftrightarrow> \<not> P \<or> Q"} \qquad\qquad
   6.216 -  @{prop "Ball A P \<equiv> \<forall>x. x \<in> A \<longrightarrow> P x"}
   6.217 -  \]
   6.218 -
   6.219 -  \end{itemize}
   6.220 -*}
   6.221 -
   6.222 -subsubsection {* Examples *}
   6.223 -
   6.224 -text {* The finite powerset operator can be defined inductively like this: *}
   6.225 -
   6.226 -inductive_set Fin :: "'a set \<Rightarrow> 'a set set" for A :: "'a set"
   6.227 -where
   6.228 -  empty: "{} \<in> Fin A"
   6.229 -| insert: "a \<in> A \<Longrightarrow> B \<in> Fin A \<Longrightarrow> insert a B \<in> Fin A"
   6.230 -
   6.231 -text {* The accessible part of a relation is defined as follows: *}
   6.232 -
   6.233 -inductive acc :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
   6.234 -  for r :: "'a \<Rightarrow> 'a \<Rightarrow> bool"  (infix "\<prec>" 50)
   6.235 -where acc: "(\<And>y. y \<prec> x \<Longrightarrow> acc r y) \<Longrightarrow> acc r x"
   6.236 -
   6.237 -text {* Common logical connectives can be easily characterized as
   6.238 -non-recursive inductive definitions with parameters, but without
   6.239 -arguments. *}
   6.240 -
   6.241 -inductive AND for A B :: bool
   6.242 -where "A \<Longrightarrow> B \<Longrightarrow> AND A B"
   6.243 -
   6.244 -inductive OR for A B :: bool
   6.245 -where "A \<Longrightarrow> OR A B"
   6.246 -  | "B \<Longrightarrow> OR A B"
   6.247 -
   6.248 -inductive EXISTS for B :: "'a \<Rightarrow> bool"
   6.249 -where "B a \<Longrightarrow> EXISTS B"
   6.250 -
   6.251 -text {* Here the @{text "cases"} or @{text "induct"} rules produced by
   6.252 -  the @{command inductive} package coincide with the expected
   6.253 -  elimination rules for Natural Deduction.  Already in the original
   6.254 -  article by Gerhard Gentzen \cite{Gentzen:1935} there is a hint that
   6.255 -  each connective can be characterized by its introductions, and the
   6.256 -  elimination can be constructed systematically. *}
   6.257 -
   6.258 -
   6.259 -section {* Recursive functions \label{sec:recursion} *}
   6.260 -
   6.261 -text {*
   6.262 -  \begin{matharray}{rcl}
   6.263 -    @{command_def (HOL) "primrec"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   6.264 -    @{command_def (HOL) "fun"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   6.265 -    @{command_def (HOL) "function"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
   6.266 -    @{command_def (HOL) "termination"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
   6.267 -    @{command_def (HOL) "fun_cases"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   6.268 -  \end{matharray}
   6.269 -
   6.270 -  @{rail \<open>
   6.271 -    @@{command (HOL) primrec} @{syntax target}? @{syntax "fixes"} @'where' equations
   6.272 -    ;
   6.273 -    (@@{command (HOL) fun} | @@{command (HOL) function}) @{syntax target}? functionopts?
   6.274 -      @{syntax "fixes"} \<newline> @'where' equations
   6.275 -    ;
   6.276 -
   6.277 -    equations: (@{syntax thmdecl}? @{syntax prop} + '|')
   6.278 -    ;
   6.279 -    functionopts: '(' (('sequential' | 'domintros') + ',') ')'
   6.280 -    ;
   6.281 -    @@{command (HOL) termination} @{syntax term}?
   6.282 -    ;
   6.283 -    @@{command (HOL) fun_cases} (@{syntax thmdecl}? @{syntax prop} + @'and')
   6.284 -  \<close>}
   6.285 -
   6.286 -  \begin{description}
   6.287 -
   6.288 -  \item @{command (HOL) "primrec"} defines primitive recursive
   6.289 -  functions over datatypes (see also @{command_ref (HOL) datatype} and
   6.290 -  @{command_ref (HOL) rep_datatype}).  The given @{text equations}
   6.291 -  specify reduction rules that are produced by instantiating the
   6.292 -  generic combinator for primitive recursion that is available for
   6.293 -  each datatype.
   6.294 -
   6.295 -  Each equation needs to be of the form:
   6.296 -
   6.297 -  @{text [display] "f x\<^sub>1 \<dots> x\<^sub>m (C y\<^sub>1 \<dots> y\<^sub>k) z\<^sub>1 \<dots> z\<^sub>n = rhs"}
   6.298 -
   6.299 -  such that @{text C} is a datatype constructor, @{text rhs} contains
   6.300 -  only the free variables on the left-hand side (or from the context),
   6.301 -  and all recursive occurrences of @{text "f"} in @{text "rhs"} are of
   6.302 -  the form @{text "f \<dots> y\<^sub>i \<dots>"} for some @{text i}.  At most one
   6.303 -  reduction rule for each constructor can be given.  The order does
   6.304 -  not matter.  For missing constructors, the function is defined to
   6.305 -  return a default value, but this equation is made difficult to
   6.306 -  access for users.
   6.307 -
   6.308 -  The reduction rules are declared as @{attribute simp} by default,
   6.309 -  which enables standard proof methods like @{method simp} and
   6.310 -  @{method auto} to normalize expressions of @{text "f"} applied to
   6.311 -  datatype constructions, by simulating symbolic computation via
   6.312 -  rewriting.
   6.313 -
   6.314 -  \item @{command (HOL) "function"} defines functions by general
   6.315 -  wellfounded recursion. A detailed description with examples can be
   6.316 -  found in \cite{isabelle-function}. The function is specified by a
   6.317 -  set of (possibly conditional) recursive equations with arbitrary
   6.318 -  pattern matching. The command generates proof obligations for the
   6.319 -  completeness and the compatibility of patterns.
   6.320 -
   6.321 -  The defined function is considered partial, and the resulting
   6.322 -  simplification rules (named @{text "f.psimps"}) and induction rule
   6.323 -  (named @{text "f.pinduct"}) are guarded by a generated domain
   6.324 -  predicate @{text "f_dom"}. The @{command (HOL) "termination"}
   6.325 -  command can then be used to establish that the function is total.
   6.326 -
   6.327 -  \item @{command (HOL) "fun"} is a shorthand notation for ``@{command
   6.328 -  (HOL) "function"}~@{text "(sequential)"}, followed by automated
   6.329 -  proof attempts regarding pattern matching and termination.  See
   6.330 -  \cite{isabelle-function} for further details.
   6.331 -
   6.332 -  \item @{command (HOL) "termination"}~@{text f} commences a
   6.333 -  termination proof for the previously defined function @{text f}.  If
   6.334 -  this is omitted, the command refers to the most recent function
   6.335 -  definition.  After the proof is closed, the recursive equations and
   6.336 -  the induction principle is established.
   6.337 -
   6.338 -  \item @{command (HOL) "fun_cases"} generates specialized elimination
   6.339 -  rules for function equations. It expects one or more function equations
   6.340 -  and produces rules that eliminate the given equalities, following the cases
   6.341 -  given in the function definition.
   6.342 -  \end{description}
   6.343 -
   6.344 -  Recursive definitions introduced by the @{command (HOL) "function"}
   6.345 -  command accommodate reasoning by induction (cf.\ @{method induct}):
   6.346 -  rule @{text "f.induct"} refers to a specific induction rule, with
   6.347 -  parameters named according to the user-specified equations. Cases
   6.348 -  are numbered starting from 1.  For @{command (HOL) "primrec"}, the
   6.349 -  induction principle coincides with structural recursion on the
   6.350 -  datatype where the recursion is carried out.
   6.351 -
   6.352 -  The equations provided by these packages may be referred later as
   6.353 -  theorem list @{text "f.simps"}, where @{text f} is the (collective)
   6.354 -  name of the functions defined.  Individual equations may be named
   6.355 -  explicitly as well.
   6.356 -
   6.357 -  The @{command (HOL) "function"} command accepts the following
   6.358 -  options.
   6.359 -
   6.360 -  \begin{description}
   6.361 -
   6.362 -  \item @{text sequential} enables a preprocessor which disambiguates
   6.363 -  overlapping patterns by making them mutually disjoint.  Earlier
   6.364 -  equations take precedence over later ones.  This allows to give the
   6.365 -  specification in a format very similar to functional programming.
   6.366 -  Note that the resulting simplification and induction rules
   6.367 -  correspond to the transformed specification, not the one given
   6.368 -  originally. This usually means that each equation given by the user
   6.369 -  may result in several theorems.  Also note that this automatic
   6.370 -  transformation only works for ML-style datatype patterns.
   6.371 -
   6.372 -  \item @{text domintros} enables the automated generation of
   6.373 -  introduction rules for the domain predicate. While mostly not
   6.374 -  needed, they can be helpful in some proofs about partial functions.
   6.375 -
   6.376 -  \end{description}
   6.377 -*}
   6.378 -
   6.379 -subsubsection {* Example: evaluation of expressions *}
   6.380 -
   6.381 -text {* Subsequently, we define mutual datatypes for arithmetic and
   6.382 -  boolean expressions, and use @{command primrec} for evaluation
   6.383 -  functions that follow the same recursive structure. *}
   6.384 -
   6.385 -datatype 'a aexp =
   6.386 -    IF "'a bexp"  "'a aexp"  "'a aexp"
   6.387 -  | Sum "'a aexp"  "'a aexp"
   6.388 -  | Diff "'a aexp"  "'a aexp"
   6.389 -  | Var 'a
   6.390 -  | Num nat
   6.391 -and 'a bexp =
   6.392 -    Less "'a aexp"  "'a aexp"
   6.393 -  | And "'a bexp"  "'a bexp"
   6.394 -  | Neg "'a bexp"
   6.395 -
   6.396 -
   6.397 -text {* \medskip Evaluation of arithmetic and boolean expressions *}
   6.398 -
   6.399 -primrec evala :: "('a \<Rightarrow> nat) \<Rightarrow> 'a aexp \<Rightarrow> nat"
   6.400 -  and evalb :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bexp \<Rightarrow> bool"
   6.401 -where
   6.402 -  "evala env (IF b a1 a2) = (if evalb env b then evala env a1 else evala env a2)"
   6.403 -| "evala env (Sum a1 a2) = evala env a1 + evala env a2"
   6.404 -| "evala env (Diff a1 a2) = evala env a1 - evala env a2"
   6.405 -| "evala env (Var v) = env v"
   6.406 -| "evala env (Num n) = n"
   6.407 -| "evalb env (Less a1 a2) = (evala env a1 < evala env a2)"
   6.408 -| "evalb env (And b1 b2) = (evalb env b1 \<and> evalb env b2)"
   6.409 -| "evalb env (Neg b) = (\<not> evalb env b)"
   6.410 -
   6.411 -text {* Since the value of an expression depends on the value of its
   6.412 -  variables, the functions @{const evala} and @{const evalb} take an
   6.413 -  additional parameter, an \emph{environment} that maps variables to
   6.414 -  their values.
   6.415 -
   6.416 -  \medskip Substitution on expressions can be defined similarly.  The
   6.417 -  mapping @{text f} of type @{typ "'a \<Rightarrow> 'a aexp"} given as a
   6.418 -  parameter is lifted canonically on the types @{typ "'a aexp"} and
   6.419 -  @{typ "'a bexp"}, respectively.
   6.420 -*}
   6.421 -
   6.422 -primrec substa :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a aexp \<Rightarrow> 'b aexp"
   6.423 -  and substb :: "('a \<Rightarrow> 'b aexp) \<Rightarrow> 'a bexp \<Rightarrow> 'b bexp"
   6.424 -where
   6.425 -  "substa f (IF b a1 a2) = IF (substb f b) (substa f a1) (substa f a2)"
   6.426 -| "substa f (Sum a1 a2) = Sum (substa f a1) (substa f a2)"
   6.427 -| "substa f (Diff a1 a2) = Diff (substa f a1) (substa f a2)"
   6.428 -| "substa f (Var v) = f v"
   6.429 -| "substa f (Num n) = Num n"
   6.430 -| "substb f (Less a1 a2) = Less (substa f a1) (substa f a2)"
   6.431 -| "substb f (And b1 b2) = And (substb f b1) (substb f b2)"
   6.432 -| "substb f (Neg b) = Neg (substb f b)"
   6.433 -
   6.434 -text {* In textbooks about semantics one often finds substitution
   6.435 -  theorems, which express the relationship between substitution and
   6.436 -  evaluation.  For @{typ "'a aexp"} and @{typ "'a bexp"}, we can prove
   6.437 -  such a theorem by mutual induction, followed by simplification.
   6.438 -*}
   6.439 -
   6.440 -lemma subst_one:
   6.441 -  "evala env (substa (Var (v := a')) a) = evala (env (v := evala env a')) a"
   6.442 -  "evalb env (substb (Var (v := a')) b) = evalb (env (v := evala env a')) b"
   6.443 -  by (induct a and b) simp_all
   6.444 -
   6.445 -lemma subst_all:
   6.446 -  "evala env (substa s a) = evala (\<lambda>x. evala env (s x)) a"
   6.447 -  "evalb env (substb s b) = evalb (\<lambda>x. evala env (s x)) b"
   6.448 -  by (induct a and b) simp_all
   6.449 -
   6.450 -
   6.451 -subsubsection {* Example: a substitution function for terms *}
   6.452 -
   6.453 -text {* Functions on datatypes with nested recursion are also defined
   6.454 -  by mutual primitive recursion. *}
   6.455 -
   6.456 -datatype ('a, 'b) "term" = Var 'a | App 'b "('a, 'b) term list"
   6.457 -
   6.458 -text {* A substitution function on type @{typ "('a, 'b) term"} can be
   6.459 -  defined as follows, by working simultaneously on @{typ "('a, 'b)
   6.460 -  term list"}: *}
   6.461 -
   6.462 -primrec subst_term :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term \<Rightarrow> ('a, 'b) term" and
   6.463 -  subst_term_list :: "('a \<Rightarrow> ('a, 'b) term) \<Rightarrow> ('a, 'b) term list \<Rightarrow> ('a, 'b) term list"
   6.464 -where
   6.465 -  "subst_term f (Var a) = f a"
   6.466 -| "subst_term f (App b ts) = App b (subst_term_list f ts)"
   6.467 -| "subst_term_list f [] = []"
   6.468 -| "subst_term_list f (t # ts) = subst_term f t # subst_term_list f ts"
   6.469 -
   6.470 -text {* The recursion scheme follows the structure of the unfolded
   6.471 -  definition of type @{typ "('a, 'b) term"}.  To prove properties of this
   6.472 -  substitution function, mutual induction is needed:
   6.473 -*}
   6.474 -
   6.475 -lemma "subst_term (subst_term f1 \<circ> f2) t = subst_term f1 (subst_term f2 t)" and
   6.476 -  "subst_term_list (subst_term f1 \<circ> f2) ts = subst_term_list f1 (subst_term_list f2 ts)"
   6.477 -  by (induct t and ts) simp_all
   6.478 -
   6.479 -
   6.480 -subsubsection {* Example: a map function for infinitely branching trees *}
   6.481 -
   6.482 -text {* Defining functions on infinitely branching datatypes by
   6.483 -  primitive recursion is just as easy.
   6.484 -*}
   6.485 -
   6.486 -datatype 'a tree = Atom 'a | Branch "nat \<Rightarrow> 'a tree"
   6.487 -
   6.488 -primrec map_tree :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a tree \<Rightarrow> 'b tree"
   6.489 -where
   6.490 -  "map_tree f (Atom a) = Atom (f a)"
   6.491 -| "map_tree f (Branch ts) = Branch (\<lambda>x. map_tree f (ts x))"
   6.492 -
   6.493 -text {* Note that all occurrences of functions such as @{text ts}
   6.494 -  above must be applied to an argument.  In particular, @{term
   6.495 -  "map_tree f \<circ> ts"} is not allowed here. *}
   6.496 -
   6.497 -text {* Here is a simple composition lemma for @{term map_tree}: *}
   6.498 -
   6.499 -lemma "map_tree g (map_tree f t) = map_tree (g \<circ> f) t"
   6.500 -  by (induct t) simp_all
   6.501 -
   6.502 -
   6.503 -subsection {* Proof methods related to recursive definitions *}
   6.504 -
   6.505 -text {*
   6.506 -  \begin{matharray}{rcl}
   6.507 -    @{method_def (HOL) pat_completeness} & : & @{text method} \\
   6.508 -    @{method_def (HOL) relation} & : & @{text method} \\
   6.509 -    @{method_def (HOL) lexicographic_order} & : & @{text method} \\
   6.510 -    @{method_def (HOL) size_change} & : & @{text method} \\
   6.511 -    @{method_def (HOL) induction_schema} & : & @{text method} \\
   6.512 -  \end{matharray}
   6.513 -
   6.514 -  @{rail \<open>
   6.515 -    @@{method (HOL) relation} @{syntax term}
   6.516 -    ;
   6.517 -    @@{method (HOL) lexicographic_order} (@{syntax clasimpmod} * )
   6.518 -    ;
   6.519 -    @@{method (HOL) size_change} ( orders (@{syntax clasimpmod} * ) )
   6.520 -    ;
   6.521 -    @@{method (HOL) induction_schema}
   6.522 -    ;
   6.523 -    orders: ( 'max' | 'min' | 'ms' ) *
   6.524 -  \<close>}
   6.525 -
   6.526 -  \begin{description}
   6.527 -
   6.528 -  \item @{method (HOL) pat_completeness} is a specialized method to
   6.529 -  solve goals regarding the completeness of pattern matching, as
   6.530 -  required by the @{command (HOL) "function"} package (cf.\
   6.531 -  \cite{isabelle-function}).
   6.532 -
   6.533 -  \item @{method (HOL) relation}~@{text R} introduces a termination
   6.534 -  proof using the relation @{text R}.  The resulting proof state will
   6.535 -  contain goals expressing that @{text R} is wellfounded, and that the
   6.536 -  arguments of recursive calls decrease with respect to @{text R}.
   6.537 -  Usually, this method is used as the initial proof step of manual
   6.538 -  termination proofs.
   6.539 -
   6.540 -  \item @{method (HOL) "lexicographic_order"} attempts a fully
   6.541 -  automated termination proof by searching for a lexicographic
   6.542 -  combination of size measures on the arguments of the function. The
   6.543 -  method accepts the same arguments as the @{method auto} method,
   6.544 -  which it uses internally to prove local descents.  The @{syntax
   6.545 -  clasimpmod} modifiers are accepted (as for @{method auto}).
   6.546 -
   6.547 -  In case of failure, extensive information is printed, which can help
   6.548 -  to analyse the situation (cf.\ \cite{isabelle-function}).
   6.549 -
   6.550 -  \item @{method (HOL) "size_change"} also works on termination goals,
   6.551 -  using a variation of the size-change principle, together with a
   6.552 -  graph decomposition technique (see \cite{krauss_phd} for details).
   6.553 -  Three kinds of orders are used internally: @{text max}, @{text min},
   6.554 -  and @{text ms} (multiset), which is only available when the theory
   6.555 -  @{text Multiset} is loaded. When no order kinds are given, they are
   6.556 -  tried in order. The search for a termination proof uses SAT solving
   6.557 -  internally.
   6.558 -
   6.559 -  For local descent proofs, the @{syntax clasimpmod} modifiers are
   6.560 -  accepted (as for @{method auto}).
   6.561 -
   6.562 -  \item @{method (HOL) induction_schema} derives user-specified
   6.563 -  induction rules from well-founded induction and completeness of
   6.564 -  patterns. This factors out some operations that are done internally
   6.565 -  by the function package and makes them available separately. See
   6.566 -  @{file "~~/src/HOL/ex/Induction_Schema.thy"} for examples.
   6.567 -
   6.568 -  \end{description}
   6.569 -*}
   6.570 -
   6.571 -
   6.572 -subsection {* Functions with explicit partiality *}
   6.573 -
   6.574 -text {*
   6.575 -  \begin{matharray}{rcl}
   6.576 -    @{command_def (HOL) "partial_function"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
   6.577 -    @{attribute_def (HOL) "partial_function_mono"} & : & @{text attribute} \\
   6.578 -  \end{matharray}
   6.579 -
   6.580 -  @{rail \<open>
   6.581 -    @@{command (HOL) partial_function} @{syntax target}?
   6.582 -      '(' @{syntax nameref} ')' @{syntax "fixes"} \<newline>
   6.583 -      @'where' @{syntax thmdecl}? @{syntax prop}
   6.584 -  \<close>}
   6.585 -
   6.586 -  \begin{description}
   6.587 -
   6.588 -  \item @{command (HOL) "partial_function"}~@{text "(mode)"} defines
   6.589 -  recursive functions based on fixpoints in complete partial
   6.590 -  orders. No termination proof is required from the user or
   6.591 -  constructed internally. Instead, the possibility of non-termination
   6.592 -  is modelled explicitly in the result type, which contains an
   6.593 -  explicit bottom element.
   6.594 -
   6.595 -  Pattern matching and mutual recursion are currently not supported.
   6.596 -  Thus, the specification consists of a single function described by a
   6.597 -  single recursive equation.
   6.598 -
   6.599 -  There are no fixed syntactic restrictions on the body of the
   6.600 -  function, but the induced functional must be provably monotonic
   6.601 -  wrt.\ the underlying order.  The monotonicity proof is performed
   6.602 -  internally, and the definition is rejected when it fails. The proof
   6.603 -  can be influenced by declaring hints using the
   6.604 -  @{attribute (HOL) partial_function_mono} attribute.
   6.605 -
   6.606 -  The mandatory @{text mode} argument specifies the mode of operation
   6.607 -  of the command, which directly corresponds to a complete partial
   6.608 -  order on the result type. By default, the following modes are
   6.609 -  defined:
   6.610 -
   6.611 -  \begin{description}
   6.612 -
   6.613 -  \item @{text option} defines functions that map into the @{type
   6.614 -  option} type. Here, the value @{term None} is used to model a
   6.615 -  non-terminating computation. Monotonicity requires that if @{term
   6.616 -  None} is returned by a recursive call, then the overall result must
   6.617 -  also be @{term None}. This is best achieved through the use of the
   6.618 -  monadic operator @{const "Option.bind"}.
   6.619 -
   6.620 -  \item @{text tailrec} defines functions with an arbitrary result
   6.621 -  type and uses the slightly degenerated partial order where @{term
   6.622 -  "undefined"} is the bottom element.  Now, monotonicity requires that
   6.623 -  if @{term undefined} is returned by a recursive call, then the
   6.624 -  overall result must also be @{term undefined}. In practice, this is
   6.625 -  only satisfied when each recursive call is a tail call, whose result
   6.626 -  is directly returned. Thus, this mode of operation allows the
   6.627 -  definition of arbitrary tail-recursive functions.
   6.628 -
   6.629 -  \end{description}
   6.630 -
   6.631 -  Experienced users may define new modes by instantiating the locale
   6.632 -  @{const "partial_function_definitions"} appropriately.
   6.633 -
   6.634 -  \item @{attribute (HOL) partial_function_mono} declares rules for
   6.635 -  use in the internal monotonicity proofs of partial function
   6.636 -  definitions.
   6.637 -
   6.638 -  \end{description}
   6.639 -
   6.640 -*}
   6.641 -
   6.642 -
   6.643 -subsection {* Old-style recursive function definitions (TFL) *}
   6.644 -
   6.645 -text {*
   6.646 -  \begin{matharray}{rcl}
   6.647 -    @{command_def (HOL) "recdef"} & : & @{text "theory \<rightarrow> theory)"} \\
   6.648 -    @{command_def (HOL) "recdef_tc"}@{text "\<^sup>*"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
   6.649 -  \end{matharray}
   6.650 -
   6.651 -  The old TFL commands @{command (HOL) "recdef"} and @{command (HOL)
   6.652 -  "recdef_tc"} for defining recursive are mostly obsolete; @{command
   6.653 -  (HOL) "function"} or @{command (HOL) "fun"} should be used instead.
   6.654 -
   6.655 -  @{rail \<open>
   6.656 -    @@{command (HOL) recdef} ('(' @'permissive' ')')? \<newline>
   6.657 -      @{syntax name} @{syntax term} (@{syntax prop} +) hints?
   6.658 -    ;
   6.659 -    recdeftc @{syntax thmdecl}? tc
   6.660 -    ;
   6.661 -    hints: '(' @'hints' ( recdefmod * ) ')'
   6.662 -    ;
   6.663 -    recdefmod: (('recdef_simp' | 'recdef_cong' | 'recdef_wf')
   6.664 -      (() | 'add' | 'del') ':' @{syntax thmrefs}) | @{syntax clasimpmod}
   6.665 -    ;
   6.666 -    tc: @{syntax nameref} ('(' @{syntax nat} ')')?
   6.667 -  \<close>}
   6.668 -
   6.669 -  \begin{description}
   6.670 -
   6.671 -  \item @{command (HOL) "recdef"} defines general well-founded
   6.672 -  recursive functions (using the TFL package), see also
   6.673 -  \cite{isabelle-HOL}.  The ``@{text "(permissive)"}'' option tells
   6.674 -  TFL to recover from failed proof attempts, returning unfinished
   6.675 -  results.  The @{text recdef_simp}, @{text recdef_cong}, and @{text
   6.676 -  recdef_wf} hints refer to auxiliary rules to be used in the internal
   6.677 -  automated proof process of TFL.  Additional @{syntax clasimpmod}
   6.678 -  declarations may be given to tune the context of the Simplifier
   6.679 -  (cf.\ \secref{sec:simplifier}) and Classical reasoner (cf.\
   6.680 -  \secref{sec:classical}).
   6.681 -
   6.682 -  \item @{command (HOL) "recdef_tc"}~@{text "c (i)"} recommences the
   6.683 -  proof for leftover termination condition number @{text i} (default
   6.684 -  1) as generated by a @{command (HOL) "recdef"} definition of
   6.685 -  constant @{text c}.
   6.686 -
   6.687 -  Note that in most cases, @{command (HOL) "recdef"} is able to finish
   6.688 -  its internal proofs without manual intervention.
   6.689 -
   6.690 -  \end{description}
   6.691 -
   6.692 -  \medskip Hints for @{command (HOL) "recdef"} may be also declared
   6.693 -  globally, using the following attributes.
   6.694 -
   6.695 -  \begin{matharray}{rcl}
   6.696 -    @{attribute_def (HOL) recdef_simp} & : & @{text attribute} \\
   6.697 -    @{attribute_def (HOL) recdef_cong} & : & @{text attribute} \\
   6.698 -    @{attribute_def (HOL) recdef_wf} & : & @{text attribute} \\
   6.699 -  \end{matharray}
   6.700 -
   6.701 -  @{rail \<open>
   6.702 -    (@@{attribute (HOL) recdef_simp} | @@{attribute (HOL) recdef_cong} |
   6.703 -      @@{attribute (HOL) recdef_wf}) (() | 'add' | 'del')
   6.704 -  \<close>}
   6.705 -*}
   6.706 -
   6.707 -
   6.708 -section {* Datatypes \label{sec:hol-datatype} *}
   6.709 -
   6.710 -text {*
   6.711 -  \begin{matharray}{rcl}
   6.712 -    @{command_def (HOL) "datatype"} & : & @{text "theory \<rightarrow> theory"} \\
   6.713 -    @{command_def (HOL) "rep_datatype"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
   6.714 -  \end{matharray}
   6.715 -
   6.716 -  @{rail \<open>
   6.717 -    @@{command (HOL) datatype} (spec + @'and')
   6.718 -    ;
   6.719 -    @@{command (HOL) rep_datatype} ('(' (@{syntax name} +) ')')? (@{syntax term} +)
   6.720 -    ;
   6.721 -
   6.722 -    spec: @{syntax typespec_sorts} @{syntax mixfix}? '=' (cons + '|')
   6.723 -    ;
   6.724 -    cons: @{syntax name} (@{syntax type} * ) @{syntax mixfix}?
   6.725 -  \<close>}
   6.726 -
   6.727 -  \begin{description}
   6.728 -
   6.729 -  \item @{command (HOL) "datatype"} defines inductive datatypes in
   6.730 -  HOL.
   6.731 -
   6.732 -  \item @{command (HOL) "rep_datatype"} represents existing types as
   6.733 -  datatypes.
   6.734 -
   6.735 -  For foundational reasons, some basic types such as @{typ nat}, @{typ
   6.736 -  "'a \<times> 'b"}, @{typ "'a + 'b"}, @{typ bool} and @{typ unit} are
   6.737 -  introduced by more primitive means using @{command_ref typedef}.  To
   6.738 -  recover the rich infrastructure of @{command datatype} (e.g.\ rules
   6.739 -  for @{method cases} and @{method induct} and the primitive recursion
   6.740 -  combinators), such types may be represented as actual datatypes
   6.741 -  later.  This is done by specifying the constructors of the desired
   6.742 -  type, and giving a proof of the induction rule, distinctness and
   6.743 -  injectivity of constructors.
   6.744 -
   6.745 -  For example, see @{file "~~/src/HOL/Sum_Type.thy"} for the
   6.746 -  representation of the primitive sum type as fully-featured datatype.
   6.747 -
   6.748 -  \end{description}
   6.749 -
   6.750 -  The generated rules for @{method induct} and @{method cases} provide
   6.751 -  case names according to the given constructors, while parameters are
   6.752 -  named after the types (see also \secref{sec:cases-induct}).
   6.753 -
   6.754 -  See \cite{isabelle-HOL} for more details on datatypes, but beware of
   6.755 -  the old-style theory syntax being used there!  Apart from proper
   6.756 -  proof methods for case-analysis and induction, there are also
   6.757 -  emulations of ML tactics @{method (HOL) case_tac} and @{method (HOL)
   6.758 -  induct_tac} available, see \secref{sec:hol-induct-tac}; these admit
   6.759 -  to refer directly to the internal structure of subgoals (including
   6.760 -  internally bound parameters).
   6.761 -*}
   6.762 -
   6.763 -
   6.764 -subsubsection {* Examples *}
   6.765 -
   6.766 -text {* We define a type of finite sequences, with slightly different
   6.767 -  names than the existing @{typ "'a list"} that is already in @{theory
   6.768 -  Main}: *}
   6.769 -
   6.770 -datatype 'a seq = Empty | Seq 'a "'a seq"
   6.771 -
   6.772 -text {* We can now prove some simple lemma by structural induction: *}
   6.773 -
   6.774 -lemma "Seq x xs \<noteq> xs"
   6.775 -proof (induct xs arbitrary: x)
   6.776 -  case Empty
   6.777 -  txt {* This case can be proved using the simplifier: the freeness
   6.778 -    properties of the datatype are already declared as @{attribute
   6.779 -    simp} rules. *}
   6.780 -  show "Seq x Empty \<noteq> Empty"
   6.781 -    by simp
   6.782 -next
   6.783 -  case (Seq y ys)
   6.784 -  txt {* The step case is proved similarly. *}
   6.785 -  show "Seq x (Seq y ys) \<noteq> Seq y ys"
   6.786 -    using `Seq y ys \<noteq> ys` by simp
   6.787 -qed
   6.788 -
   6.789 -text {* Here is a more succinct version of the same proof: *}
   6.790 -
   6.791 -lemma "Seq x xs \<noteq> xs"
   6.792 -  by (induct xs arbitrary: x) simp_all
   6.793 -
   6.794 -
   6.795 -section {* Records \label{sec:hol-record} *}
   6.796 -
   6.797 -text {*
   6.798 -  In principle, records merely generalize the concept of tuples, where
   6.799 -  components may be addressed by labels instead of just position.  The
   6.800 -  logical infrastructure of records in Isabelle/HOL is slightly more
   6.801 -  advanced, though, supporting truly extensible record schemes.  This
   6.802 -  admits operations that are polymorphic with respect to record
   6.803 -  extension, yielding ``object-oriented'' effects like (single)
   6.804 -  inheritance.  See also \cite{NaraschewskiW-TPHOLs98} for more
   6.805 -  details on object-oriented verification and record subtyping in HOL.
   6.806 -*}
   6.807 -
   6.808 -
   6.809 -subsection {* Basic concepts *}
   6.810 -
   6.811 -text {*
   6.812 -  Isabelle/HOL supports both \emph{fixed} and \emph{schematic} records
   6.813 -  at the level of terms and types.  The notation is as follows:
   6.814 -
   6.815 -  \begin{center}
   6.816 -  \begin{tabular}{l|l|l}
   6.817 -    & record terms & record types \\ \hline
   6.818 -    fixed & @{text "\<lparr>x = a, y = b\<rparr>"} & @{text "\<lparr>x :: A, y :: B\<rparr>"} \\
   6.819 -    schematic & @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} &
   6.820 -      @{text "\<lparr>x :: A, y :: B, \<dots> :: M\<rparr>"} \\
   6.821 -  \end{tabular}
   6.822 -  \end{center}
   6.823 -
   6.824 -  \noindent The ASCII representation of @{text "\<lparr>x = a\<rparr>"} is @{text
   6.825 -  "(| x = a |)"}.
   6.826 -
   6.827 -  A fixed record @{text "\<lparr>x = a, y = b\<rparr>"} has field @{text x} of value
   6.828 -  @{text a} and field @{text y} of value @{text b}.  The corresponding
   6.829 -  type is @{text "\<lparr>x :: A, y :: B\<rparr>"}, assuming that @{text "a :: A"}
   6.830 -  and @{text "b :: B"}.
   6.831 -
   6.832 -  A record scheme like @{text "\<lparr>x = a, y = b, \<dots> = m\<rparr>"} contains fields
   6.833 -  @{text x} and @{text y} as before, but also possibly further fields
   6.834 -  as indicated by the ``@{text "\<dots>"}'' notation (which is actually part
   6.835 -  of the syntax).  The improper field ``@{text "\<dots>"}'' of a record
   6.836 -  scheme is called the \emph{more part}.  Logically it is just a free
   6.837 -  variable, which is occasionally referred to as ``row variable'' in
   6.838 -  the literature.  The more part of a record scheme may be
   6.839 -  instantiated by zero or more further components.  For example, the
   6.840 -  previous scheme may get instantiated to @{text "\<lparr>x = a, y = b, z =
   6.841 -  c, \<dots> = m'\<rparr>"}, where @{text m'} refers to a different more part.
   6.842 -  Fixed records are special instances of record schemes, where
   6.843 -  ``@{text "\<dots>"}'' is properly terminated by the @{text "() :: unit"}
   6.844 -  element.  In fact, @{text "\<lparr>x = a, y = b\<rparr>"} is just an abbreviation
   6.845 -  for @{text "\<lparr>x = a, y = b, \<dots> = ()\<rparr>"}.
   6.846 -
   6.847 -  \medskip Two key observations make extensible records in a simply
   6.848 -  typed language like HOL work out:
   6.849 -
   6.850 -  \begin{enumerate}
   6.851 -
   6.852 -  \item the more part is internalized, as a free term or type
   6.853 -  variable,
   6.854 -
   6.855 -  \item field names are externalized, they cannot be accessed within
   6.856 -  the logic as first-class values.
   6.857 -
   6.858 -  \end{enumerate}
   6.859 -
   6.860 -  \medskip In Isabelle/HOL record types have to be defined explicitly,
   6.861 -  fixing their field names and types, and their (optional) parent
   6.862 -  record.  Afterwards, records may be formed using above syntax, while
   6.863 -  obeying the canonical order of fields as given by their declaration.
   6.864 -  The record package provides several standard operations like
   6.865 -  selectors and updates.  The common setup for various generic proof
   6.866 -  tools enable succinct reasoning patterns.  See also the Isabelle/HOL
   6.867 -  tutorial \cite{isabelle-hol-book} for further instructions on using
   6.868 -  records in practice.
   6.869 -*}
   6.870 -
   6.871 -
   6.872 -subsection {* Record specifications *}
   6.873 -
   6.874 -text {*
   6.875 -  \begin{matharray}{rcl}
   6.876 -    @{command_def (HOL) "record"} & : & @{text "theory \<rightarrow> theory"} \\
   6.877 -  \end{matharray}
   6.878 -
   6.879 -  @{rail \<open>
   6.880 -    @@{command (HOL) record} @{syntax typespec_sorts} '=' \<newline>
   6.881 -      (@{syntax type} '+')? (constdecl +)
   6.882 -    ;
   6.883 -    constdecl: @{syntax name} '::' @{syntax type} @{syntax mixfix}?
   6.884 -  \<close>}
   6.885 -
   6.886 -  \begin{description}
   6.887 -
   6.888 -  \item @{command (HOL) "record"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t = \<tau> + c\<^sub>1 :: \<sigma>\<^sub>1
   6.889 -  \<dots> c\<^sub>n :: \<sigma>\<^sub>n"} defines extensible record type @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"},
   6.890 -  derived from the optional parent record @{text "\<tau>"} by adding new
   6.891 -  field components @{text "c\<^sub>i :: \<sigma>\<^sub>i"} etc.
   6.892 -
   6.893 -  The type variables of @{text "\<tau>"} and @{text "\<sigma>\<^sub>i"} need to be
   6.894 -  covered by the (distinct) parameters @{text "\<alpha>\<^sub>1, \<dots>,
   6.895 -  \<alpha>\<^sub>m"}.  Type constructor @{text t} has to be new, while @{text
   6.896 -  \<tau>} needs to specify an instance of an existing record type.  At
   6.897 -  least one new field @{text "c\<^sub>i"} has to be specified.
   6.898 -  Basically, field names need to belong to a unique record.  This is
   6.899 -  not a real restriction in practice, since fields are qualified by
   6.900 -  the record name internally.
   6.901 -
   6.902 -  The parent record specification @{text \<tau>} is optional; if omitted
   6.903 -  @{text t} becomes a root record.  The hierarchy of all records
   6.904 -  declared within a theory context forms a forest structure, i.e.\ a
   6.905 -  set of trees starting with a root record each.  There is no way to
   6.906 -  merge multiple parent records!
   6.907 -
   6.908 -  For convenience, @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} is made a
   6.909 -  type abbreviation for the fixed record type @{text "\<lparr>c\<^sub>1 ::
   6.910 -  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n\<rparr>"}, likewise is @{text
   6.911 -  "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m, \<zeta>) t_scheme"} made an abbreviation for
   6.912 -  @{text "\<lparr>c\<^sub>1 :: \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n, \<dots> ::
   6.913 -  \<zeta>\<rparr>"}.
   6.914 -
   6.915 -  \end{description}
   6.916 -*}
   6.917 -
   6.918 -
   6.919 -subsection {* Record operations *}
   6.920 -
   6.921 -text {*
   6.922 -  Any record definition of the form presented above produces certain
   6.923 -  standard operations.  Selectors and updates are provided for any
   6.924 -  field, including the improper one ``@{text more}''.  There are also
   6.925 -  cumulative record constructor functions.  To simplify the
   6.926 -  presentation below, we assume for now that @{text "(\<alpha>\<^sub>1, \<dots>,
   6.927 -  \<alpha>\<^sub>m) t"} is a root record with fields @{text "c\<^sub>1 ::
   6.928 -  \<sigma>\<^sub>1, \<dots>, c\<^sub>n :: \<sigma>\<^sub>n"}.
   6.929 -
   6.930 -  \medskip \textbf{Selectors} and \textbf{updates} are available for
   6.931 -  any field (including ``@{text more}''):
   6.932 -
   6.933 -  \begin{matharray}{lll}
   6.934 -    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
   6.935 -    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
   6.936 -  \end{matharray}
   6.937 -
   6.938 -  There is special syntax for application of updates: @{text "r\<lparr>x :=
   6.939 -  a\<rparr>"} abbreviates term @{text "x_update a r"}.  Further notation for
   6.940 -  repeated updates is also available: @{text "r\<lparr>x := a\<rparr>\<lparr>y := b\<rparr>\<lparr>z :=
   6.941 -  c\<rparr>"} may be written @{text "r\<lparr>x := a, y := b, z := c\<rparr>"}.  Note that
   6.942 -  because of postfix notation the order of fields shown here is
   6.943 -  reverse than in the actual term.  Since repeated updates are just
   6.944 -  function applications, fields may be freely permuted in @{text "\<lparr>x
   6.945 -  := a, y := b, z := c\<rparr>"}, as far as logical equality is concerned.
   6.946 -  Thus commutativity of independent updates can be proven within the
   6.947 -  logic for any two fields, but not as a general theorem.
   6.948 -
   6.949 -  \medskip The \textbf{make} operation provides a cumulative record
   6.950 -  constructor function:
   6.951 -
   6.952 -  \begin{matharray}{lll}
   6.953 -    @{text "t.make"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   6.954 -  \end{matharray}
   6.955 -
   6.956 -  \medskip We now reconsider the case of non-root records, which are
   6.957 -  derived of some parent.  In general, the latter may depend on
   6.958 -  another parent as well, resulting in a list of \emph{ancestor
   6.959 -  records}.  Appending the lists of fields of all ancestors results in
   6.960 -  a certain field prefix.  The record package automatically takes care
   6.961 -  of this by lifting operations over this context of ancestor fields.
   6.962 -  Assuming that @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>m) t"} has ancestor
   6.963 -  fields @{text "b\<^sub>1 :: \<rho>\<^sub>1, \<dots>, b\<^sub>k :: \<rho>\<^sub>k"},
   6.964 -  the above record operations will get the following types:
   6.965 -
   6.966 -  \medskip
   6.967 -  \begin{tabular}{lll}
   6.968 -    @{text "c\<^sub>i"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<sigma>\<^sub>i"} \\
   6.969 -    @{text "c\<^sub>i_update"} & @{text "::"} & @{text "\<sigma>\<^sub>i \<Rightarrow>
   6.970 -      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow>
   6.971 -      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
   6.972 -    @{text "t.make"} & @{text "::"} & @{text "\<rho>\<^sub>1 \<Rightarrow> \<dots> \<rho>\<^sub>k \<Rightarrow> \<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow>
   6.973 -      \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   6.974 -  \end{tabular}
   6.975 -  \medskip
   6.976 -
   6.977 -  \noindent Some further operations address the extension aspect of a
   6.978 -  derived record scheme specifically: @{text "t.fields"} produces a
   6.979 -  record fragment consisting of exactly the new fields introduced here
   6.980 -  (the result may serve as a more part elsewhere); @{text "t.extend"}
   6.981 -  takes a fixed record and adds a given more part; @{text
   6.982 -  "t.truncate"} restricts a record scheme to a fixed record.
   6.983 -
   6.984 -  \medskip
   6.985 -  \begin{tabular}{lll}
   6.986 -    @{text "t.fields"} & @{text "::"} & @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>n \<Rightarrow> \<lparr>\<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   6.987 -    @{text "t.extend"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr> \<Rightarrow>
   6.988 -      \<zeta> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr>"} \\
   6.989 -    @{text "t.truncate"} & @{text "::"} & @{text "\<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>, \<dots> :: \<zeta>\<rparr> \<Rightarrow> \<lparr>\<^vec>b :: \<^vec>\<rho>, \<^vec>c :: \<^vec>\<sigma>\<rparr>"} \\
   6.990 -  \end{tabular}
   6.991 -  \medskip
   6.992 -
   6.993 -  \noindent Note that @{text "t.make"} and @{text "t.fields"} coincide
   6.994 -  for root records.
   6.995 -*}
   6.996 -
   6.997 -
   6.998 -subsection {* Derived rules and proof tools *}
   6.999 -
  6.1000 -text {*
  6.1001 -  The record package proves several results internally, declaring
  6.1002 -  these facts to appropriate proof tools.  This enables users to
  6.1003 -  reason about record structures quite conveniently.  Assume that
  6.1004 -  @{text t} is a record type as specified above.
  6.1005 -
  6.1006 -  \begin{enumerate}
  6.1007 -
  6.1008 -  \item Standard conversions for selectors or updates applied to
  6.1009 -  record constructor terms are made part of the default Simplifier
  6.1010 -  context; thus proofs by reduction of basic operations merely require
  6.1011 -  the @{method simp} method without further arguments.  These rules
  6.1012 -  are available as @{text "t.simps"}, too.
  6.1013 -
  6.1014 -  \item Selectors applied to updated records are automatically reduced
  6.1015 -  by an internal simplification procedure, which is also part of the
  6.1016 -  standard Simplifier setup.
  6.1017 -
  6.1018 -  \item Inject equations of a form analogous to @{prop "(x, y) = (x',
  6.1019 -  y') \<equiv> x = x' \<and> y = y'"} are declared to the Simplifier and Classical
  6.1020 -  Reasoner as @{attribute iff} rules.  These rules are available as
  6.1021 -  @{text "t.iffs"}.
  6.1022 -
  6.1023 -  \item The introduction rule for record equality analogous to @{text
  6.1024 -  "x r = x r' \<Longrightarrow> y r = y r' \<dots> \<Longrightarrow> r = r'"} is declared to the Simplifier,
  6.1025 -  and as the basic rule context as ``@{attribute intro}@{text "?"}''.
  6.1026 -  The rule is called @{text "t.equality"}.
  6.1027 -
  6.1028 -  \item Representations of arbitrary record expressions as canonical
  6.1029 -  constructor terms are provided both in @{method cases} and @{method
  6.1030 -  induct} format (cf.\ the generic proof methods of the same name,
  6.1031 -  \secref{sec:cases-induct}).  Several variations are available, for
  6.1032 -  fixed records, record schemes, more parts etc.
  6.1033 -
  6.1034 -  The generic proof methods are sufficiently smart to pick the most
  6.1035 -  sensible rule according to the type of the indicated record
  6.1036 -  expression: users just need to apply something like ``@{text "(cases
  6.1037 -  r)"}'' to a certain proof problem.
  6.1038 -
  6.1039 -  \item The derived record operations @{text "t.make"}, @{text
  6.1040 -  "t.fields"}, @{text "t.extend"}, @{text "t.truncate"} are \emph{not}
  6.1041 -  treated automatically, but usually need to be expanded by hand,
  6.1042 -  using the collective fact @{text "t.defs"}.
  6.1043 -
  6.1044 -  \end{enumerate}
  6.1045 -*}
  6.1046 -
  6.1047 -
  6.1048 -subsubsection {* Examples *}
  6.1049 -
  6.1050 -text {* See @{file "~~/src/HOL/ex/Records.thy"}, for example. *}
  6.1051 -
  6.1052 -section {* Typedef axiomatization \label{sec:hol-typedef} *}
  6.1053 -
  6.1054 -text {*
  6.1055 -  \begin{matharray}{rcl}
  6.1056 -    @{command_def (HOL) "typedef"} & : & @{text "local_theory \<rightarrow> proof(prove)"} \\
  6.1057 -  \end{matharray}
  6.1058 -
  6.1059 -  A Gordon/HOL-style type definition is a certain axiom scheme that
  6.1060 -  identifies a new type with a subset of an existing type.  More
  6.1061 -  precisely, the new type is defined by exhibiting an existing type
  6.1062 -  @{text \<tau>}, a set @{text "A :: \<tau> set"}, and a theorem that proves
  6.1063 -  @{prop "\<exists>x. x \<in> A"}.  Thus @{text A} is a non-empty subset of @{text
  6.1064 -  \<tau>}, and the new type denotes this subset.  New functions are
  6.1065 -  postulated that establish an isomorphism between the new type and
  6.1066 -  the subset.  In general, the type @{text \<tau>} may involve type
  6.1067 -  variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} which means that the type definition
  6.1068 -  produces a type constructor @{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t"} depending on
  6.1069 -  those type arguments.
  6.1070 -
  6.1071 -  The axiomatization can be considered a ``definition'' in the sense
  6.1072 -  of the particular set-theoretic interpretation of HOL
  6.1073 -  \cite{pitts93}, where the universe of types is required to be
  6.1074 -  downwards-closed wrt.\ arbitrary non-empty subsets.  Thus genuinely
  6.1075 -  new types introduced by @{command "typedef"} stay within the range
  6.1076 -  of HOL models by construction.  Note that @{command_ref
  6.1077 -  type_synonym} from Isabelle/Pure merely introduces syntactic
  6.1078 -  abbreviations, without any logical significance.
  6.1079 -
  6.1080 -  @{rail \<open>
  6.1081 -    @@{command (HOL) typedef} abs_type '=' rep_set
  6.1082 -    ;
  6.1083 -    abs_type: @{syntax typespec_sorts} @{syntax mixfix}?
  6.1084 -    ;
  6.1085 -    rep_set: @{syntax term} (@'morphisms' @{syntax name} @{syntax name})?
  6.1086 -  \<close>}
  6.1087 -
  6.1088 -  \begin{description}
  6.1089 -
  6.1090 -  \item @{command (HOL) "typedef"}~@{text "(\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t = A"}
  6.1091 -  axiomatizes a type definition in the background theory of the
  6.1092 -  current context, depending on a non-emptiness result of the set
  6.1093 -  @{text A} that needs to be proven here.  The set @{text A} may
  6.1094 -  contain type variables @{text "\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n"} as specified on the LHS,
  6.1095 -  but no term variables.
  6.1096 -
  6.1097 -  Even though a local theory specification, the newly introduced type
  6.1098 -  constructor cannot depend on parameters or assumptions of the
  6.1099 -  context: this is structurally impossible in HOL.  In contrast, the
  6.1100 -  non-emptiness proof may use local assumptions in unusual situations,
  6.1101 -  which could result in different interpretations in target contexts:
  6.1102 -  the meaning of the bijection between the representing set @{text A}
  6.1103 -  and the new type @{text t} may then change in different application
  6.1104 -  contexts.
  6.1105 -
  6.1106 -  For @{command (HOL) "typedef"}~@{text "t = A"} the newly introduced
  6.1107 -  type @{text t} is accompanied by a pair of morphisms to relate it to
  6.1108 -  the representing set over the old type.  By default, the injection
  6.1109 -  from type to set is called @{text Rep_t} and its inverse @{text
  6.1110 -  Abs_t}: An explicit @{keyword (HOL) "morphisms"} specification
  6.1111 -  allows to provide alternative names.
  6.1112 -
  6.1113 -  The core axiomatization uses the locale predicate @{const
  6.1114 -  type_definition} as defined in Isabelle/HOL.  Various basic
  6.1115 -  consequences of that are instantiated accordingly, re-using the
  6.1116 -  locale facts with names derived from the new type constructor.  Thus
  6.1117 -  the generic @{thm type_definition.Rep} is turned into the specific
  6.1118 -  @{text "Rep_t"}, for example.
  6.1119 -
  6.1120 -  Theorems @{thm type_definition.Rep}, @{thm
  6.1121 -  type_definition.Rep_inverse}, and @{thm type_definition.Abs_inverse}
  6.1122 -  provide the most basic characterization as a corresponding
  6.1123 -  injection/surjection pair (in both directions).  The derived rules
  6.1124 -  @{thm type_definition.Rep_inject} and @{thm
  6.1125 -  type_definition.Abs_inject} provide a more convenient version of
  6.1126 -  injectivity, suitable for automated proof tools (e.g.\ in
  6.1127 -  declarations involving @{attribute simp} or @{attribute iff}).
  6.1128 -  Furthermore, the rules @{thm type_definition.Rep_cases}~/ @{thm
  6.1129 -  type_definition.Rep_induct}, and @{thm type_definition.Abs_cases}~/
  6.1130 -  @{thm type_definition.Abs_induct} provide alternative views on
  6.1131 -  surjectivity.  These rules are already declared as set or type rules
  6.1132 -  for the generic @{method cases} and @{method induct} methods,
  6.1133 -  respectively.
  6.1134 -
  6.1135 -  \end{description}
  6.1136 -
  6.1137 -  \begin{warn}
  6.1138 -  If you introduce a new type axiomatically, i.e.\ via @{command_ref
  6.1139 -  typedecl} and @{command_ref axiomatization}, the minimum requirement
  6.1140 -  is that it has a non-empty model, to avoid immediate collapse of the
  6.1141 -  HOL logic.  Moreover, one needs to demonstrate that the
  6.1142 -  interpretation of such free-form axiomatizations can coexist with
  6.1143 -  that of the regular @{command_def typedef} scheme, and any extension
  6.1144 -  that other people might have introduced elsewhere.
  6.1145 -  \end{warn}
  6.1146 -*}
  6.1147 -
  6.1148 -subsubsection {* Examples *}
  6.1149 -
  6.1150 -text {* Type definitions permit the introduction of abstract data
  6.1151 -  types in a safe way, namely by providing models based on already
  6.1152 -  existing types.  Given some abstract axiomatic description @{text P}
  6.1153 -  of a type, this involves two steps:
  6.1154 -
  6.1155 -  \begin{enumerate}
  6.1156 -
  6.1157 -  \item Find an appropriate type @{text \<tau>} and subset @{text A} which
  6.1158 -  has the desired properties @{text P}, and make a type definition
  6.1159 -  based on this representation.
  6.1160 -
  6.1161 -  \item Prove that @{text P} holds for @{text \<tau>} by lifting @{text P}
  6.1162 -  from the representation.
  6.1163 -
  6.1164 -  \end{enumerate}
  6.1165 -
  6.1166 -  You can later forget about the representation and work solely in
  6.1167 -  terms of the abstract properties @{text P}.
  6.1168 -
  6.1169 -  \medskip The following trivial example pulls a three-element type
  6.1170 -  into existence within the formal logical environment of HOL. *}
  6.1171 -
  6.1172 -typedef three = "{(True, True), (True, False), (False, True)}"
  6.1173 -  by blast
  6.1174 -
  6.1175 -definition "One = Abs_three (True, True)"
  6.1176 -definition "Two = Abs_three (True, False)"
  6.1177 -definition "Three = Abs_three (False, True)"
  6.1178 -
  6.1179 -lemma three_distinct: "One \<noteq> Two"  "One \<noteq> Three"  "Two \<noteq> Three"
  6.1180 -  by (simp_all add: One_def Two_def Three_def Abs_three_inject)
  6.1181 -
  6.1182 -lemma three_cases:
  6.1183 -  fixes x :: three obtains "x = One" | "x = Two" | "x = Three"
  6.1184 -  by (cases x) (auto simp: One_def Two_def Three_def Abs_three_inject)
  6.1185 -
  6.1186 -text {* Note that such trivial constructions are better done with
  6.1187 -  derived specification mechanisms such as @{command datatype}: *}
  6.1188 -
  6.1189 -datatype three' = One' | Two' | Three'
  6.1190 -
  6.1191 -text {* This avoids re-doing basic definitions and proofs from the
  6.1192 -  primitive @{command typedef} above. *}
  6.1193 -
  6.1194 -
  6.1195 -
  6.1196 -section {* Functorial structure of types *}
  6.1197 -
  6.1198 -text {*
  6.1199 -  \begin{matharray}{rcl}
  6.1200 -    @{command_def (HOL) "functor"} & : & @{text "local_theory \<rightarrow> proof(prove)"}
  6.1201 -  \end{matharray}
  6.1202 -
  6.1203 -  @{rail \<open>
  6.1204 -    @@{command (HOL) functor} (@{syntax name} ':')? @{syntax term}
  6.1205 -  \<close>}
  6.1206 -
  6.1207 -  \begin{description}
  6.1208 -
  6.1209 -  \item @{command (HOL) "functor"}~@{text "prefix: m"} allows to
  6.1210 -  prove and register properties about the functorial structure of type
  6.1211 -  constructors.  These properties then can be used by other packages
  6.1212 -  to deal with those type constructors in certain type constructions.
  6.1213 -  Characteristic theorems are noted in the current local theory.  By
  6.1214 -  default, they are prefixed with the base name of the type
  6.1215 -  constructor, an explicit prefix can be given alternatively.
  6.1216 -
  6.1217 -  The given term @{text "m"} is considered as \emph{mapper} for the
  6.1218 -  corresponding type constructor and must conform to the following
  6.1219 -  type pattern:
  6.1220 -
  6.1221 -  \begin{matharray}{lll}
  6.1222 -    @{text "m"} & @{text "::"} &
  6.1223 -      @{text "\<sigma>\<^sub>1 \<Rightarrow> \<dots> \<sigma>\<^sub>k \<Rightarrow> (\<^vec>\<alpha>\<^sub>n) t \<Rightarrow> (\<^vec>\<beta>\<^sub>n) t"} \\
  6.1224 -  \end{matharray}
  6.1225 -
  6.1226 -  \noindent where @{text t} is the type constructor, @{text
  6.1227 -  "\<^vec>\<alpha>\<^sub>n"} and @{text "\<^vec>\<beta>\<^sub>n"} are distinct
  6.1228 -  type variables free in the local theory and @{text "\<sigma>\<^sub>1"},
  6.1229 -  \ldots, @{text "\<sigma>\<^sub>k"} is a subsequence of @{text "\<alpha>\<^sub>1 \<Rightarrow>
  6.1230 -  \<beta>\<^sub>1"}, @{text "\<beta>\<^sub>1 \<Rightarrow> \<alpha>\<^sub>1"}, \ldots,
  6.1231 -  @{text "\<alpha>\<^sub>n \<Rightarrow> \<beta>\<^sub>n"}, @{text "\<beta>\<^sub>n \<Rightarrow>
  6.1232 -  \<alpha>\<^sub>n"}.
  6.1233 -
  6.1234 -  \end{description}
  6.1235 -*}
  6.1236 -
  6.1237 -
  6.1238 -section {* Quotient types *}
  6.1239 -
  6.1240 -text {*
  6.1241 -  \begin{matharray}{rcl}
  6.1242 -    @{command_def (HOL) "quotient_type"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
  6.1243 -    @{command_def (HOL) "quotient_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
  6.1244 -    @{command_def (HOL) "print_quotmapsQ3"} & : & @{text "context \<rightarrow>"}\\
  6.1245 -    @{command_def (HOL) "print_quotientsQ3"} & : & @{text "context \<rightarrow>"}\\
  6.1246 -    @{command_def (HOL) "print_quotconsts"} & : & @{text "context \<rightarrow>"}\\
  6.1247 -    @{method_def (HOL) "lifting"} & : & @{text method} \\
  6.1248 -    @{method_def (HOL) "lifting_setup"} & : & @{text method} \\
  6.1249 -    @{method_def (HOL) "descending"} & : & @{text method} \\
  6.1250 -    @{method_def (HOL) "descending_setup"} & : & @{text method} \\
  6.1251 -    @{method_def (HOL) "partiality_descending"} & : & @{text method} \\
  6.1252 -    @{method_def (HOL) "partiality_descending_setup"} & : & @{text method} \\
  6.1253 -    @{method_def (HOL) "regularize"} & : & @{text method} \\
  6.1254 -    @{method_def (HOL) "injection"} & : & @{text method} \\
  6.1255 -    @{method_def (HOL) "cleaning"} & : & @{text method} \\
  6.1256 -    @{attribute_def (HOL) "quot_thm"} & : & @{text attribute} \\
  6.1257 -    @{attribute_def (HOL) "quot_lifted"} & : & @{text attribute} \\
  6.1258 -    @{attribute_def (HOL) "quot_respect"} & : & @{text attribute} \\
  6.1259 -    @{attribute_def (HOL) "quot_preserve"} & : & @{text attribute} \\
  6.1260 -  \end{matharray}
  6.1261 -
  6.1262 -  The quotient package defines a new quotient type given a raw type
  6.1263 -  and a partial equivalence relation. The package also historically 
  6.1264 -  includes automation for transporting definitions and theorems. 
  6.1265 -  But most of this automation was superseded by the Lifting and Transfer
  6.1266 -  packages. The user should consider using these two new packages for
  6.1267 -  lifting definitions and transporting theorems.
  6.1268 -
  6.1269 -  @{rail \<open>
  6.1270 -    @@{command (HOL) quotient_type} (spec)
  6.1271 -    ;
  6.1272 -    spec: @{syntax typespec} @{syntax mixfix}? '=' \<newline>
  6.1273 -     @{syntax type} '/' ('partial' ':')? @{syntax term} \<newline>
  6.1274 -     (@'morphisms' @{syntax name} @{syntax name})? (@'parametric' @{syntax thmref})?
  6.1275 -  \<close>}
  6.1276 -
  6.1277 -  @{rail \<open>
  6.1278 -    @@{command (HOL) quotient_definition} constdecl? @{syntax thmdecl}? \<newline>
  6.1279 -    @{syntax term} 'is' @{syntax term}
  6.1280 -    ;
  6.1281 -    constdecl: @{syntax name} ('::' @{syntax type})? @{syntax mixfix}?
  6.1282 -  \<close>}
  6.1283 -
  6.1284 -  @{rail \<open>
  6.1285 -    @@{method (HOL) lifting} @{syntax thmrefs}?
  6.1286 -    ;
  6.1287 -    @@{method (HOL) lifting_setup} @{syntax thmrefs}?
  6.1288 -  \<close>}
  6.1289 -
  6.1290 -  \begin{description}
  6.1291 -
  6.1292 -  \item @{command (HOL) "quotient_type"} defines a new quotient type @{text \<tau>}. The
  6.1293 -  injection from a quotient type to a raw type is called @{text
  6.1294 -  rep_\<tau>}, its inverse @{text abs_\<tau>} unless explicit @{keyword (HOL)
  6.1295 -  "morphisms"} specification provides alternative names. @{command
  6.1296 -  (HOL) "quotient_type"} requires the user to prove that the relation
  6.1297 -  is an equivalence relation (predicate @{text equivp}), unless the
  6.1298 -  user specifies explicitly @{text partial} in which case the
  6.1299 -  obligation is @{text part_equivp}.  A quotient defined with @{text
  6.1300 -  partial} is weaker in the sense that less things can be proved
  6.1301 -  automatically.
  6.1302 -
  6.1303 -  The command internally proves a Quotient theorem and sets up the Lifting
  6.1304 -  package by the command @{command (HOL) setup_lifting}. Thus the Lifting 
  6.1305 -  and Transfer packages can be used also with quotient types defined by
  6.1306 -  @{command (HOL) "quotient_type"} without any extra set-up. The parametricity 
  6.1307 -  theorem for the equivalence relation R can be provided as an extra argument 
  6.1308 -  of the command and is passed to the corresponding internal call of @{command (HOL) setup_lifting}.
  6.1309 -  This theorem allows the Lifting package to generate a stronger transfer rule for equality.
  6.1310 -  
  6.1311 -  \end{description}
  6.1312 -
  6.1313 -  The most of the rest of the package was superseded by the Lifting and Transfer
  6.1314 -  packages. The user should consider using these two new packages for
  6.1315 -  lifting definitions and transporting theorems.
  6.1316 -
  6.1317 -  \begin{description}  
  6.1318 -
  6.1319 -  \item @{command (HOL) "quotient_definition"} defines a constant on
  6.1320 -  the quotient type.
  6.1321 -
  6.1322 -  \item @{command (HOL) "print_quotmapsQ3"} prints quotient map
  6.1323 -  functions.
  6.1324 -
  6.1325 -  \item @{command (HOL) "print_quotientsQ3"} prints quotients.
  6.1326 -
  6.1327 -  \item @{command (HOL) "print_quotconsts"} prints quotient constants.
  6.1328 -
  6.1329 -  \item @{method (HOL) "lifting"} and @{method (HOL) "lifting_setup"}
  6.1330 -    methods match the current goal with the given raw theorem to be
  6.1331 -    lifted producing three new subgoals: regularization, injection and
  6.1332 -    cleaning subgoals. @{method (HOL) "lifting"} tries to apply the
  6.1333 -    heuristics for automatically solving these three subgoals and
  6.1334 -    leaves only the subgoals unsolved by the heuristics to the user as
  6.1335 -    opposed to @{method (HOL) "lifting_setup"} which leaves the three
  6.1336 -    subgoals unsolved.
  6.1337 -
  6.1338 -  \item @{method (HOL) "descending"} and @{method (HOL)
  6.1339 -    "descending_setup"} try to guess a raw statement that would lift
  6.1340 -    to the current subgoal. Such statement is assumed as a new subgoal
  6.1341 -    and @{method (HOL) "descending"} continues in the same way as
  6.1342 -    @{method (HOL) "lifting"} does. @{method (HOL) "descending"} tries
  6.1343 -    to solve the arising regularization, injection and cleaning
  6.1344 -    subgoals with the analogous method @{method (HOL)
  6.1345 -    "descending_setup"} which leaves the four unsolved subgoals.
  6.1346 -
  6.1347 -  \item @{method (HOL) "partiality_descending"} finds the regularized
  6.1348 -    theorem that would lift to the current subgoal, lifts it and
  6.1349 -    leaves as a subgoal. This method can be used with partial
  6.1350 -    equivalence quotients where the non regularized statements would
  6.1351 -    not be true. @{method (HOL) "partiality_descending_setup"} leaves
  6.1352 -    the injection and cleaning subgoals unchanged.
  6.1353 -
  6.1354 -  \item @{method (HOL) "regularize"} applies the regularization
  6.1355 -    heuristics to the current subgoal.
  6.1356 -
  6.1357 -  \item @{method (HOL) "injection"} applies the injection heuristics
  6.1358 -    to the current goal using the stored quotient respectfulness
  6.1359 -    theorems.
  6.1360 -
  6.1361 -  \item @{method (HOL) "cleaning"} applies the injection cleaning
  6.1362 -    heuristics to the current subgoal using the stored quotient
  6.1363 -    preservation theorems.
  6.1364 -
  6.1365 -  \item @{attribute (HOL) quot_lifted} attribute tries to
  6.1366 -    automatically transport the theorem to the quotient type.
  6.1367 -    The attribute uses all the defined quotients types and quotient
  6.1368 -    constants often producing undesired results or theorems that
  6.1369 -    cannot be lifted.
  6.1370 -
  6.1371 -  \item @{attribute (HOL) quot_respect} and @{attribute (HOL)
  6.1372 -    quot_preserve} attributes declare a theorem as a respectfulness
  6.1373 -    and preservation theorem respectively.  These are stored in the
  6.1374 -    local theory store and used by the @{method (HOL) "injection"}
  6.1375 -    and @{method (HOL) "cleaning"} methods respectively.
  6.1376 -
  6.1377 -  \item @{attribute (HOL) quot_thm} declares that a certain theorem
  6.1378 -    is a quotient extension theorem. Quotient extension theorems
  6.1379 -    allow for quotienting inside container types. Given a polymorphic
  6.1380 -    type that serves as a container, a map function defined for this
  6.1381 -    container using @{command (HOL) "functor"} and a relation
  6.1382 -    map defined for for the container type, the quotient extension
  6.1383 -    theorem should be @{term "Quotient3 R Abs Rep \<Longrightarrow> Quotient3
  6.1384 -    (rel_map R) (map Abs) (map Rep)"}. Quotient extension theorems
  6.1385 -    are stored in a database and are used all the steps of lifting
  6.1386 -    theorems.
  6.1387 -
  6.1388 -  \end{description}
  6.1389 -*}
  6.1390 -
  6.1391 -
  6.1392 -section {* Definition by specification \label{sec:hol-specification} *}
  6.1393 -
  6.1394 -text {*
  6.1395 -  \begin{matharray}{rcl}
  6.1396 -    @{command_def (HOL) "specification"} & : & @{text "theory \<rightarrow> proof(prove)"} \\
  6.1397 -  \end{matharray}
  6.1398 -
  6.1399 -  @{rail \<open>
  6.1400 -    @@{command (HOL) specification} '(' (decl +) ')' \<newline>
  6.1401 -      (@{syntax thmdecl}? @{syntax prop} +)
  6.1402 -    ;
  6.1403 -    decl: (@{syntax name} ':')? @{syntax term} ('(' @'overloaded' ')')?
  6.1404 -  \<close>}
  6.1405 -
  6.1406 -  \begin{description}
  6.1407 -
  6.1408 -  \item @{command (HOL) "specification"}~@{text "decls \<phi>"} sets up a
  6.1409 -  goal stating the existence of terms with the properties specified to
  6.1410 -  hold for the constants given in @{text decls}.  After finishing the
  6.1411 -  proof, the theory will be augmented with definitions for the given
  6.1412 -  constants, as well as with theorems stating the properties for these
  6.1413 -  constants.
  6.1414 -
  6.1415 -  @{text decl} declares a constant to be defined by the
  6.1416 -  specification given.  The definition for the constant @{text c} is
  6.1417 -  bound to the name @{text c_def} unless a theorem name is given in
  6.1418 -  the declaration.  Overloaded constants should be declared as such.
  6.1419 -
  6.1420 -  \end{description}
  6.1421 -*}
  6.1422 -
  6.1423 -
  6.1424 -section {* Adhoc overloading of constants *}
  6.1425 -
  6.1426 -text {*
  6.1427 -  \begin{tabular}{rcll}
  6.1428 -  @{command_def "adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
  6.1429 -  @{command_def "no_adhoc_overloading"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
  6.1430 -  @{attribute_def "show_variants"} & : & @{text "attribute"} & default @{text false} \\
  6.1431 -  \end{tabular}
  6.1432 -
  6.1433 -  \medskip
  6.1434 -
  6.1435 -  Adhoc overloading allows to overload a constant depending on
  6.1436 -  its type. Typically this involves the introduction of an
  6.1437 -  uninterpreted constant (used for input and output) and the addition
  6.1438 -  of some variants (used internally). For examples see
  6.1439 -  @{file "~~/src/HOL/ex/Adhoc_Overloading_Examples.thy"} and
  6.1440 -  @{file "~~/src/HOL/Library/Monad_Syntax.thy"}.
  6.1441 -
  6.1442 -  @{rail \<open>
  6.1443 -    (@@{command adhoc_overloading} | @@{command no_adhoc_overloading})
  6.1444 -      (@{syntax nameref} (@{syntax term} + ) + @'and')
  6.1445 -  \<close>}
  6.1446 -
  6.1447 -  \begin{description}
  6.1448 -
  6.1449 -  \item @{command "adhoc_overloading"}~@{text "c v\<^sub>1 ... v\<^sub>n"}
  6.1450 -  associates variants with an existing constant.
  6.1451 -
  6.1452 -  \item @{command "no_adhoc_overloading"} is similar to
  6.1453 -  @{command "adhoc_overloading"}, but removes the specified variants
  6.1454 -  from the present context.
  6.1455 -  
  6.1456 -  \item @{attribute "show_variants"} controls printing of variants
  6.1457 -  of overloaded constants. If enabled, the internally used variants
  6.1458 -  are printed instead of their respective overloaded constants. This
  6.1459 -  is occasionally useful to check whether the system agrees with a
  6.1460 -  user's expectations about derived variants.
  6.1461 -
  6.1462 -  \end{description}
  6.1463 -*}
  6.1464 -
  6.1465 -chapter {* Proof tools *}
  6.1466 -
  6.1467 -section {* Adhoc tuples *}
  6.1468 -
  6.1469 -text {*
  6.1470 -  \begin{matharray}{rcl}
  6.1471 -    @{attribute_def (HOL) split_format}@{text "\<^sup>*"} & : & @{text attribute} \\
  6.1472 -  \end{matharray}
  6.1473 -
  6.1474 -  @{rail \<open>
  6.1475 -    @@{attribute (HOL) split_format} ('(' 'complete' ')')?
  6.1476 -  \<close>}
  6.1477 -
  6.1478 -  \begin{description}
  6.1479 -
  6.1480 -  \item @{attribute (HOL) split_format}\ @{text "(complete)"} causes
  6.1481 -  arguments in function applications to be represented canonically
  6.1482 -  according to their tuple type structure.
  6.1483 -
  6.1484 -  Note that this operation tends to invent funny names for new local
  6.1485 -  parameters introduced.
  6.1486 -
  6.1487 -  \end{description}
  6.1488 -*}
  6.1489 -
  6.1490 -
  6.1491 -section {* Transfer package *}
  6.1492 -
  6.1493 -text {*
  6.1494 -  \begin{matharray}{rcl}
  6.1495 -    @{method_def (HOL) "transfer"} & : & @{text method} \\
  6.1496 -    @{method_def (HOL) "transfer'"} & : & @{text method} \\
  6.1497 -    @{method_def (HOL) "transfer_prover"} & : & @{text method} \\
  6.1498 -    @{attribute_def (HOL) "Transfer.transferred"} & : & @{text attribute} \\
  6.1499 -    @{attribute_def (HOL) "untransferred"} & : & @{text attribute} \\
  6.1500 -    @{attribute_def (HOL) "transfer_rule"} & : & @{text attribute} \\
  6.1501 -    @{attribute_def (HOL) "transfer_domain_rule"} & : & @{text attribute} \\
  6.1502 -    @{attribute_def (HOL) "relator_eq"} & : & @{text attribute} \\
  6.1503 -    @{attribute_def (HOL) "relator_domain"} & : & @{text attribute} \\
  6.1504 -  \end{matharray}
  6.1505 -
  6.1506 -  \begin{description}
  6.1507 -
  6.1508 -  \item @{method (HOL) "transfer"} method replaces the current subgoal
  6.1509 -    with a logically equivalent one that uses different types and
  6.1510 -    constants. The replacement of types and constants is guided by the
  6.1511 -    database of transfer rules. Goals are generalized over all free
  6.1512 -    variables by default; this is necessary for variables whose types
  6.1513 -    change, but can be overridden for specific variables with e.g.
  6.1514 -    @{text "transfer fixing: x y z"}.
  6.1515 -
  6.1516 -  \item @{method (HOL) "transfer'"} is a variant of @{method (HOL)
  6.1517 -    transfer} that allows replacing a subgoal with one that is
  6.1518 -    logically stronger (rather than equivalent). For example, a
  6.1519 -    subgoal involving equality on a quotient type could be replaced
  6.1520 -    with a subgoal involving equality (instead of the corresponding
  6.1521 -    equivalence relation) on the underlying raw type.
  6.1522 -
  6.1523 -  \item @{method (HOL) "transfer_prover"} method assists with proving
  6.1524 -    a transfer rule for a new constant, provided the constant is
  6.1525 -    defined in terms of other constants that already have transfer
  6.1526 -    rules. It should be applied after unfolding the constant
  6.1527 -    definitions.
  6.1528 -
  6.1529 -  \item @{attribute (HOL) "untransferred"} proves the same equivalent theorem
  6.1530 -     as @{method (HOL) "transfer"} internally does.
  6.1531 -
  6.1532 -  \item @{attribute (HOL) Transfer.transferred} works in the opposite
  6.1533 -    direction than @{method (HOL) "transfer'"}. E.g., given the transfer
  6.1534 -    relation @{text "ZN x n \<equiv> (x = int n)"}, corresponding transfer rules and the theorem
  6.1535 -    @{text "\<forall>x::int \<in> {0..}. x < x + 1"}, the attribute would prove 
  6.1536 -    @{text "\<forall>n::nat. n < n + 1"}. The attribute is still in experimental
  6.1537 -    phase of development.
  6.1538 -
  6.1539 -  \item @{attribute (HOL) "transfer_rule"} attribute maintains a
  6.1540 -    collection of transfer rules, which relate constants at two
  6.1541 -    different types. Typical transfer rules may relate different type
  6.1542 -    instances of the same polymorphic constant, or they may relate an
  6.1543 -    operation on a raw type to a corresponding operation on an
  6.1544 -    abstract type (quotient or subtype). For example:
  6.1545 -
  6.1546 -    @{text "((A ===> B) ===> list_all2 A ===> list_all2 B) map map"}\\
  6.1547 -    @{text "(cr_int ===> cr_int ===> cr_int) (\<lambda>(x,y) (u,v). (x+u, y+v)) plus"}
  6.1548 -
  6.1549 -    Lemmas involving predicates on relations can also be registered
  6.1550 -    using the same attribute. For example:
  6.1551 -
  6.1552 -    @{text "bi_unique A \<Longrightarrow> (list_all2 A ===> op =) distinct distinct"}\\
  6.1553 -    @{text "\<lbrakk>bi_unique A; bi_unique B\<rbrakk> \<Longrightarrow> bi_unique (rel_prod A B)"}
  6.1554 -
  6.1555 -  \item @{attribute (HOL) "transfer_domain_rule"} attribute maintains a collection
  6.1556 -    of rules, which specify a domain of a transfer relation by a predicate.
  6.1557 -    E.g., given the transfer relation @{text "ZN x n \<equiv> (x = int n)"}, 
  6.1558 -    one can register the following transfer domain rule: 
  6.1559 -    @{text "Domainp ZN = (\<lambda>x. x \<ge> 0)"}. The rules allow the package to produce
  6.1560 -    more readable transferred goals, e.g., when quantifiers are transferred.
  6.1561 -
  6.1562 -  \item @{attribute (HOL) relator_eq} attribute collects identity laws
  6.1563 -    for relators of various type constructors, e.g. @{text "list_all2
  6.1564 -    (op =) = (op =)"}. The @{method (HOL) transfer} method uses these
  6.1565 -    lemmas to infer transfer rules for non-polymorphic constants on
  6.1566 -    the fly.
  6.1567 -
  6.1568 -  \item @{attribute_def (HOL) "relator_domain"} attribute collects rules 
  6.1569 -    describing domains of relators by predicators. E.g., @{text "Domainp A = P \<Longrightarrow>
  6.1570 -    Domainp (list_all2 A) = (list_all P)"}. This allows the package to lift transfer
  6.1571 -    domain rules through type constructors.
  6.1572 -
  6.1573 -  \end{description}
  6.1574 -
  6.1575 -  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
  6.1576 -*}
  6.1577 -
  6.1578 -
  6.1579 -section {* Lifting package *}
  6.1580 -
  6.1581 -text {*
  6.1582 -  The Lifting package allows users to lift terms of the raw type to the abstract type, which is 
  6.1583 -  a necessary step in building a library for an abstract type. Lifting defines a new constant 
  6.1584 -  by combining coercion functions (Abs and Rep) with the raw term. It also proves an appropriate 
  6.1585 -  transfer rule for the Transfer package and, if possible, an equation for the code generator.
  6.1586 -
  6.1587 -  The Lifting package provides two main commands: @{command (HOL) "setup_lifting"} for initializing 
  6.1588 -  the package to work with a new type, and @{command (HOL) "lift_definition"} for lifting constants. 
  6.1589 -  The Lifting package works with all four kinds of type abstraction: type copies, subtypes, 
  6.1590 -  total quotients and partial quotients.
  6.1591 -
  6.1592 -  Theoretical background can be found in \cite{Huffman-Kuncar:2013:lifting_transfer}.
  6.1593 -
  6.1594 -  \begin{matharray}{rcl}
  6.1595 -    @{command_def (HOL) "setup_lifting"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
  6.1596 -    @{command_def (HOL) "lift_definition"} & : & @{text "local_theory \<rightarrow> proof(prove)"}\\
  6.1597 -    @{command_def (HOL) "lifting_forget"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
  6.1598 -    @{command_def (HOL) "lifting_update"} & : & @{text "local_theory \<rightarrow> local_theory"}\\
  6.1599 -    @{command_def (HOL) "print_quot_maps"} & : & @{text "context \<rightarrow>"}\\
  6.1600 -    @{command_def (HOL) "print_quotients"} & : & @{text "context \<rightarrow>"}\\
  6.1601 -    @{attribute_def (HOL) "quot_map"} & : & @{text attribute} \\
  6.1602 -    @{attribute_def (HOL) "invariant_commute"} & : & @{text attribute} \\
  6.1603 -    @{attribute_def (HOL) "reflexivity_rule"} & : & @{text attribute} \\
  6.1604 -    @{attribute_def (HOL) "relator_mono"} & : & @{text attribute} \\
  6.1605 -    @{attribute_def (HOL) "relator_distr"} & : & @{text attribute} \\
  6.1606 -    @{attribute_def (HOL) "quot_del"} & : & @{text attribute} \\
  6.1607 -    @{attribute_def (HOL) "lifting_restore"} & : & @{text attribute} \\   
  6.1608 -  \end{matharray}
  6.1609 -
  6.1610 -  @{rail \<open>
  6.1611 -    @@{command (HOL) setup_lifting} ('(' 'no_code' ')')? \<newline>
  6.1612 -      @{syntax thmref} @{syntax thmref}? (@'parametric' @{syntax thmref})?;
  6.1613 -  \<close>}
  6.1614 -
  6.1615 -  @{rail \<open>
  6.1616 -    @@{command (HOL) lift_definition} @{syntax name} '::' @{syntax type}  @{syntax mixfix}? \<newline>
  6.1617 -      'is' @{syntax term} (@'parametric' @{syntax thmref})?;
  6.1618 -  \<close>}
  6.1619 -
  6.1620 -  @{rail \<open>
  6.1621 -    @@{command (HOL) lifting_forget} @{syntax nameref};
  6.1622 -  \<close>}
  6.1623 -
  6.1624 -  @{rail \<open>
  6.1625 -    @@{command (HOL) lifting_update} @{syntax nameref};
  6.1626 -  \<close>}
  6.1627 -
  6.1628 -  @{rail \<open>
  6.1629 -    @@{attribute (HOL) lifting_restore} @{syntax thmref} (@{syntax thmref} @{syntax thmref})?;
  6.1630 -  \<close>}
  6.1631 -
  6.1632 -  \begin{description}
  6.1633 -
  6.1634 -  \item @{command (HOL) "setup_lifting"} Sets up the Lifting package
  6.1635 -    to work with a user-defined type. 
  6.1636 -    The command supports two modes. The first one is a low-level mode when 
  6.1637 -    the user must provide as a first
  6.1638 -    argument of @{command (HOL) "setup_lifting"} a
  6.1639 -    quotient theorem @{text "Quotient R Abs Rep T"}. The
  6.1640 -    package configures a transfer rule for equality, a domain transfer
  6.1641 -    rules and sets up the @{command_def (HOL) "lift_definition"}
  6.1642 -    command to work with the abstract type. An optional theorem @{text "reflp R"}, which certifies that 
  6.1643 -    the equivalence relation R is total,
  6.1644 -    can be provided as a second argument. This allows the package to generate stronger transfer
  6.1645 -    rules. And finally, the parametricity theorem for R can be provided as a third argument.
  6.1646 -    This allows the package to generate a stronger transfer rule for equality.
  6.1647 -
  6.1648 -    Users generally will not prove the @{text Quotient} theorem manually for 
  6.1649 -    new types, as special commands exist to automate the process.
  6.1650 -    
  6.1651 -    When a new subtype is defined by @{command (HOL) typedef}, @{command (HOL) "lift_definition"} 
  6.1652 -    can be used in its
  6.1653 -    second mode, where only the type_definition theorem @{text "type_definition Rep Abs A"}
  6.1654 -    is used as an argument of the command. The command internally proves the corresponding 
  6.1655 -    Quotient theorem and registers it with @{command (HOL) setup_lifting} using its first mode.
  6.1656 -
  6.1657 -    For quotients, the command @{command (HOL) quotient_type} can be used. The command defines 
  6.1658 -    a new quotient type and similarly to the previous case, the corresponding Quotient theorem is proved 
  6.1659 -    and registered by @{command (HOL) setup_lifting}.
  6.1660 -    
  6.1661 -    The command @{command (HOL) "setup_lifting"} also sets up the code generator
  6.1662 -    for the new type. Later on, when a new constant is defined by @{command (HOL) "lift_definition"},
  6.1663 -    the Lifting package proves and registers a code equation (if there is one) for the new constant.
  6.1664 -    If the option @{text "no_code"} is specified, the Lifting package does not set up the code
  6.1665 -    generator and as a consequence no code equations involving an abstract type are registered
  6.1666 -    by @{command (HOL) "lift_definition"}.
  6.1667 -
  6.1668 -  \item @{command (HOL) "lift_definition"} @{text "f :: \<tau>"} @{keyword (HOL) "is"} @{text t}
  6.1669 -    Defines a new function @{text f} with an abstract type @{text \<tau>}
  6.1670 -    in terms of a corresponding operation @{text t} on a
  6.1671 -    representation type. More formally, if @{text "t :: \<sigma>"}, then
  6.1672 -    the command builds a term @{text "F"} as a corresponding combination of abstraction 
  6.1673 -    and representation functions such that @{text "F :: \<sigma> \<Rightarrow> \<tau>" } and 
  6.1674 -    defines @{text f} is as @{text "f \<equiv> F t"}.
  6.1675 -    The term @{text t} does not have to be necessarily a constant but it can be any term.
  6.1676 -
  6.1677 -    The command opens a proof environment and the user must discharge 
  6.1678 -    a respectfulness proof obligation. For a type copy, i.e., a typedef with @{text
  6.1679 -    UNIV}, the obligation is discharged automatically. The proof goal is
  6.1680 -    presented in a user-friendly, readable form. A respectfulness
  6.1681 -    theorem in the standard format @{text f.rsp} and a transfer rule
  6.1682 -    @{text f.transfer} for the Transfer package are generated by the
  6.1683 -    package.
  6.1684 -
  6.1685 -    The user can specify a parametricity theorem for @{text t} after the keyword 
  6.1686 -    @{keyword "parametric"}, which allows the command
  6.1687 -    to generate a parametric transfer rule for @{text f}.
  6.1688 -
  6.1689 -    For each constant defined through trivial quotients (type copies or
  6.1690 -    subtypes) @{text f.rep_eq} is generated. The equation is a code certificate
  6.1691 -    that defines @{text f} using the representation function.
  6.1692 -
  6.1693 -    For each constant @{text f.abs_eq} is generated. The equation is unconditional
  6.1694 -    for total quotients. The equation defines @{text f} using
  6.1695 -    the abstraction function.
  6.1696 -
  6.1697 -    Integration with [@{attribute code} abstract]: For subtypes (e.g.,
  6.1698 -    corresponding to a datatype invariant, such as dlist), @{command
  6.1699 -    (HOL) "lift_definition"} uses a code certificate theorem
  6.1700 -    @{text f.rep_eq} as a code equation.
  6.1701 -
  6.1702 -    Integration with [@{attribute code} equation]: For total quotients, @{command
  6.1703 -    (HOL) "lift_definition"} uses @{text f.abs_eq} as a code equation.
  6.1704 -
  6.1705 -  \item @{command (HOL) lifting_forget} and  @{command (HOL) lifting_update}
  6.1706 -    These two commands serve for storing and deleting the set-up of
  6.1707 -    the Lifting package and corresponding transfer rules defined by this package.
  6.1708 -    This is useful for hiding of type construction details of an abstract type 
  6.1709 -    when the construction is finished but it still allows additions to this construction
  6.1710 -    when this is later necessary.
  6.1711 -
  6.1712 -    Whenever the Lifting package is set up with a new abstract type @{text "\<tau>"} by  
  6.1713 -    @{command_def (HOL) "lift_definition"}, the package defines a new bundle
  6.1714 -    that is called @{text "\<tau>.lifting"}. This bundle already includes set-up for the Lifting package. 
  6.1715 -    The new transfer rules
  6.1716 -    introduced by @{command (HOL) "lift_definition"} can be stored in the bundle by
  6.1717 -    the command @{command (HOL) "lifting_update"} @{text "\<tau>.lifting"}.
  6.1718 -
  6.1719 -    The command @{command (HOL) "lifting_forget"} @{text "\<tau>.lifting"} deletes set-up of the Lifting 
  6.1720 -    package
  6.1721 -    for @{text \<tau>} and deletes all the transfer rules that were introduced
  6.1722 -    by @{command (HOL) "lift_definition"} using @{text \<tau>} as an abstract type.
  6.1723 -
  6.1724 -    The stored set-up in a bundle can be reintroduced by the Isar commands for including a bundle
  6.1725 -    (@{command "include"}, @{keyword "includes"} and @{command "including"}).
  6.1726 -
  6.1727 -  \item @{command (HOL) "print_quot_maps"} prints stored quotient map
  6.1728 -    theorems.
  6.1729 -
  6.1730 -  \item @{command (HOL) "print_quotients"} prints stored quotient
  6.1731 -    theorems.
  6.1732 -
  6.1733 -  \item @{attribute (HOL) quot_map} registers a quotient map
  6.1734 -    theorem. E.g., @{text "Quotient R Abs Rep T \<Longrightarrow> 
  6.1735 -    Quotient (list_all2 R) (map Abs) (map Rep) (list_all2 T)"}. 
  6.1736 -    For examples see @{file
  6.1737 -    "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files
  6.1738 -    in the same directory.
  6.1739 -
  6.1740 -  \item @{attribute (HOL) invariant_commute} registers a theorem that
  6.1741 -    shows a relationship between the constant @{text
  6.1742 -    Lifting.invariant} (used for internal encoding of proper subtypes)
  6.1743 -    and a relator.  Such theorems allows the package to hide @{text
  6.1744 -    Lifting.invariant} from a user in a user-readable form of a
  6.1745 -    respectfulness theorem. For examples see @{file
  6.1746 -    "~~/src/HOL/List.thy"} or Lifting_*.thy files in the same directory.
  6.1747 -
  6.1748 -  \item @{attribute (HOL) reflexivity_rule} registers a theorem that shows
  6.1749 -    that a relator respects left-totality and left_uniqueness. For examples 
  6.1750 -    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} or Lifting_*.thy files 
  6.1751 -    in the same directory.
  6.1752 -    The property is used in a reflexivity prover, which is used for discharging respectfulness
  6.1753 -    theorems for type copies and also for discharging assumptions of abstraction function equations.
  6.1754 -
  6.1755 -  \item @{attribute (HOL) "relator_mono"} registers a property describing a monotonicity of a relator.
  6.1756 -    E.g., @{text "A \<le> B \<Longrightarrow> list_all2 A \<le> list_all2 B"}. For examples 
  6.1757 -    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
  6.1758 -    or Lifting_*.thy files in the same directory.
  6.1759 -    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
  6.1760 -    when a parametricity theorem for the raw term is specified.
  6.1761 -
  6.1762 -  \item @{attribute (HOL) "relator_distr"} registers a property describing a distributivity
  6.1763 -    of the relation composition and a relator. E.g., 
  6.1764 -    @{text "list_all2 R \<circ>\<circ> list_all2 S = list_all2 (R \<circ>\<circ> S)"}. 
  6.1765 -    This property is needed for proving a stronger transfer rule in @{command_def (HOL) "lift_definition"}
  6.1766 -    when a parametricity theorem for the raw term is specified.
  6.1767 -    When this equality does not hold unconditionally (e.g., for the function type), the user can specified
  6.1768 -    each direction separately and also register multiple theorems with different set of assumptions.
  6.1769 -    This attribute can be used only after the monotonicity property was already registered by
  6.1770 -    @{attribute (HOL) "relator_mono"}. For examples 
  6.1771 -    see @{file "~~/src/HOL/List.thy"} or @{file "~~/src/HOL/Lifting.thy"} 
  6.1772 -    or Lifting_*.thy files in the same directory.
  6.1773 -
  6.1774 -  \item @{attribute (HOL) quot_del} deletes a corresponding Quotient theorem
  6.1775 -    from the Lifting infrastructure and thus de-register the corresponding quotient. 
  6.1776 -    This effectively causes that @{command (HOL) lift_definition}  will not
  6.1777 -    do any lifting for the corresponding type. This attribute is rather used for low-level
  6.1778 -    manipulation with set-up of the Lifting package because @{command (HOL) lifting_forget} is
  6.1779 -    preferred for normal usage.
  6.1780 -
  6.1781 -  \item @{attribute (HOL) lifting_restore} @{text "Quotient_thm pcr_def pcr_cr_eq_thm"} 
  6.1782 -    registers the Quotient theorem @{text Quotient_thm} in the Lifting infrastructure 
  6.1783 -    and thus sets up lifting for an abstract type @{text \<tau>} (that is defined by @{text Quotient_thm}).
  6.1784 -    Optional theorems @{text pcr_def} and @{text pcr_cr_eq_thm} can be specified to register 
  6.1785 -    the parametrized
  6.1786 -    correspondence relation for @{text \<tau>}. E.g., for @{text "'a dlist"}, @{text pcr_def} is
  6.1787 -    @{text "pcr_dlist A \<equiv> list_all2 A \<circ>\<circ> cr_dlist"} and @{text pcr_cr_eq_thm} is 
  6.1788 -    @{text "pcr_dlist op= = op="}.
  6.1789 -    This attribute is rather used for low-level
  6.1790 -    manipulation with set-up of the Lifting package because using of the bundle @{text \<tau>.lifting} 
  6.1791 -    together with the commands @{command (HOL) lifting_forget} and @{command (HOL) lifting_update} is
  6.1792 -    preferred for normal usage.
  6.1793 -
  6.1794 -  \end{description}
  6.1795 -*}
  6.1796 -
  6.1797 -
  6.1798 -section {* Coercive subtyping *}
  6.1799 -
  6.1800 -text {*
  6.1801 -  \begin{matharray}{rcl}
  6.1802 -    @{attribute_def (HOL) coercion} & : & @{text attribute} \\
  6.1803 -    @{attribute_def (HOL) coercion_enabled} & : & @{text attribute} \\
  6.1804 -    @{attribute_def (HOL) coercion_map} & : & @{text attribute} \\
  6.1805 -  \end{matharray}
  6.1806 -
  6.1807 -  Coercive subtyping allows the user to omit explicit type
  6.1808 -  conversions, also called \emph{coercions}.  Type inference will add
  6.1809 -  them as necessary when parsing a term. See
  6.1810 -  \cite{traytel-berghofer-nipkow-2011} for details.
  6.1811 -
  6.1812 -  @{rail \<open>
  6.1813 -    @@{attribute (HOL) coercion} (@{syntax term})?
  6.1814 -    ;
  6.1815 -    @@{attribute (HOL) coercion_map} (@{syntax term})?
  6.1816 -  \<close>}
  6.1817 -
  6.1818 -  \begin{description}
  6.1819 -
  6.1820 -  \item @{attribute (HOL) "coercion"}~@{text "f"} registers a new
  6.1821 -  coercion function @{text "f :: \<sigma>\<^sub>1 \<Rightarrow> \<sigma>\<^sub>2"} where @{text "\<sigma>\<^sub>1"} and
  6.1822 -  @{text "\<sigma>\<^sub>2"} are type constructors without arguments.  Coercions are
  6.1823 -  composed by the inference algorithm if needed.  Note that the type
  6.1824 -  inference algorithm is complete only if the registered coercions
  6.1825 -  form a lattice.
  6.1826 -
  6.1827 -  \item @{attribute (HOL) "coercion_map"}~@{text "map"} registers a
  6.1828 -  new map function to lift coercions through type constructors. The
  6.1829 -  function @{text "map"} must conform to the following type pattern
  6.1830 -
  6.1831 -  \begin{matharray}{lll}
  6.1832 -    @{text "map"} & @{text "::"} &
  6.1833 -      @{text "f\<^sub>1 \<Rightarrow> \<dots> \<Rightarrow> f\<^sub>n \<Rightarrow> (\<alpha>\<^sub>1, \<dots>, \<alpha>\<^sub>n) t \<Rightarrow> (\<beta>\<^sub>1, \<dots>, \<beta>\<^sub>n) t"} \\
  6.1834 -  \end{matharray}
  6.1835 -
  6.1836 -  where @{text "t"} is a type constructor and @{text "f\<^sub>i"} is of type
  6.1837 -  @{text "\<alpha>\<^sub>i \<Rightarrow> \<beta>\<^sub>i"} or @{text "\<beta>\<^sub>i \<Rightarrow> \<alpha>\<^sub>i"}.  Registering a map function
  6.1838 -  overwrites any existing map function for this particular type
  6.1839 -  constructor.
  6.1840 -
  6.1841 -  \item @{attribute (HOL) "coercion_enabled"} enables the coercion
  6.1842 -  inference algorithm.
  6.1843 -
  6.1844 -  \end{description}
  6.1845 -*}
  6.1846 -
  6.1847 -
  6.1848 -section {* Arithmetic proof support *}
  6.1849 -
  6.1850 -text {*
  6.1851 -  \begin{matharray}{rcl}
  6.1852 -    @{method_def (HOL) arith} & : & @{text method} \\
  6.1853 -    @{attribute_def (HOL) arith} & : & @{text attribute} \\
  6.1854 -    @{attribute_def (HOL) arith_split} & : & @{text attribute} \\
  6.1855 -  \end{matharray}
  6.1856 -
  6.1857 -  \begin{description}
  6.1858 -
  6.1859 -  \item @{method (HOL) arith} decides linear arithmetic problems (on
  6.1860 -  types @{text nat}, @{text int}, @{text real}).  Any current facts
  6.1861 -  are inserted into the goal before running the procedure.
  6.1862 -
  6.1863 -  \item @{attribute (HOL) arith} declares facts that are supplied to
  6.1864 -  the arithmetic provers implicitly.
  6.1865 -
  6.1866 -  \item @{attribute (HOL) arith_split} attribute declares case split
  6.1867 -  rules to be expanded before @{method (HOL) arith} is invoked.
  6.1868 -
  6.1869 -  \end{description}
  6.1870 -
  6.1871 -  Note that a simpler (but faster) arithmetic prover is already
  6.1872 -  invoked by the Simplifier.
  6.1873 -*}
  6.1874 -
  6.1875 -
  6.1876 -section {* Intuitionistic proof search *}
  6.1877 -
  6.1878 -text {*
  6.1879 -  \begin{matharray}{rcl}
  6.1880 -    @{method_def (HOL) iprover} & : & @{text method} \\
  6.1881 -  \end{matharray}
  6.1882 -
  6.1883 -  @{rail \<open>
  6.1884 -    @@{method (HOL) iprover} (@{syntax rulemod} *)
  6.1885 -  \<close>}
  6.1886 -
  6.1887 -  \begin{description}
  6.1888 -
  6.1889 -  \item @{method (HOL) iprover} performs intuitionistic proof search,
  6.1890 -  depending on specifically declared rules from the context, or given
  6.1891 -  as explicit arguments.  Chained facts are inserted into the goal
  6.1892 -  before commencing proof search.
  6.1893 -
  6.1894 -  Rules need to be classified as @{attribute (Pure) intro},
  6.1895 -  @{attribute (Pure) elim}, or @{attribute (Pure) dest}; here the
  6.1896 -  ``@{text "!"}'' indicator refers to ``safe'' rules, which may be
  6.1897 -  applied aggressively (without considering back-tracking later).
  6.1898 -  Rules declared with ``@{text "?"}'' are ignored in proof search (the
  6.1899 -  single-step @{method (Pure) rule} method still observes these).  An
  6.1900 -  explicit weight annotation may be given as well; otherwise the
  6.1901 -  number of rule premises will be taken into account here.
  6.1902 -
  6.1903 -  \end{description}
  6.1904 -*}
  6.1905 -
  6.1906 -
  6.1907 -section {* Model Elimination and Resolution *}
  6.1908 -
  6.1909 -text {*
  6.1910 -  \begin{matharray}{rcl}
  6.1911 -    @{method_def (HOL) "meson"} & : & @{text method} \\
  6.1912 -    @{method_def (HOL) "metis"} & : & @{text method} \\
  6.1913 -  \end{matharray}
  6.1914 -
  6.1915 -  @{rail \<open>
  6.1916 -    @@{method (HOL) meson} @{syntax thmrefs}?
  6.1917 -    ;
  6.1918 -    @@{method (HOL) metis}
  6.1919 -      ('(' ('partial_types' | 'full_types' | 'no_types' | @{syntax name}) ')')?
  6.1920 -      @{syntax thmrefs}?
  6.1921 -  \<close>}
  6.1922 -
  6.1923 -  \begin{description}
  6.1924 -
  6.1925 -  \item @{method (HOL) meson} implements Loveland's model elimination
  6.1926 -  procedure \cite{loveland-78}.  See @{file
  6.1927 -  "~~/src/HOL/ex/Meson_Test.thy"} for examples.
  6.1928 -
  6.1929 -  \item @{method (HOL) metis} combines ordered resolution and ordered
  6.1930 -  paramodulation to find first-order (or mildly higher-order) proofs.
  6.1931 -  The first optional argument specifies a type encoding; see the
  6.1932 -  Sledgehammer manual \cite{isabelle-sledgehammer} for details.  The
  6.1933 -  directory @{file "~~/src/HOL/Metis_Examples"} contains several small
  6.1934 -  theories developed to a large extent using @{method (HOL) metis}.
  6.1935 -
  6.1936 -  \end{description}
  6.1937 -*}
  6.1938 -
  6.1939 -
  6.1940 -section {* Algebraic reasoning via Gr\"obner bases *}
  6.1941 -
  6.1942 -text {*
  6.1943 -  \begin{matharray}{rcl}
  6.1944 -    @{method_def (HOL) "algebra"} & : & @{text method} \\
  6.1945 -    @{attribute_def (HOL) algebra} & : & @{text attribute} \\
  6.1946 -  \end{matharray}
  6.1947 -
  6.1948 -  @{rail \<open>
  6.1949 -    @@{method (HOL) algebra}
  6.1950 -      ('add' ':' @{syntax thmrefs})?
  6.1951 -      ('del' ':' @{syntax thmrefs})?
  6.1952 -    ;
  6.1953 -    @@{attribute (HOL) algebra} (() | 'add' | 'del')
  6.1954 -  \<close>}
  6.1955 -
  6.1956 -  \begin{description}
  6.1957 -
  6.1958 -  \item @{method (HOL) algebra} performs algebraic reasoning via
  6.1959 -  Gr\"obner bases, see also \cite{Chaieb-Wenzel:2007} and
  6.1960 -  \cite[\S3.2]{Chaieb-thesis}. The method handles deals with two main
  6.1961 -  classes of problems:
  6.1962 -
  6.1963 -  \begin{enumerate}
  6.1964 -
  6.1965 -  \item Universal problems over multivariate polynomials in a
  6.1966 -  (semi)-ring/field/idom; the capabilities of the method are augmented
  6.1967 -  according to properties of these structures. For this problem class
  6.1968 -  the method is only complete for algebraically closed fields, since
  6.1969 -  the underlying method is based on Hilbert's Nullstellensatz, where
  6.1970 -  the equivalence only holds for algebraically closed fields.
  6.1971 -
  6.1972 -  The problems can contain equations @{text "p = 0"} or inequations
  6.1973 -  @{text "q \<noteq> 0"} anywhere within a universal problem statement.
  6.1974 -
  6.1975 -  \item All-exists problems of the following restricted (but useful)
  6.1976 -  form:
  6.1977 -
  6.1978 -  @{text [display] "\<forall>x\<^sub>1 \<dots> x\<^sub>n.
  6.1979 -    e\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<and> \<dots> \<and> e\<^sub>m(x\<^sub>1, \<dots>, x\<^sub>n) = 0 \<longrightarrow>
  6.1980 -    (\<exists>y\<^sub>1 \<dots> y\<^sub>k.
  6.1981 -      p\<^sub>1\<^sub>1(x\<^sub>1, \<dots> ,x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>1\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0 \<and>
  6.1982 -      \<dots> \<and>
  6.1983 -      p\<^sub>t\<^sub>1(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>1 + \<dots> + p\<^sub>t\<^sub>k(x\<^sub>1, \<dots>, x\<^sub>n) * y\<^sub>k = 0)"}
  6.1984 -
  6.1985 -  Here @{text "e\<^sub>1, \<dots>, e\<^sub>n"} and the @{text "p\<^sub>i\<^sub>j"} are multivariate
  6.1986 -  polynomials only in the variables mentioned as arguments.
  6.1987 -
  6.1988 -  \end{enumerate}
  6.1989 -
  6.1990 -  The proof method is preceded by a simplification step, which may be
  6.1991 -  modified by using the form @{text "(algebra add: ths\<^sub>1 del: ths\<^sub>2)"}.
  6.1992 -  This acts like declarations for the Simplifier
  6.1993 -  (\secref{sec:simplifier}) on a private simpset for this tool.
  6.1994 -
  6.1995 -  \item @{attribute algebra} (as attribute) manages the default
  6.1996 -  collection of pre-simplification rules of the above proof method.
  6.1997 -
  6.1998 -  \end{description}
  6.1999 -*}
  6.2000 -
  6.2001 -
  6.2002 -subsubsection {* Example *}
  6.2003 -
  6.2004 -text {* The subsequent example is from geometry: collinearity is
  6.2005 -  invariant by rotation.  *}
  6.2006 -
  6.2007 -type_synonym point = "int \<times> int"
  6.2008 -
  6.2009 -fun collinear :: "point \<Rightarrow> point \<Rightarrow> point \<Rightarrow> bool" where
  6.2010 -  "collinear (Ax, Ay) (Bx, By) (Cx, Cy) \<longleftrightarrow>
  6.2011 -    (Ax - Bx) * (By - Cy) = (Ay - By) * (Bx - Cx)"
  6.2012 -
  6.2013 -lemma collinear_inv_rotation:
  6.2014 -  assumes "collinear (Ax, Ay) (Bx, By) (Cx, Cy)" and "c\<^sup>2 + s\<^sup>2 = 1"
  6.2015 -  shows "collinear (Ax * c - Ay * s, Ay * c + Ax * s)
  6.2016 -    (Bx * c - By * s, By * c + Bx * s) (Cx * c - Cy * s, Cy * c + Cx * s)"
  6.2017 -  using assms by (algebra add: collinear.simps)
  6.2018 -
  6.2019 -text {*
  6.2020 - See also @{file "~~/src/HOL/ex/Groebner_Examples.thy"}.
  6.2021 -*}
  6.2022 -
  6.2023 -
  6.2024 -section {* Coherent Logic *}
  6.2025 -
  6.2026 -text {*
  6.2027 -  \begin{matharray}{rcl}
  6.2028 -    @{method_def (HOL) "coherent"} & : & @{text method} \\
  6.2029 -  \end{matharray}
  6.2030 -
  6.2031 -  @{rail \<open>
  6.2032 -    @@{method (HOL) coherent} @{syntax thmrefs}?
  6.2033 -  \<close>}
  6.2034 -
  6.2035 -  \begin{description}
  6.2036 -
  6.2037 -  \item @{method (HOL) coherent} solves problems of \emph{Coherent
  6.2038 -  Logic} \cite{Bezem-Coquand:2005}, which covers applications in
  6.2039 -  confluence theory, lattice theory and projective geometry.  See
  6.2040 -  @{file "~~/src/HOL/ex/Coherent.thy"} for some examples.
  6.2041 -
  6.2042 -  \end{description}
  6.2043 -*}
  6.2044 -
  6.2045 -
  6.2046 -section {* Proving propositions *}
  6.2047 -
  6.2048 -text {*
  6.2049 -  In addition to the standard proof methods, a number of diagnosis
  6.2050 -  tools search for proofs and provide an Isar proof snippet on success.
  6.2051 -  These tools are available via the following commands.
  6.2052 -
  6.2053 -  \begin{matharray}{rcl}
  6.2054 -    @{command_def (HOL) "solve_direct"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2055 -    @{command_def (HOL) "try"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2056 -    @{command_def (HOL) "try0"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2057 -    @{command_def (HOL) "sledgehammer"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2058 -    @{command_def (HOL) "sledgehammer_params"} & : & @{text "theory \<rightarrow> theory"}
  6.2059 -  \end{matharray}
  6.2060 -
  6.2061 -  @{rail \<open>
  6.2062 -    @@{command (HOL) try}
  6.2063 -    ;
  6.2064 -
  6.2065 -    @@{command (HOL) try0} ( ( ( 'simp' | 'intro' | 'elim' | 'dest' ) ':' @{syntax thmrefs} ) + ) ?
  6.2066 -      @{syntax nat}?
  6.2067 -    ;
  6.2068 -
  6.2069 -    @@{command (HOL) sledgehammer} ( '[' args ']' )? facts? @{syntax nat}?
  6.2070 -    ;
  6.2071 -
  6.2072 -    @@{command (HOL) sledgehammer_params} ( ( '[' args ']' ) ? )
  6.2073 -    ;
  6.2074 -    args: ( @{syntax name} '=' value + ',' )
  6.2075 -    ;
  6.2076 -    facts: '(' ( ( ( ( 'add' | 'del' ) ':' ) ? @{syntax thmrefs} ) + ) ? ')'
  6.2077 -  \<close>} % FIXME check args "value"
  6.2078 -
  6.2079 -  \begin{description}
  6.2080 -
  6.2081 -  \item @{command (HOL) "solve_direct"} checks whether the current
  6.2082 -  subgoals can be solved directly by an existing theorem. Duplicate
  6.2083 -  lemmas can be detected in this way.
  6.2084 -
  6.2085 -  \item @{command (HOL) "try0"} attempts to prove a subgoal
  6.2086 -  using a combination of standard proof methods (@{method auto},
  6.2087 -  @{method simp}, @{method blast}, etc.).  Additional facts supplied
  6.2088 -  via @{text "simp:"}, @{text "intro:"}, @{text "elim:"}, and @{text
  6.2089 -  "dest:"} are passed to the appropriate proof methods.
  6.2090 -
  6.2091 -  \item @{command (HOL) "try"} attempts to prove or disprove a subgoal
  6.2092 -  using a combination of provers and disprovers (@{command (HOL)
  6.2093 -  "solve_direct"}, @{command (HOL) "quickcheck"}, @{command (HOL)
  6.2094 -  "try0"}, @{command (HOL) "sledgehammer"}, @{command (HOL)
  6.2095 -  "nitpick"}).
  6.2096 -
  6.2097 -  \item @{command (HOL) "sledgehammer"} attempts to prove a subgoal
  6.2098 -  using external automatic provers (resolution provers and SMT
  6.2099 -  solvers). See the Sledgehammer manual \cite{isabelle-sledgehammer}
  6.2100 -  for details.
  6.2101 -
  6.2102 -  \item @{command (HOL) "sledgehammer_params"} changes @{command (HOL)
  6.2103 -  "sledgehammer"} configuration options persistently.
  6.2104 -
  6.2105 -  \end{description}
  6.2106 -*}
  6.2107 -
  6.2108 -
  6.2109 -section {* Checking and refuting propositions *}
  6.2110 -
  6.2111 -text {*
  6.2112 -  Identifying incorrect propositions usually involves evaluation of
  6.2113 -  particular assignments and systematic counterexample search.  This
  6.2114 -  is supported by the following commands.
  6.2115 -
  6.2116 -  \begin{matharray}{rcl}
  6.2117 -    @{command_def (HOL) "value"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2118 -    @{command_def (HOL) "values"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2119 -    @{command_def (HOL) "quickcheck"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2120 -    @{command_def (HOL) "nitpick"}@{text "\<^sup>*"} & : & @{text "proof \<rightarrow>"} \\
  6.2121 -    @{command_def (HOL) "quickcheck_params"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2122 -    @{command_def (HOL) "nitpick_params"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2123 -    @{command_def (HOL) "quickcheck_generator"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2124 -    @{command_def (HOL) "find_unused_assms"} & : & @{text "context \<rightarrow>"}
  6.2125 -  \end{matharray}
  6.2126 -
  6.2127 -  @{rail \<open>
  6.2128 -    @@{command (HOL) value} ( '[' @{syntax name} ']' )? modes? @{syntax term}
  6.2129 -    ;
  6.2130 -
  6.2131 -    @@{command (HOL) values} modes? @{syntax nat}? @{syntax term}
  6.2132 -    ;
  6.2133 -
  6.2134 -    (@@{command (HOL) quickcheck} | @@{command (HOL) nitpick})
  6.2135 -      ( '[' args ']' )? @{syntax nat}?
  6.2136 -    ;
  6.2137 -
  6.2138 -    (@@{command (HOL) quickcheck_params} |
  6.2139 -      @@{command (HOL) nitpick_params}) ( '[' args ']' )?
  6.2140 -    ;
  6.2141 -
  6.2142 -    @@{command (HOL) quickcheck_generator} @{syntax nameref} \<newline>
  6.2143 -      'operations:' ( @{syntax term} +)
  6.2144 -    ;
  6.2145 -
  6.2146 -    @@{command (HOL) find_unused_assms} @{syntax name}?
  6.2147 -    ;
  6.2148 -    modes: '(' (@{syntax name} +) ')'
  6.2149 -    ;
  6.2150 -    args: ( @{syntax name} '=' value + ',' )
  6.2151 -  \<close>} % FIXME check "value"
  6.2152 -
  6.2153 -  \begin{description}
  6.2154 -
  6.2155 -  \item @{command (HOL) "value"}~@{text t} evaluates and prints a
  6.2156 -  term; optionally @{text modes} can be specified, which are appended
  6.2157 -  to the current print mode; see \secref{sec:print-modes}.
  6.2158 -  Internally, the evaluation is performed by registered evaluators,
  6.2159 -  which are invoked sequentially until a result is returned.
  6.2160 -  Alternatively a specific evaluator can be selected using square
  6.2161 -  brackets; typical evaluators use the current set of code equations
  6.2162 -  to normalize and include @{text simp} for fully symbolic evaluation
  6.2163 -  using the simplifier, @{text nbe} for \emph{normalization by
  6.2164 -  evaluation} and \emph{code} for code generation in SML.
  6.2165 -
  6.2166 -  \item @{command (HOL) "values"}~@{text t} enumerates a set
  6.2167 -  comprehension by evaluation and prints its values up to the given
  6.2168 -  number of solutions; optionally @{text modes} can be specified,
  6.2169 -  which are appended to the current print mode; see
  6.2170 -  \secref{sec:print-modes}.
  6.2171 -
  6.2172 -  \item @{command (HOL) "quickcheck"} tests the current goal for
  6.2173 -  counterexamples using a series of assignments for its free
  6.2174 -  variables; by default the first subgoal is tested, an other can be
  6.2175 -  selected explicitly using an optional goal index.  Assignments can
  6.2176 -  be chosen exhausting the search space up to a given size, or using a
  6.2177 -  fixed number of random assignments in the search space, or exploring
  6.2178 -  the search space symbolically using narrowing.  By default,
  6.2179 -  quickcheck uses exhaustive testing.  A number of configuration
  6.2180 -  options are supported for @{command (HOL) "quickcheck"}, notably:
  6.2181 -
  6.2182 -    \begin{description}
  6.2183 -
  6.2184 -    \item[@{text tester}] specifies which testing approach to apply.
  6.2185 -    There are three testers, @{text exhaustive}, @{text random}, and
  6.2186 -    @{text narrowing}.  An unknown configuration option is treated as
  6.2187 -    an argument to tester, making @{text "tester ="} optional.  When
  6.2188 -    multiple testers are given, these are applied in parallel.  If no
  6.2189 -    tester is specified, quickcheck uses the testers that are set
  6.2190 -    active, i.e., configurations @{attribute
  6.2191 -    quickcheck_exhaustive_active}, @{attribute
  6.2192 -    quickcheck_random_active}, @{attribute
  6.2193 -    quickcheck_narrowing_active} are set to true.
  6.2194 -
  6.2195 -    \item[@{text size}] specifies the maximum size of the search space
  6.2196 -    for assignment values.
  6.2197 -
  6.2198 -    \item[@{text genuine_only}] sets quickcheck only to return genuine
  6.2199 -    counterexample, but not potentially spurious counterexamples due
  6.2200 -    to underspecified functions.
  6.2201 -
  6.2202 -    \item[@{text abort_potential}] sets quickcheck to abort once it
  6.2203 -    found a potentially spurious counterexample and to not continue
  6.2204 -    to search for a further genuine counterexample.
  6.2205 -    For this option to be effective, the @{text genuine_only} option
  6.2206 -    must be set to false.
  6.2207 -
  6.2208 -    \item[@{text eval}] takes a term or a list of terms and evaluates
  6.2209 -    these terms under the variable assignment found by quickcheck.
  6.2210 -    This option is currently only supported by the default
  6.2211 -    (exhaustive) tester.
  6.2212 -
  6.2213 -    \item[@{text iterations}] sets how many sets of assignments are
  6.2214 -    generated for each particular size.
  6.2215 -
  6.2216 -    \item[@{text no_assms}] specifies whether assumptions in
  6.2217 -    structured proofs should be ignored.
  6.2218 -
  6.2219 -    \item[@{text locale}] specifies how to process conjectures in
  6.2220 -    a locale context, i.e., they can be interpreted or expanded.
  6.2221 -    The option is a whitespace-separated list of the two words
  6.2222 -    @{text interpret} and @{text expand}. The list determines the
  6.2223 -    order they are employed. The default setting is to first use
  6.2224 -    interpretations and then test the expanded conjecture.
  6.2225 -    The option is only provided as attribute declaration, but not
  6.2226 -    as parameter to the command.
  6.2227 -
  6.2228 -    \item[@{text timeout}] sets the time limit in seconds.
  6.2229 -
  6.2230 -    \item[@{text default_type}] sets the type(s) generally used to
  6.2231 -    instantiate type variables.
  6.2232 -
  6.2233 -    \item[@{text report}] if set quickcheck reports how many tests
  6.2234 -    fulfilled the preconditions.
  6.2235 -
  6.2236 -    \item[@{text use_subtype}] if set quickcheck automatically lifts
  6.2237 -    conjectures to registered subtypes if possible, and tests the
  6.2238 -    lifted conjecture.
  6.2239 -
  6.2240 -    \item[@{text quiet}] if set quickcheck does not output anything
  6.2241 -    while testing.
  6.2242 -
  6.2243 -    \item[@{text verbose}] if set quickcheck informs about the current
  6.2244 -    size and cardinality while testing.
  6.2245 -
  6.2246 -    \item[@{text expect}] can be used to check if the user's
  6.2247 -    expectation was met (@{text no_expectation}, @{text
  6.2248 -    no_counterexample}, or @{text counterexample}).
  6.2249 -
  6.2250 -    \end{description}
  6.2251 -
  6.2252 -  These option can be given within square brackets.
  6.2253 -
  6.2254 -  Using the following type classes, the testers generate values and convert
  6.2255 -  them back into Isabelle terms for displaying counterexamples.
  6.2256 -    \begin{description}
  6.2257 -    \item[@{text exhaustive}] The parameters of the type classes @{class exhaustive}
  6.2258 -      and @{class full_exhaustive} implement the testing. They take a 
  6.2259 -      testing function as a parameter, which takes a value of type @{typ "'a"}
  6.2260 -      and optionally produces a counterexample, and a size parameter for the test values.
  6.2261 -      In @{class full_exhaustive}, the testing function parameter additionally 
  6.2262 -      expects a lazy term reconstruction in the type @{typ Code_Evaluation.term}
  6.2263 -      of the tested value.
  6.2264 -
  6.2265 -      The canonical implementation for @{text exhaustive} testers calls the given
  6.2266 -      testing function on all values up to the given size and stops as soon
  6.2267 -      as a counterexample is found.
  6.2268 -
  6.2269 -    \item[@{text random}] The operation @{const Quickcheck_Random.random}
  6.2270 -      of the type class @{class random} generates a pseudo-random
  6.2271 -      value of the given size and a lazy term reconstruction of the value
  6.2272 -      in the type @{typ Code_Evaluation.term}. A pseudo-randomness generator
  6.2273 -      is defined in theory @{theory Random}.
  6.2274 -      
  6.2275 -    \item[@{text narrowing}] implements Haskell's Lazy Smallcheck~\cite{runciman-naylor-lindblad}
  6.2276 -      using the type classes @{class narrowing} and @{class partial_term_of}.
  6.2277 -      Variables in the current goal are initially represented as symbolic variables.
  6.2278 -      If the execution of the goal tries to evaluate one of them, the test engine
  6.2279 -      replaces it with refinements provided by @{const narrowing}.
  6.2280 -      Narrowing views every value as a sum-of-products which is expressed using the operations
  6.2281 -      @{const Quickcheck_Narrowing.cons} (embedding a value),
  6.2282 -      @{const Quickcheck_Narrowing.apply} (product) and @{const Quickcheck_Narrowing.sum} (sum).
  6.2283 -      The refinement should enable further evaluation of the goal.
  6.2284 -
  6.2285 -      For example, @{const narrowing} for the list type @{typ "'a :: narrowing list"}
  6.2286 -      can be recursively defined as
  6.2287 -      @{term "Quickcheck_Narrowing.sum (Quickcheck_Narrowing.cons [])
  6.2288 -                (Quickcheck_Narrowing.apply
  6.2289 -                  (Quickcheck_Narrowing.apply
  6.2290 -                    (Quickcheck_Narrowing.cons (op #))
  6.2291 -                    narrowing)
  6.2292 -                  narrowing)"}.
  6.2293 -      If a symbolic variable of type @{typ "_ list"} is evaluated, it is replaced by (i)~the empty
  6.2294 -      list @{term "[]"} and (ii)~by a non-empty list whose head and tail can then be recursively
  6.2295 -      refined if needed.
  6.2296 -
  6.2297 -      To reconstruct counterexamples, the operation @{const partial_term_of} transforms
  6.2298 -      @{text narrowing}'s deep representation of terms to the type @{typ Code_Evaluation.term}.
  6.2299 -      The deep representation models symbolic variables as
  6.2300 -      @{const Quickcheck_Narrowing.Narrowing_variable}, which are normally converted to
  6.2301 -      @{const Code_Evaluation.Free}, and refined values as
  6.2302 -      @{term "Quickcheck_Narrowing.Narrowing_constructor i args"}, where @{term "i :: integer"}
  6.2303 -      denotes the index in the sum of refinements. In the above example for lists,
  6.2304 -      @{term "0"} corresponds to @{term "[]"} and @{term "1"}
  6.2305 -      to @{term "op #"}.
  6.2306 -
  6.2307 -      The command @{command (HOL) "code_datatype"} sets up @{const partial_term_of}
  6.2308 -      such that the @{term "i"}-th refinement is interpreted as the @{term "i"}-th constructor,
  6.2309 -      but it does not ensures consistency with @{const narrowing}.
  6.2310 -    \end{description}
  6.2311 -
  6.2312 -  \item @{command (HOL) "quickcheck_params"} changes @{command (HOL)
  6.2313 -  "quickcheck"} configuration options persistently.
  6.2314 -
  6.2315 -  \item @{command (HOL) "quickcheck_generator"} creates random and
  6.2316 -  exhaustive value generators for a given type and operations.  It
  6.2317 -  generates values by using the operations as if they were
  6.2318 -  constructors of that type.
  6.2319 -
  6.2320 -  \item @{command (HOL) "nitpick"} tests the current goal for
  6.2321 -  counterexamples using a reduction to first-order relational
  6.2322 -  logic. See the Nitpick manual \cite{isabelle-nitpick} for details.
  6.2323 -
  6.2324 -  \item @{command (HOL) "nitpick_params"} changes @{command (HOL)
  6.2325 -  "nitpick"} configuration options persistently.
  6.2326 -
  6.2327 -  \item @{command (HOL) "find_unused_assms"} finds potentially superfluous
  6.2328 -  assumptions in theorems using quickcheck.
  6.2329 -  It takes the theory name to be checked for superfluous assumptions as
  6.2330 -  optional argument. If not provided, it checks the current theory.
  6.2331 -  Options to the internal quickcheck invocations can be changed with
  6.2332 -  common configuration declarations.
  6.2333 -
  6.2334 -  \end{description}
  6.2335 -*}
  6.2336 -
  6.2337 -
  6.2338 -section {* Unstructured case analysis and induction \label{sec:hol-induct-tac} *}
  6.2339 -
  6.2340 -text {*
  6.2341 -  The following tools of Isabelle/HOL support cases analysis and
  6.2342 -  induction in unstructured tactic scripts; see also
  6.2343 -  \secref{sec:cases-induct} for proper Isar versions of similar ideas.
  6.2344 -
  6.2345 -  \begin{matharray}{rcl}
  6.2346 -    @{method_def (HOL) case_tac}@{text "\<^sup>*"} & : & @{text method} \\
  6.2347 -    @{method_def (HOL) induct_tac}@{text "\<^sup>*"} & : & @{text method} \\
  6.2348 -    @{method_def (HOL) ind_cases}@{text "\<^sup>*"} & : & @{text method} \\
  6.2349 -    @{command_def (HOL) "inductive_cases"}@{text "\<^sup>*"} & : & @{text "local_theory \<rightarrow> local_theory"} \\
  6.2350 -  \end{matharray}
  6.2351 -
  6.2352 -  @{rail \<open>
  6.2353 -    @@{method (HOL) case_tac} @{syntax goal_spec}? @{syntax term} rule?
  6.2354 -    ;
  6.2355 -    @@{method (HOL) induct_tac} @{syntax goal_spec}? (@{syntax insts} * @'and') rule?
  6.2356 -    ;
  6.2357 -    @@{method (HOL) ind_cases} (@{syntax prop}+) (@'for' (@{syntax name}+))?
  6.2358 -    ;
  6.2359 -    @@{command (HOL) inductive_cases} (@{syntax thmdecl}? (@{syntax prop}+) + @'and')
  6.2360 -    ;
  6.2361 -    rule: 'rule' ':' @{syntax thmref}
  6.2362 -  \<close>}
  6.2363 -
  6.2364 -  \begin{description}
  6.2365 -
  6.2366 -  \item @{method (HOL) case_tac} and @{method (HOL) induct_tac} admit
  6.2367 -  to reason about inductive types.  Rules are selected according to
  6.2368 -  the declarations by the @{attribute cases} and @{attribute induct}
  6.2369 -  attributes, cf.\ \secref{sec:cases-induct}.  The @{command (HOL)
  6.2370 -  datatype} package already takes care of this.
  6.2371 -
  6.2372 -  These unstructured tactics feature both goal addressing and dynamic
  6.2373 -  instantiation.  Note that named rule cases are \emph{not} provided
  6.2374 -  as would be by the proper @{method cases} and @{method induct} proof
  6.2375 -  methods (see \secref{sec:cases-induct}).  Unlike the @{method
  6.2376 -  induct} method, @{method induct_tac} does not handle structured rule
  6.2377 -  statements, only the compact object-logic conclusion of the subgoal
  6.2378 -  being addressed.
  6.2379 -
  6.2380 -  \item @{method (HOL) ind_cases} and @{command (HOL)
  6.2381 -  "inductive_cases"} provide an interface to the internal @{ML_text
  6.2382 -  mk_cases} operation.  Rules are simplified in an unrestricted
  6.2383 -  forward manner.
  6.2384 -
  6.2385 -  While @{method (HOL) ind_cases} is a proof method to apply the
  6.2386 -  result immediately as elimination rules, @{command (HOL)
  6.2387 -  "inductive_cases"} provides case split theorems at the theory level
  6.2388 -  for later use.  The @{keyword "for"} argument of the @{method (HOL)
  6.2389 -  ind_cases} method allows to specify a list of variables that should
  6.2390 -  be generalized before applying the resulting rule.
  6.2391 -
  6.2392 -  \end{description}
  6.2393 -*}
  6.2394 -
  6.2395 -
  6.2396 -chapter {* Executable code *}
  6.2397 -
  6.2398 -text {* For validation purposes, it is often useful to \emph{execute}
  6.2399 -  specifications.  In principle, execution could be simulated by
  6.2400 -  Isabelle's inference kernel, i.e. by a combination of resolution and
  6.2401 -  simplification.  Unfortunately, this approach is rather inefficient.
  6.2402 -  A more efficient way of executing specifications is to translate
  6.2403 -  them into a functional programming language such as ML.
  6.2404 -
  6.2405 -  Isabelle provides a generic framework to support code generation
  6.2406 -  from executable specifications.  Isabelle/HOL instantiates these
  6.2407 -  mechanisms in a way that is amenable to end-user applications.  Code
  6.2408 -  can be generated for functional programs (including overloading
  6.2409 -  using type classes) targeting SML \cite{SML}, OCaml \cite{OCaml},
  6.2410 -  Haskell \cite{haskell-revised-report} and Scala
  6.2411 -  \cite{scala-overview-tech-report}.  Conceptually, code generation is
  6.2412 -  split up in three steps: \emph{selection} of code theorems,
  6.2413 -  \emph{translation} into an abstract executable view and
  6.2414 -  \emph{serialization} to a specific \emph{target language}.
  6.2415 -  Inductive specifications can be executed using the predicate
  6.2416 -  compiler which operates within HOL.  See \cite{isabelle-codegen} for
  6.2417 -  an introduction.
  6.2418 -
  6.2419 -  \begin{matharray}{rcl}
  6.2420 -    @{command_def (HOL) "export_code"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2421 -    @{attribute_def (HOL) code} & : & @{text attribute} \\
  6.2422 -    @{command_def (HOL) "code_datatype"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2423 -    @{command_def (HOL) "print_codesetup"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2424 -    @{attribute_def (HOL) code_unfold} & : & @{text attribute} \\
  6.2425 -    @{attribute_def (HOL) code_post} & : & @{text attribute} \\
  6.2426 -    @{attribute_def (HOL) code_abbrev} & : & @{text attribute} \\
  6.2427 -    @{command_def (HOL) "print_codeproc"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2428 -    @{command_def (HOL) "code_thms"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2429 -    @{command_def (HOL) "code_deps"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
  6.2430 -    @{command_def (HOL) "code_reserved"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2431 -    @{command_def (HOL) "code_printing"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2432 -    @{command_def (HOL) "code_identifier"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2433 -    @{command_def (HOL) "code_monad"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2434 -    @{command_def (HOL) "code_reflect"} & : & @{text "theory \<rightarrow> theory"} \\
  6.2435 -    @{command_def (HOL) "code_pred"} & : & @{text "theory \<rightarrow> proof(prove)"}
  6.2436 -  \end{matharray}
  6.2437 -
  6.2438 -  @{rail \<open>
  6.2439 -    @@{command (HOL) export_code} ( @'open' ) ? ( constexpr + ) \<newline>
  6.2440 -       ( ( @'in' target ( @'module_name' @{syntax string} ) ? \<newline>
  6.2441 -        ( @'file' @{syntax string} ) ? ( '(' args ')' ) ?) + ) ?
  6.2442 -    ;
  6.2443 -
  6.2444 -    const: @{syntax term}
  6.2445 -    ;
  6.2446 -
  6.2447 -    constexpr: ( const | 'name._' | '_' )
  6.2448 -    ;
  6.2449 -
  6.2450 -    typeconstructor: @{syntax nameref}
  6.2451 -    ;
  6.2452 -
  6.2453 -    class: @{syntax nameref}
  6.2454 -    ;
  6.2455 -
  6.2456 -    target: 'SML' | 'OCaml' | 'Haskell' | 'Scala' | 'Eval'
  6.2457 -    ;
  6.2458 -
  6.2459 -    @@{attribute (HOL) code} ( 'del' | 'equation' | 'abstype' | 'abstract'
  6.2460 -      | 'drop:' ( const + ) | 'abort:' ( const + ) )?
  6.2461 -    ;
  6.2462 -
  6.2463 -    @@{command (HOL) code_datatype} ( const + )
  6.2464 -    ;
  6.2465 -
  6.2466 -    @@{attribute (HOL) code_unfold} ( 'del' ) ?
  6.2467 -    ;
  6.2468 -
  6.2469 -    @@{attribute (HOL) code_post} ( 'del' ) ?
  6.2470 -    ;
  6.2471 -
  6.2472 -    @@{attribute (HOL) code_abbrev}
  6.2473 -    ;
  6.2474 -
  6.2475 -    @@{command (HOL) code_thms} ( constexpr + ) ?
  6.2476 -    ;
  6.2477 -
  6.2478 -    @@{command (HOL) code_deps} ( constexpr + ) ?
  6.2479 -    ;
  6.2480 -
  6.2481 -    @@{command (HOL) code_reserved} target ( @{syntax string} + )
  6.2482 -    ;
  6.2483 -
  6.2484 -    symbol_const: ( @'constant' const )
  6.2485 -    ;
  6.2486 -
  6.2487 -    symbol_typeconstructor: ( @'type_constructor' typeconstructor )
  6.2488 -    ;
  6.2489 -
  6.2490 -    symbol_class: ( @'type_class' class )
  6.2491 -    ;
  6.2492 -
  6.2493 -    symbol_class_relation: ( @'class_relation' class ( '<' | '\<subseteq>' ) class )
  6.2494 -    ;
  6.2495 -
  6.2496 -    symbol_class_instance: ( @'class_instance' typeconstructor @'::' class )
  6.2497 -    ;
  6.2498 -
  6.2499 -    symbol_module: ( @'code_module' name )
  6.2500 -    ;
  6.2501 -
  6.2502 -    syntax: @{syntax string} | ( @'infix' | @'infixl' | @'infixr' ) @{syntax nat} @{syntax string}
  6.2503 -    ;
  6.2504 -
  6.2505 -    printing_const: symbol_const ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2506 -      ( '(' target ')' syntax ? + @'and' )
  6.2507 -    ;
  6.2508 -
  6.2509 -    printing_typeconstructor: symbol_typeconstructor ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2510 -      ( '(' target ')' syntax ? + @'and' )
  6.2511 -    ;
  6.2512 -
  6.2513 -    printing_class: symbol_class ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2514 -      ( '(' target ')' @{syntax string} ? + @'and' )
  6.2515 -    ;
  6.2516 -
  6.2517 -    printing_class_relation: symbol_class_relation ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2518 -      ( '(' target ')' @{syntax string} ? + @'and' )
  6.2519 -    ;
  6.2520 -
  6.2521 -    printing_class_instance: symbol_class_instance ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2522 -      ( '(' target ')' '-' ? + @'and' )
  6.2523 -    ;
  6.2524 -
  6.2525 -    printing_module: symbol_module ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2526 -      ( '(' target ')' ( @{syntax string} ( @'attach' ( const + ) ) ? ) ? + @'and' )
  6.2527 -    ;
  6.2528 -
  6.2529 -    @@{command (HOL) code_printing} ( ( printing_const | printing_typeconstructor
  6.2530 -      | printing_class | printing_class_relation | printing_class_instance
  6.2531 -      | printing_module ) + '|' )
  6.2532 -    ;
  6.2533 -
  6.2534 -    @@{command (HOL) code_identifier} ( ( symbol_const | symbol_typeconstructor
  6.2535 -      | symbol_class | symbol_class_relation | symbol_class_instance
  6.2536 -      | symbol_module ) ( '\<rightharpoonup>' | '=>' ) \<newline>
  6.2537 -      ( '(' target ')' @{syntax string} ? + @'and' ) + '|' )
  6.2538 -    ;
  6.2539 -
  6.2540 -    @@{command (HOL) code_monad} const const target
  6.2541 -    ;
  6.2542 -
  6.2543 -    @@{command (HOL) code_reflect} @{syntax string} \<newline>
  6.2544 -      ( @'datatypes' ( @{syntax string} '=' ( '_' | ( @{syntax string} + '|' ) + @'and' ) ) ) ? \<newline>
  6.2545 -      ( @'functions' ( @{syntax string} + ) ) ? ( @'file' @{syntax string} ) ?
  6.2546 -    ;
  6.2547 -
  6.2548 -    @@{command (HOL) code_pred} \<newline> ('(' @'modes' ':' modedecl ')')? \<newline> const
  6.2549 -    ;
  6.2550 -
  6.2551 -    modedecl: (modes | ((const ':' modes) \<newline>
  6.2552 -        (@'and' ((const ':' modes @'and') +))?))
  6.2553 -    ;
  6.2554 -
  6.2555 -    modes: mode @'as' const
  6.2556 -  \<close>}
  6.2557 -
  6.2558 -  \begin{description}
  6.2559 -
  6.2560 -  \item @{command (HOL) "export_code"} generates code for a given list
  6.2561 -  of constants in the specified target language(s).  If no
  6.2562 -  serialization instruction is given, only abstract code is generated
  6.2563 -  internally.
  6.2564 -
  6.2565 -  Constants may be specified by giving them literally, referring to
  6.2566 -  all executable constants within a certain theory by giving @{text
  6.2567 -  "name._"}, or referring to \emph{all} executable constants currently
  6.2568 -  available by giving @{text "_"}.
  6.2569 -
  6.2570 -  By default, exported identifiers are minimized per module.  This
  6.2571 -  can be suppressed by prepending @{keyword "open"} before the list
  6.2572 -  of contants.
  6.2573 -
  6.2574 -  By default, for each involved theory one corresponding name space
  6.2575 -  module is generated.  Alternatively, a module name may be specified
  6.2576 -  after the @{keyword "module_name"} keyword; then \emph{all} code is
  6.2577 -  placed in this module.
  6.2578 -
  6.2579 -  For \emph{SML}, \emph{OCaml} and \emph{Scala} the file specification
  6.2580 -  refers to a single file; for \emph{Haskell}, it refers to a whole
  6.2581 -  directory, where code is generated in multiple files reflecting the
  6.2582 -  module hierarchy.  Omitting the file specification denotes standard
  6.2583 -  output.
  6.2584 -
  6.2585 -  Serializers take an optional list of arguments in parentheses.
  6.2586 -  For \emph{Haskell} a module name prefix may be given using the
  6.2587 -  ``@{text "root:"}'' argument; ``@{text string_classes}'' adds a
  6.2588 -  ``@{verbatim "deriving (Read, Show)"}'' clause to each appropriate
  6.2589 -  datatype declaration.
  6.2590 -
  6.2591 -  \item @{attribute (HOL) code} declare code equations for code
  6.2592 -  generation.  Variant @{text "code equation"} declares a conventional
  6.2593 -  equation as code equation.  Variants @{text "code abstype"} and
  6.2594 -  @{text "code abstract"} declare abstract datatype certificates or
  6.2595 -  code equations on abstract datatype representations respectively.
  6.2596 -  Vanilla @{text "code"} falls back to @{text "code equation"}
  6.2597 -  or @{text "code abstype"} depending on the syntactic shape
  6.2598 -  of the underlying equation.  Variant @{text "code del"}
  6.2599 -  deselects a code equation for code generation.
  6.2600 -
  6.2601 -  Variants @{text "code drop:"} and @{text "code abort:"} take
  6.2602 -  a list of constant as arguments and drop all code equations declared
  6.2603 -  for them.  In the case of {text abort}, these constants then are
  6.2604 -  are not required to have a definition by means of code equations;
  6.2605 -  if needed these are implemented by program abort (exception) instead.
  6.2606 -
  6.2607 -  Usually packages introducing code equations provide a reasonable
  6.2608 -  default setup for selection.  
  6.2609 -
  6.2610 -  \item @{command (HOL) "code_datatype"} specifies a constructor set
  6.2611 -  for a logical type.
  6.2612 -
  6.2613 -  \item @{command (HOL) "print_codesetup"} gives an overview on
  6.2614 -  selected code equations and code generator datatypes.
  6.2615 -
  6.2616 -  \item @{attribute (HOL) code_unfold} declares (or with option
  6.2617 -  ``@{text "del"}'' removes) theorems which during preprocessing
  6.2618 -  are applied as rewrite rules to any code equation or evaluation
  6.2619 -  input.
  6.2620 -
  6.2621 -  \item @{attribute (HOL) code_post} declares (or with option ``@{text
  6.2622 -  "del"}'' removes) theorems which are applied as rewrite rules to any
  6.2623 -  result of an evaluation.
  6.2624 -
  6.2625 -  \item @{attribute (HOL) code_abbrev} declares equations which are
  6.2626 -  applied as rewrite rules to any result of an evaluation and
  6.2627 -  symmetrically during preprocessing to any code equation or evaluation
  6.2628 -  input.
  6.2629 -
  6.2630 -  \item @{command (HOL) "print_codeproc"} prints the setup of the code
  6.2631 -  generator preprocessor.
  6.2632 -
  6.2633 -  \item @{command (HOL) "code_thms"} prints a list of theorems
  6.2634 -  representing the corresponding program containing all given
  6.2635 -  constants after preprocessing.
  6.2636 -
  6.2637 -  \item @{command (HOL) "code_deps"} visualizes dependencies of
  6.2638 -  theorems representing the corresponding program containing all given
  6.2639 -  constants after preprocessing.
  6.2640 -
  6.2641 -  \item @{command (HOL) "code_reserved"} declares a list of names as
  6.2642 -  reserved for a given target, preventing it to be shadowed by any
  6.2643 -  generated code.
  6.2644 -
  6.2645 -  \item @{command (HOL) "code_printing"} associates a series of symbols
  6.2646 -  (constants, type constructors, classes, class relations, instances,
  6.2647 -  module names) with target-specific serializations; omitting a serialization
  6.2648 -  deletes an existing serialization.
  6.2649 -
  6.2650 -  \item @{command (HOL) "code_monad"} provides an auxiliary mechanism
  6.2651 -  to generate monadic code for Haskell.
  6.2652 -
  6.2653 -  \item @{command (HOL) "code_identifier"} associates a a series of symbols
  6.2654 -  (constants, type constructors, classes, class relations, instances,
  6.2655 -  module names) with target-specific hints how these symbols shall be named.
  6.2656 -  These hints gain precedence over names for symbols with no hints at all.
  6.2657 -  Conflicting hints are subject to name disambiguation.
  6.2658 -  \emph{Warning:} It is at the discretion
  6.2659 -  of the user to ensure that name prefixes of identifiers in compound
  6.2660 -  statements like type classes or datatypes are still the same.
  6.2661 -
  6.2662 -  \item @{command (HOL) "code_reflect"} without a ``@{text "file"}''
  6.2663 -  argument compiles code into the system runtime environment and
  6.2664 -  modifies the code generator setup that future invocations of system
  6.2665 -  runtime code generation referring to one of the ``@{text
  6.2666 -  "datatypes"}'' or ``@{text "functions"}'' entities use these
  6.2667 -  precompiled entities.  With a ``@{text "file"}'' argument, the
  6.2668 -  corresponding code is generated into that specified file without
  6.2669 -  modifying the code generator setup.
  6.2670 -
  6.2671 -  \item @{command (HOL) "code_pred"} creates code equations for a
  6.2672 -    predicate given a set of introduction rules. Optional mode
  6.2673 -    annotations determine which arguments are supposed to be input or
  6.2674 -    output. If alternative introduction rules are declared, one must
  6.2675 -    prove a corresponding elimination rule.
  6.2676 -
  6.2677 -  \end{description}
  6.2678 -*}
  6.2679 -
  6.2680 -end
     7.1 --- a/src/Doc/Isar-Ref/Inner_Syntax.thy	Mon Apr 07 16:37:57 2014 +0200
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,1709 +0,0 @@
     7.4 -theory Inner_Syntax
     7.5 -imports Base Main
     7.6 -begin
     7.7 -
     7.8 -chapter {* Inner syntax --- the term language \label{ch:inner-syntax} *}
     7.9 -
    7.10 -text {* The inner syntax of Isabelle provides concrete notation for
    7.11 -  the main entities of the logical framework, notably @{text
    7.12 -  "\<lambda>"}-terms with types and type classes.  Applications may either
    7.13 -  extend existing syntactic categories by additional notation, or
    7.14 -  define new sub-languages that are linked to the standard term
    7.15 -  language via some explicit markers.  For example @{verbatim
    7.16 -  FOO}~@{text "foo"} could embed the syntax corresponding for some
    7.17 -  user-defined nonterminal @{text "foo"} --- within the bounds of the
    7.18 -  given lexical syntax of Isabelle/Pure.
    7.19 -
    7.20 -  The most basic way to specify concrete syntax for logical entities
    7.21 -  works via mixfix annotations (\secref{sec:mixfix}), which may be
    7.22 -  usually given as part of the original declaration or via explicit
    7.23 -  notation commands later on (\secref{sec:notation}).  This already
    7.24 -  covers many needs of concrete syntax without having to understand
    7.25 -  the full complexity of inner syntax layers.
    7.26 -
    7.27 -  Further details of the syntax engine involves the classical
    7.28 -  distinction of lexical language versus context-free grammar (see
    7.29 -  \secref{sec:pure-syntax}), and various mechanisms for \emph{syntax
    7.30 -  transformations} (see \secref{sec:syntax-transformations}).
    7.31 -*}
    7.32 -
    7.33 -
    7.34 -section {* Printing logical entities *}
    7.35 -
    7.36 -subsection {* Diagnostic commands \label{sec:print-diag} *}
    7.37 -
    7.38 -text {*
    7.39 -  \begin{matharray}{rcl}
    7.40 -    @{command_def "typ"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.41 -    @{command_def "term"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.42 -    @{command_def "prop"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.43 -    @{command_def "thm"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.44 -    @{command_def "prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.45 -    @{command_def "full_prf"}@{text "\<^sup>*"} & : & @{text "context \<rightarrow>"} \\
    7.46 -    @{command_def "print_state"}@{text "\<^sup>*"} & : & @{text "any \<rightarrow>"} \\
    7.47 -  \end{matharray}
    7.48 -
    7.49 -  These diagnostic commands assist interactive development by printing
    7.50 -  internal logical entities in a human-readable fashion.
    7.51 -
    7.52 -  @{rail \<open>
    7.53 -    @@{command typ} @{syntax modes}? @{syntax type} ('::' @{syntax sort})?
    7.54 -    ;
    7.55 -    @@{command term} @{syntax modes}? @{syntax term}
    7.56 -    ;
    7.57 -    @@{command prop} @{syntax modes}? @{syntax prop}
    7.58 -    ;
    7.59 -    @@{command thm} @{syntax modes}? @{syntax thmrefs}
    7.60 -    ;
    7.61 -    ( @@{command prf} | @@{command full_prf} ) @{syntax modes}? @{syntax thmrefs}?
    7.62 -    ;
    7.63 -    @@{command print_state} @{syntax modes}?
    7.64 -    ;
    7.65 -    @{syntax_def modes}: '(' (@{syntax name} + ) ')'
    7.66 -  \<close>}
    7.67 -
    7.68 -  \begin{description}
    7.69 -
    7.70 -  \item @{command "typ"}~@{text \<tau>} reads and prints a type expression
    7.71 -  according to the current context.
    7.72 -
    7.73 -  \item @{command "typ"}~@{text "\<tau> :: s"} uses type-inference to
    7.74 -  determine the most general way to make @{text "\<tau>"} conform to sort
    7.75 -  @{text "s"}.  For concrete @{text "\<tau>"} this checks if the type
    7.76 -  belongs to that sort.  Dummy type parameters ``@{text "_"}''
    7.77 -  (underscore) are assigned to fresh type variables with most general
    7.78 -  sorts, according the the principles of type-inference.
    7.79 -
    7.80 -  \item @{command "term"}~@{text t} and @{command "prop"}~@{text \<phi>}
    7.81 -  read, type-check and print terms or propositions according to the
    7.82 -  current theory or proof context; the inferred type of @{text t} is
    7.83 -  output as well.  Note that these commands are also useful in
    7.84 -  inspecting the current environment of term abbreviations.
    7.85 -
    7.86 -  \item @{command "thm"}~@{text "a\<^sub>1 \<dots> a\<^sub>n"} retrieves
    7.87 -  theorems from the current theory or proof context.  Note that any
    7.88 -  attributes included in the theorem specifications are applied to a
    7.89 -  temporary context derived from the current theory or proof; the
    7.90 -  result is discarded, i.e.\ attributes involved in @{text "a\<^sub>1,
    7.91 -  \<dots>, a\<^sub>n"} do not have any permanent effect.
    7.92 -
    7.93 -  \item @{command "prf"} displays the (compact) proof term of the
    7.94 -  current proof state (if present), or of the given theorems. Note
    7.95 -  that this requires proof terms to be switched on for the current
    7.96 -  object logic (see the ``Proof terms'' section of the Isabelle
    7.97 -  reference manual for information on how to do this).
    7.98 -
    7.99 -  \item @{command "full_prf"} is like @{command "prf"}, but displays
   7.100 -  the full proof term, i.e.\ also displays information omitted in the
   7.101 -  compact proof term, which is denoted by ``@{text _}'' placeholders
   7.102 -  there.
   7.103 -
   7.104 -  \item @{command "print_state"} prints the current proof state (if
   7.105 -  present), including current facts and goals.
   7.106 -
   7.107 -  \end{description}
   7.108 -
   7.109 -  All of the diagnostic commands above admit a list of @{text modes}
   7.110 -  to be specified, which is appended to the current print mode; see
   7.111 -  also \secref{sec:print-modes}.  Thus the output behavior may be
   7.112 -  modified according particular print mode features.  For example,
   7.113 -  @{command "print_state"}~@{text "(latex xsymbols)"} prints the
   7.114 -  current proof state with mathematical symbols and special characters
   7.115 -  represented in {\LaTeX} source, according to the Isabelle style
   7.116 -  \cite{isabelle-sys}.
   7.117 -
   7.118 -  Note that antiquotations (cf.\ \secref{sec:antiq}) provide a more
   7.119 -  systematic way to include formal items into the printed text
   7.120 -  document.
   7.121 -*}
   7.122 -
   7.123 -
   7.124 -subsection {* Details of printed content *}
   7.125 -
   7.126 -text {*
   7.127 -  \begin{tabular}{rcll}
   7.128 -    @{attribute_def show_markup} & : & @{text attribute} \\
   7.129 -    @{attribute_def show_types} & : & @{text attribute} & default @{text false} \\
   7.130 -    @{attribute_def show_sorts} & : & @{text attribute} & default @{text false} \\
   7.131 -    @{attribute_def show_consts} & : & @{text attribute} & default @{text false} \\
   7.132 -    @{attribute_def show_abbrevs} & : & @{text attribute} & default @{text true} \\
   7.133 -    @{attribute_def show_bracke