--- a/doc-src/ROOT Mon Aug 27 17:11:55 2012 +0200
+++ b/doc-src/ROOT Mon Aug 27 17:24:01 2012 +0200
@@ -114,6 +114,24 @@
Logic
Isar
+session Ref (doc) in "Ref" = Pure +
+ options [document_variants = "ref"]
+ theories
+ files
+ "../iman.sty"
+ "../extra.sty"
+ "../ttbox.sty"
+ "../proof.sty"
+ "../manual.bib"
+ "document/build"
+ "document/classical.tex"
+ "document/root.tex"
+ "document/simplifier.tex"
+ "document/substitution.tex"
+ "document/syntax.tex"
+ "document/tactic.tex"
+ "document/thm.tex"
+
session System (doc) in "System" = Pure +
options [document_variants = "system", thy_output_source]
theories
--- a/doc-src/Ref/Makefile Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-
-## targets
-
-default: dvi
-
-
-## dependencies
-
-include ../Makefile.in
-
-NAME = ref
-FILES = ref.tex tactic.tex thm.tex syntax.tex \
- substitution.tex simplifier.tex classical.tex ../proof.sty \
- ../iman.sty ../extra.sty ../ttbox.sty ../manual.bib
-
-dvi: $(NAME).dvi
-
-$(NAME).dvi: $(FILES) isabelle.eps
- $(LATEX) $(NAME)
- $(BIBTEX) $(NAME)
- $(LATEX) $(NAME)
- $(LATEX) $(NAME)
- $(SEDINDEX) $(NAME)
- $(LATEX) $(NAME)
-
-pdf: $(NAME).pdf
-
-$(NAME).pdf: $(FILES) isabelle.pdf
- $(PDFLATEX) $(NAME)
- $(BIBTEX) $(NAME)
- $(PDFLATEX) $(NAME)
- $(PDFLATEX) $(NAME)
- $(SEDINDEX) $(NAME)
- $(FIXBOOKMARKS) $(NAME).out
- $(PDFLATEX) $(NAME)
--- a/doc-src/Ref/classical.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,224 +0,0 @@
-
-\chapter{The Classical Reasoner}\label{chap:classical}
-\index{classical reasoner|(}
-\newcommand\ainfer[2]{\begin{array}{r@{\,}l}#2\\ \hline#1\end{array}}
-
-\section{Classical rule sets}
-\index{classical sets}
-
-For elimination and destruction rules there are variants of the add operations
-that add a rule such that it is applied only if its second premise can also
-be unified with an assumption of the current proof state:
-\indexbold{*addSE2}\indexbold{*addSD2}\indexbold{*addE2}\indexbold{*addD2}
-\begin{ttbox}
-addSE2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
-addSD2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
-addE2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
-addD2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
-\end{ttbox}
-\begin{warn}
- A rule to be added in this special way must be given a name, which is used
- to delete it again -- when desired -- using \texttt{delSWrapper} or
- \texttt{delWrapper}, respectively. This is because these add operations
- are implemented as wrappers (see \ref{sec:modifying-search} below).
-\end{warn}
-
-
-\subsection{Modifying the search step}
-\label{sec:modifying-search}
-For a given classical set, the proof strategy is simple. Perform as many safe
-inferences as possible; or else, apply certain safe rules, allowing
-instantiation of unknowns; or else, apply an unsafe rule. The tactics also
-eliminate assumptions of the form $x=t$ by substitution if they have been set
-up to do so (see \texttt{hyp_subst_tacs} in~{\S}\ref{sec:classical-setup} below).
-They may perform a form of Modus Ponens: if there are assumptions $P\imp Q$
-and~$P$, then replace $P\imp Q$ by~$Q$.
-
-The classical reasoning tactics --- except \texttt{blast_tac}! --- allow
-you to modify this basic proof strategy by applying two lists of arbitrary
-{\bf wrapper tacticals} to it.
-The first wrapper list, which is considered to contain safe wrappers only,
-affects \ttindex{safe_step_tac} and all the tactics that call it.
-The second one, which may contain unsafe wrappers, affects the unsafe parts
-of \ttindex{step_tac}, \ttindex{slow_step_tac}, and the tactics that call them.
-A wrapper transforms each step of the search, for example
-by attempting other tactics before or after the original step tactic.
-All members of a wrapper list are applied in turn to the respective step tactic.
-
-Initially the two wrapper lists are empty, which means no modification of the
-step tactics. Safe and unsafe wrappers are added to a claset
-with the functions given below, supplying them with wrapper names.
-These names may be used to selectively delete wrappers.
-
-\begin{ttbox}
-type wrapper = (int -> tactic) -> (int -> tactic);
-
-addSWrapper : claset * (string * wrapper ) -> claset \hfill{\bf infix 4}
-addSbefore : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
-addSafter : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
-delSWrapper : claset * string -> claset \hfill{\bf infix 4}
-
-addWrapper : claset * (string * wrapper ) -> claset \hfill{\bf infix 4}
-addbefore : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
-addafter : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
-delWrapper : claset * string -> claset \hfill{\bf infix 4}
-
-addSss : claset * simpset -> claset \hfill{\bf infix 4}
-addss : claset * simpset -> claset \hfill{\bf infix 4}
-\end{ttbox}
-%
-
-\begin{ttdescription}
-\item[$cs$ addSWrapper $(name,wrapper)$] \indexbold{*addSWrapper}
-adds a new wrapper, which should yield a safe tactic,
-to modify the existing safe step tactic.
-
-\item[$cs$ addSbefore $(name,tac)$] \indexbold{*addSbefore}
-adds the given tactic as a safe wrapper, such that it is tried
-{\em before} each safe step of the search.
-
-\item[$cs$ addSafter $(name,tac)$] \indexbold{*addSafter}
-adds the given tactic as a safe wrapper, such that it is tried
-when a safe step of the search would fail.
-
-\item[$cs$ delSWrapper $name$] \indexbold{*delSWrapper}
-deletes the safe wrapper with the given name.
-
-\item[$cs$ addWrapper $(name,wrapper)$] \indexbold{*addWrapper}
-adds a new wrapper to modify the existing (unsafe) step tactic.
-
-\item[$cs$ addbefore $(name,tac)$] \indexbold{*addbefore}
-adds the given tactic as an unsafe wrapper, such that its result is
-concatenated {\em before} the result of each unsafe step.
-
-\item[$cs$ addafter $(name,tac)$] \indexbold{*addafter}
-adds the given tactic as an unsafe wrapper, such that its result is
-concatenated {\em after} the result of each unsafe step.
-
-\item[$cs$ delWrapper $name$] \indexbold{*delWrapper}
-deletes the unsafe wrapper with the given name.
-
-\item[$cs$ addSss $ss$] \indexbold{*addSss}
-adds the simpset~$ss$ to the classical set. The assumptions and goal will be
-simplified, in a rather safe way, after each safe step of the search.
-
-\item[$cs$ addss $ss$] \indexbold{*addss}
-adds the simpset~$ss$ to the classical set. The assumptions and goal will be
-simplified before each unsafe step of the search.
-
-\end{ttdescription}
-
-\index{simplification!from classical reasoner}
-Strictly speaking, the operators \texttt{addss} and \texttt{addSss} are
-not part of the classical reasoner. They are used as primitives for the
-automatic tactics described in {\S}\ref{sec:automatic-tactics}, and are
-implemented as wrapper tacticals.
-\begin{warn}
-Being defined as wrappers, these operators are inappropriate for adding more
-than one simpset at a time: the simpset added last overwrites any earlier ones.
-When a simpset combined with a claset is to be augmented, this should done
-{\em before} combining it with the claset.
-\end{warn}
-
-
-\section{The classical tactics}
-
-\subsection{Other classical tactics}
-\begin{ttbox}
-slow_best_tac : claset -> int -> tactic
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{slow_best_tac} $cs$ $i$] applies \texttt{slow_step_tac} with
-best-first search to prove subgoal~$i$.
-\end{ttdescription}
-
-
-\subsection{Other useful tactics}
-\index{tactics!for contradiction}
-\index{tactics!for Modus Ponens}
-\begin{ttbox}
-contr_tac : int -> tactic
-mp_tac : int -> tactic
-eq_mp_tac : int -> tactic
-swap_res_tac : thm list -> int -> tactic
-\end{ttbox}
-These can be used in the body of a specialized search.
-\begin{ttdescription}
-\item[\ttindexbold{contr_tac} {\it i}]\index{assumptions!contradictory}
- solves subgoal~$i$ by detecting a contradiction among two assumptions of
- the form $P$ and~$\neg P$, or fails. It may instantiate unknowns. The
- tactic can produce multiple outcomes, enumerating all possible
- contradictions.
-
-\item[\ttindexbold{mp_tac} {\it i}]
-is like \texttt{contr_tac}, but also attempts to perform Modus Ponens in
-subgoal~$i$. If there are assumptions $P\imp Q$ and~$P$, then it replaces
-$P\imp Q$ by~$Q$. It may instantiate unknowns. It fails if it can do
-nothing.
-
-\item[\ttindexbold{eq_mp_tac} {\it i}]
-is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
-is safe.
-
-\item[\ttindexbold{swap_res_tac} {\it thms} {\it i}] refines subgoal~$i$ of
-the proof state using {\it thms}, which should be a list of introduction
-rules. First, it attempts to prove the goal using \texttt{assume_tac} or
-\texttt{contr_tac}. It then tries each rule in turn, attempting
-resolution and also elim-resolution with the swapped form.
-\end{ttdescription}
-
-
-\section{Setting up the classical reasoner}\label{sec:classical-setup}
-\index{classical reasoner!setting up}
-Isabelle's classical object-logics, including \texttt{FOL} and \texttt{HOL},
-have the classical reasoner already set up.
-When defining a new classical logic, you should set up the reasoner yourself.
-The setup consists of the \ML{} functor \ttindex{ClassicalFun}, which takes the
-argument signature \texttt{CLASSICAL_DATA}:
-\begin{ttbox}
-signature CLASSICAL_DATA =
- sig
- val mp : thm
- val not_elim : thm
- val swap : thm
- val sizef : thm -> int
- val hyp_subst_tacs : (int -> tactic) list
- end;
-\end{ttbox}
-Thus, the functor requires the following items:
-\begin{ttdescription}
-\item[\tdxbold{mp}] should be the Modus Ponens rule
-$\List{\Var{P}\imp\Var{Q};\; \Var{P}} \Imp \Var{Q}$.
-
-\item[\tdxbold{not_elim}] should be the contradiction rule
-$\List{\neg\Var{P};\; \Var{P}} \Imp \Var{R}$.
-
-\item[\tdxbold{swap}] should be the swap rule
-$\List{\neg \Var{P}; \; \neg \Var{R}\Imp \Var{P}} \Imp \Var{R}$.
-
-\item[\ttindexbold{sizef}] is the heuristic function used for best-first
-search. It should estimate the size of the remaining subgoals. A good
-heuristic function is \ttindex{size_of_thm}, which measures the size of the
-proof state. Another size function might ignore certain subgoals (say,
-those concerned with type-checking). A heuristic function might simply
-count the subgoals.
-
-\item[\ttindexbold{hyp_subst_tacs}] is a list of tactics for substitution in
-the hypotheses, typically created by \ttindex{HypsubstFun} (see
-Chapter~\ref{substitution}). This list can, of course, be empty. The
-tactics are assumed to be safe!
-\end{ttdescription}
-The functor is not at all sensitive to the formalization of the
-object-logic. It does not even examine the rules, but merely applies
-them according to its fixed strategy. The functor resides in {\tt
- Provers/classical.ML} in the Isabelle sources.
-
-\index{classical reasoner|)}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/build Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -e
+
+FORMAT="$1"
+VARIANT="$2"
+
+"$ISABELLE_TOOL" logo -o isabelle.pdf ""
+"$ISABELLE_TOOL" logo -o isabelle.eps ""
+
+cp "$ISABELLE_HOME/doc-src/iman.sty" .
+cp "$ISABELLE_HOME/doc-src/extra.sty" .
+cp "$ISABELLE_HOME/doc-src/ttbox.sty" .
+cp "$ISABELLE_HOME/doc-src/proof.sty" .
+cp "$ISABELLE_HOME/doc-src/manual.bib" .
+
+"$ISABELLE_TOOL" latex -o sty
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o bbl
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_HOME/doc-src/sedindex" root
+[ -f root.out ] && "$ISABELLE_HOME/doc-src/fixbookmarks" root.out
+"$ISABELLE_TOOL" latex -o "$FORMAT"
+"$ISABELLE_TOOL" latex -o "$FORMAT"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/classical.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,224 @@
+
+\chapter{The Classical Reasoner}\label{chap:classical}
+\index{classical reasoner|(}
+\newcommand\ainfer[2]{\begin{array}{r@{\,}l}#2\\ \hline#1\end{array}}
+
+\section{Classical rule sets}
+\index{classical sets}
+
+For elimination and destruction rules there are variants of the add operations
+that add a rule such that it is applied only if its second premise can also
+be unified with an assumption of the current proof state:
+\indexbold{*addSE2}\indexbold{*addSD2}\indexbold{*addE2}\indexbold{*addD2}
+\begin{ttbox}
+addSE2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
+addSD2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
+addE2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
+addD2 : claset * (string * thm) -> claset \hfill{\bf infix 4}
+\end{ttbox}
+\begin{warn}
+ A rule to be added in this special way must be given a name, which is used
+ to delete it again -- when desired -- using \texttt{delSWrapper} or
+ \texttt{delWrapper}, respectively. This is because these add operations
+ are implemented as wrappers (see \ref{sec:modifying-search} below).
+\end{warn}
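+
+For example, a hypothetical elimination rule bound to the \ML{}
+identifier \texttt{my_elim} could be added under a name and deleted
+again later (a minimal sketch; rule and name are purely illustrative):
+\begin{ttbox}
+val cs'  = cs addSE2 ("my_elim", my_elim);
+val cs'' = cs' delSWrapper "my_elim";
+\end{ttbox}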
+
+
+\subsection{Modifying the search step}
+\label{sec:modifying-search}
+For a given classical set, the proof strategy is simple. Perform as many safe
+inferences as possible; or else, apply certain safe rules, allowing
+instantiation of unknowns; or else, apply an unsafe rule. The tactics also
+eliminate assumptions of the form $x=t$ by substitution if they have been set
+up to do so (see \texttt{hyp_subst_tacs} in~{\S}\ref{sec:classical-setup} below).
+They may perform a form of Modus Ponens: if there are assumptions $P\imp Q$
+and~$P$, then replace $P\imp Q$ by~$Q$.
+
+The classical reasoning tactics --- except \texttt{blast_tac}! --- allow
+you to modify this basic proof strategy by applying two lists of arbitrary
+{\bf wrapper tacticals} to it.
+The first wrapper list, which is considered to contain safe wrappers only,
+affects \ttindex{safe_step_tac} and all the tactics that call it.
+The second one, which may contain unsafe wrappers, affects the unsafe parts
+of \ttindex{step_tac}, \ttindex{slow_step_tac}, and the tactics that call them.
+A wrapper transforms each step of the search, for example
+by attempting other tactics before or after the original step tactic.
+All members of a wrapper list are applied in turn to the respective step tactic.
+
+Initially the two wrapper lists are empty, which means no modification of the
+step tactics. Safe and unsafe wrappers are added to a claset
+with the functions given below, supplying them with wrapper names.
+These names may be used to selectively delete wrappers.
+
+\begin{ttbox}
+type wrapper = (int -> tactic) -> (int -> tactic);
+
+addSWrapper : claset * (string * wrapper ) -> claset \hfill{\bf infix 4}
+addSbefore : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
+addSafter : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
+delSWrapper : claset * string -> claset \hfill{\bf infix 4}
+
+addWrapper : claset * (string * wrapper ) -> claset \hfill{\bf infix 4}
+addbefore : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
+addafter : claset * (string * (int -> tactic)) -> claset \hfill{\bf infix 4}
+delWrapper : claset * string -> claset \hfill{\bf infix 4}
+
+addSss : claset * simpset -> claset \hfill{\bf infix 4}
+addss : claset * simpset -> claset \hfill{\bf infix 4}
+\end{ttbox}
+%
+
+\begin{ttdescription}
+\item[$cs$ addSWrapper $(name,wrapper)$] \indexbold{*addSWrapper}
+adds a new wrapper, which should yield a safe tactic,
+to modify the existing safe step tactic.
+
+\item[$cs$ addSbefore $(name,tac)$] \indexbold{*addSbefore}
+adds the given tactic as a safe wrapper, such that it is tried
+{\em before} each safe step of the search.
+
+\item[$cs$ addSafter $(name,tac)$] \indexbold{*addSafter}
+adds the given tactic as a safe wrapper, such that it is tried
+when a safe step of the search would fail.
+
+\item[$cs$ delSWrapper $name$] \indexbold{*delSWrapper}
+deletes the safe wrapper with the given name.
+
+\item[$cs$ addWrapper $(name,wrapper)$] \indexbold{*addWrapper}
+adds a new wrapper to modify the existing (unsafe) step tactic.
+
+\item[$cs$ addbefore $(name,tac)$] \indexbold{*addbefore}
+adds the given tactic as an unsafe wrapper, such that its result is
+concatenated {\em before} the result of each unsafe step.
+
+\item[$cs$ addafter $(name,tac)$] \indexbold{*addafter}
+adds the given tactic as an unsafe wrapper, such that its result is
+concatenated {\em after} the result of each unsafe step.
+
+\item[$cs$ delWrapper $name$] \indexbold{*delWrapper}
+deletes the unsafe wrapper with the given name.
+
+\item[$cs$ addSss $ss$] \indexbold{*addSss}
+adds the simpset~$ss$ to the classical set. The assumptions and goal will be
+simplified, in a rather safe way, after each safe step of the search.
+
+\item[$cs$ addss $ss$] \indexbold{*addss}
+adds the simpset~$ss$ to the classical set. The assumptions and goal will be
+simplified before each unsafe step of the search.
+
+\end{ttdescription}
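+
+For example, hypothetical tactics \texttt{my_safe_tac} and
+\texttt{my_cleanup_tac} could be installed as additional safe and
+unsafe wrappers (a minimal sketch; both names are illustrative):
+\begin{ttbox}
+val cs' = cs addSbefore ("my_safe_tac", my_safe_tac)
+             addafter ("my_cleanup_tac", my_cleanup_tac);
+\end{ttbox}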
+
+\index{simplification!from classical reasoner}
+Strictly speaking, the operators \texttt{addss} and \texttt{addSss} are
+not part of the classical reasoner. They are used as primitives for the
+automatic tactics described in {\S}\ref{sec:automatic-tactics}, and are
+implemented as wrapper tacticals.
+\begin{warn}
+Being defined as wrappers, these operators are inappropriate for adding more
+than one simpset at a time: the simpset added last overwrites any earlier ones.
+When a simpset combined with a claset is to be augmented, this should be done
+{\em before} combining it with the claset.
+\end{warn}
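+
+Following this advice, the simpset is augmented first and only then
+combined with the claset (a minimal sketch; \texttt{more_simps} is a
+hypothetical list of rewrite rules):
+\begin{ttbox}
+val cs' = cs addss (ss addsimps more_simps);
+\end{ttbox}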
+
+
+\section{The classical tactics}
+
+\subsection{Other classical tactics}
+\begin{ttbox}
+slow_best_tac : claset -> int -> tactic
+\end{ttbox}
+
+\begin{ttdescription}
+\item[\ttindexbold{slow_best_tac} $cs$ $i$] applies \texttt{slow_step_tac} with
+best-first search to prove subgoal~$i$.
+\end{ttdescription}
+
+
+\subsection{Other useful tactics}
+\index{tactics!for contradiction}
+\index{tactics!for Modus Ponens}
+\begin{ttbox}
+contr_tac : int -> tactic
+mp_tac : int -> tactic
+eq_mp_tac : int -> tactic
+swap_res_tac : thm list -> int -> tactic
+\end{ttbox}
+These can be used in the body of a specialized search.
+\begin{ttdescription}
+\item[\ttindexbold{contr_tac} {\it i}]\index{assumptions!contradictory}
+ solves subgoal~$i$ by detecting a contradiction among two assumptions of
+ the form $P$ and~$\neg P$, or fails. It may instantiate unknowns. The
+ tactic can produce multiple outcomes, enumerating all possible
+ contradictions.
+
+\item[\ttindexbold{mp_tac} {\it i}]
+is like \texttt{contr_tac}, but also attempts to perform Modus Ponens in
+subgoal~$i$. If there are assumptions $P\imp Q$ and~$P$, then it replaces
+$P\imp Q$ by~$Q$. It may instantiate unknowns. It fails if it can do
+nothing.
+
+\item[\ttindexbold{eq_mp_tac} {\it i}]
+is like \texttt{mp_tac} {\it i}, but may not instantiate unknowns --- thus, it
+is safe.
+
+\item[\ttindexbold{swap_res_tac} {\it thms} {\it i}] refines subgoal~$i$ of
+the proof state using {\it thms}, which should be a list of introduction
+rules. First, it attempts to prove the goal using \texttt{assume_tac} or
+\texttt{contr_tac}. It then tries each rule in turn, attempting
+resolution and also elim-resolution with the swapped form.
+\end{ttdescription}
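+
+For instance, a rudimentary search that repeatedly applies Modus Ponens
+and contradiction detection to the first subgoal might be written as
+follows (a minimal sketch, not a tactic from the Isabelle sources):
+\begin{ttbox}
+fun mp_step_tac i  = eq_mp_tac i ORELSE contr_tac i;
+val mp_search_tac  = REPEAT (mp_step_tac 1);
+\end{ttbox}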
+
+
+\section{Setting up the classical reasoner}\label{sec:classical-setup}
+\index{classical reasoner!setting up}
+Isabelle's classical object-logics, including \texttt{FOL} and \texttt{HOL},
+have the classical reasoner already set up.
+When defining a new classical logic, you should set up the reasoner yourself.
+The setup consists of the \ML{} functor \ttindex{ClassicalFun}, which takes the
+argument signature \texttt{CLASSICAL_DATA}:
+\begin{ttbox}
+signature CLASSICAL_DATA =
+ sig
+ val mp : thm
+ val not_elim : thm
+ val swap : thm
+ val sizef : thm -> int
+ val hyp_subst_tacs : (int -> tactic) list
+ end;
+\end{ttbox}
+Thus, the functor requires the following items:
+\begin{ttdescription}
+\item[\tdxbold{mp}] should be the Modus Ponens rule
+$\List{\Var{P}\imp\Var{Q};\; \Var{P}} \Imp \Var{Q}$.
+
+\item[\tdxbold{not_elim}] should be the contradiction rule
+$\List{\neg\Var{P};\; \Var{P}} \Imp \Var{R}$.
+
+\item[\tdxbold{swap}] should be the swap rule
+$\List{\neg \Var{P}; \; \neg \Var{R}\Imp \Var{P}} \Imp \Var{R}$.
+
+\item[\ttindexbold{sizef}] is the heuristic function used for best-first
+search. It should estimate the size of the remaining subgoals. A good
+heuristic function is \ttindex{size_of_thm}, which measures the size of the
+proof state. Another size function might ignore certain subgoals (say,
+those concerned with type-checking). A heuristic function might simply
+count the subgoals.
+
+\item[\ttindexbold{hyp_subst_tacs}] is a list of tactics for substitution in
+the hypotheses, typically created by \ttindex{HypsubstFun} (see
+Chapter~\ref{substitution}). This list can, of course, be empty. The
+tactics are assumed to be safe!
+\end{ttdescription}
+The functor is not at all sensitive to the formalization of the
+object-logic. It does not even examine the rules, but merely applies
+them according to its fixed strategy. The functor resides in {\tt
+ Provers/classical.ML} in the Isabelle sources.
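+
+A minimal instantiation might look as follows, assuming the
+object-logic provides suitable theorems and tactics under these names
+(\texttt{notE} and \texttt{hyp_subst_tac} are illustrative):
+\begin{ttbox}
+structure Classical = ClassicalFun
+(struct
+  val mp             = mp
+  val not_elim       = notE
+  val swap           = swap
+  val sizef          = size_of_thm
+  val hyp_subst_tacs = [hyp_subst_tac]
+end);
+\end{ttbox}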
+
+\index{classical reasoner|)}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/root.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,67 @@
+\documentclass[12pt,a4paper]{report}
+\usepackage{graphicx,iman,extra,ttbox,proof,pdfsetup}
+
+%%\includeonly{}
+%%% to index ids: \[\\tt \([a-zA-Z0-9][a-zA-Z0-9_'.]*\) [\\ttindexbold{\1}
+%%% to delete old ones: \\indexbold{\*[^}]*}
+%% run sedindex ref to prepare index file
+%%% needs chapter on Provers/typedsimp.ML?
+\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Isabelle Reference Manual}
+
+\author{{\em Lawrence C. Paulson}\\
+ Computer Laboratory \\ University of Cambridge \\
+ \texttt{lcp@cl.cam.ac.uk}\\[3ex]
+ With Contributions by Tobias Nipkow and Markus Wenzel}
+
+\makeindex
+
+\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
+
+\pagestyle{headings}
+\sloppy
+\binperiod %%%treat . like a binary operator
+
+\begin{document}
+\underscoreoff
+
+\index{definitions|see{rewriting, meta-level}}
+\index{rewriting!object-level|see{simplification}}
+\index{meta-rules|see{meta-rules}}
+
+\maketitle
+\emph{Note}: this document is part of the earlier Isabelle
+documentation and is mostly outdated. Fully obsolete parts of the
+original text have already been removed. The remaining material
+covers some aspects that did not make it into the newer manuals yet.
+
+\subsubsection*{Acknowledgements}
+Tobias Nipkow, of T. U. Munich, wrote most of
+ Chapters~\protect\ref{Defining-Logics} and~\protect\ref{chap:simplification}.
+ Markus Wenzel contributed to Chapter~\protect\ref{chap:syntax}.
+ Jeremy Dawson, Sara Kalvala, Martin
+ Simons and others suggested changes
+ and corrections. The research has been funded by the EPSRC (grants
+ GR/G53279, GR/H40570, GR/K57381, GR/K77051, GR/M75440) and by ESPRIT
+ (projects 3245: Logical Frameworks, and 6453: Types), and by the DFG
+ Schwerpunktprogramm \emph{Deduktion}.
+
+\pagenumbering{roman} \tableofcontents \clearfirst
+
+\include{tactic}
+\include{thm}
+\include{syntax}
+\include{substitution}
+\include{simplifier}
+\include{classical}
+
+%%seealso's must be last so that they appear last in the index entries
+\index{meta-rewriting|seealso{tactics, theorems}}
+
+\begingroup
+ \bibliographystyle{plain} \small\raggedright\frenchspacing
+ \bibliography{manual}
+\endgroup
+\include{theory-syntax}
+
+\printindex
+\end{document}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/simplifier.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,1032 @@
+
+\chapter{Simplification}
+\label{chap:simplification}
+\index{simplification|(}
+
+This chapter describes Isabelle's generic simplification package. It performs
+conditional and unconditional rewriting and uses contextual information
+(`local assumptions'). It provides several general hooks, which can,
+for example, generate automatic case splits during rewriting. The simplifier is
+already set up for many of Isabelle's logics: FOL, ZF, HOL, HOLCF.
+
+The first section is a quick introduction to the simplifier that
+should be sufficient to get started. The later sections explain more
+advanced features.
+
+
+\section{Simplification for dummies}
+\label{sec:simp-for-dummies}
+
+Basic use of the simplifier is particularly easy because each theory
+is equipped with sensible default information controlling the rewrite
+process --- namely the implicit {\em current
+ simpset}\index{simpset!current}. A suite of simple commands is
+provided that refer to the implicit simpset of the current theory
+context.
+
+\begin{warn}
+ Make sure that you are working within the correct theory context.
+ Executing proofs interactively, or loading them from ML files
+ without associated theories may require setting the current theory
+ manually via the \ttindex{context} command.
+\end{warn}
+
+\subsection{Simplification tactics} \label{sec:simp-for-dummies-tacs}
+\begin{ttbox}
+Simp_tac : int -> tactic
+Asm_simp_tac : int -> tactic
+Full_simp_tac : int -> tactic
+Asm_full_simp_tac : int -> tactic
+trace_simp : bool ref \hfill{\bf initially false}
+debug_simp : bool ref \hfill{\bf initially false}
+\end{ttbox}
+
+\begin{ttdescription}
+\item[\ttindexbold{Simp_tac} $i$] simplifies subgoal~$i$ using the
+ current simpset. It may solve the subgoal completely if it has
+ become trivial, using the simpset's solver tactic.
+
+\item[\ttindexbold{Asm_simp_tac}]\index{assumptions!in simplification}
+ is like \verb$Simp_tac$, but extracts additional rewrite rules from
+ the local assumptions.
+
+\item[\ttindexbold{Full_simp_tac}] is like \verb$Simp_tac$, but also
+ simplifies the assumptions (without using the assumptions to
+ simplify each other or the actual goal).
+
+\item[\ttindexbold{Asm_full_simp_tac}] is like \verb$Asm_simp_tac$,
+ but also simplifies the assumptions. In particular, assumptions can
+ simplify each other.
+\footnote{\texttt{Asm_full_simp_tac} used to process the assumptions from
+ left to right. For backwards compatibility reasons only, there is now
+ \texttt{Asm_lr_simp_tac} that behaves like the old \texttt{Asm_full_simp_tac}.}
+\item[set \ttindexbold{trace_simp};] makes the simplifier output internal
+ operations. This includes rewrite steps, but also bookkeeping like
+ modifications of the simpset.
+\item[set \ttindexbold{debug_simp};] makes the simplifier output some extra
+ information about internal operations. This includes any attempted
+ invocation of simplification procedures.
+\end{ttdescription}
+
+\medskip
+
+As an example, consider the theory of arithmetic in HOL. The (rather trivial)
+goal $0 + (x + 0) = x + 0 + 0$ can be solved by a single call of
+\texttt{Simp_tac} as follows:
+\begin{ttbox}
+context Arith.thy;
+Goal "0 + (x + 0) = x + 0 + 0";
+{\out 1. 0 + (x + 0) = x + 0 + 0}
+by (Simp_tac 1);
+{\out Level 1}
+{\out 0 + (x + 0) = x + 0 + 0}
+{\out No subgoals!}
+\end{ttbox}
+
+The simplifier uses the current simpset of \texttt{Arith.thy}, which
+contains suitable theorems like $\Var{n}+0 = \Var{n}$ and $0+\Var{n} =
+\Var{n}$.
+
+\medskip In many cases, assumptions of a subgoal are also needed in
+the simplification process. For example, \texttt{x = 0 ==> x + x = 0}
+is solved by \texttt{Asm_simp_tac} as follows:
+\begin{ttbox}
+{\out 1. x = 0 ==> x + x = 0}
+by (Asm_simp_tac 1);
+\end{ttbox}
+
+\medskip \texttt{Asm_full_simp_tac} is the most powerful of this quartet
+of tactics but may also loop where some of the others terminate. For
+example,
+\begin{ttbox}
+{\out 1. ALL x. f x = g (f (g x)) ==> f 0 = f 0 + 0}
+\end{ttbox}
+is solved by \texttt{Simp_tac}, but \texttt{Asm_simp_tac} and {\tt
+ Asm_full_simp_tac} loop because the rewrite rule $f\,\Var{x} =
+g\,(f\,(g\,\Var{x}))$ extracted from the assumption does not
+terminate. Isabelle notices certain simple forms of nontermination,
+but not this one. Because assumptions may simplify each other, there can be
+very subtle cases of nontermination. For example, invoking
+{\tt Asm_full_simp_tac} on
+\begin{ttbox}
+{\out 1. [| P (f x); y = x; f x = f y |] ==> Q}
+\end{ttbox}
+gives rise to the infinite reduction sequence
+\[
+P\,(f\,x) \stackrel{f\,x = f\,y}{\longmapsto} P\,(f\,y) \stackrel{y = x}{\longmapsto}
+P\,(f\,x) \stackrel{f\,x = f\,y}{\longmapsto} \cdots
+\]
+whereas applying the same tactic to
+\begin{ttbox}
+{\out 1. [| y = x; f x = f y; P (f x) |] ==> Q}
+\end{ttbox}
+terminates.
+
+\medskip
+
+Using the simplifier effectively may take a bit of experimentation.
+Set the \verb$trace_simp$\index{tracing!of simplification} flag to get
+a better idea of what is going on. The resulting output can be
+enormous, especially since invocations of the simplifier are often
+nested (e.g.\ when solving conditions of rewrite rules).
+
+
+\subsection{Modifying the current simpset}
+\begin{ttbox}
+Addsimps : thm list -> unit
+Delsimps : thm list -> unit
+Addsimprocs : simproc list -> unit
+Delsimprocs : simproc list -> unit
+Addcongs : thm list -> unit
+Delcongs : thm list -> unit
+Addsplits : thm list -> unit
+Delsplits : thm list -> unit
+\end{ttbox}
+
+Depending on the theory context, the \texttt{Add} and \texttt{Del}
+functions manipulate basic components of the associated current
+simpset. Internally, all rewrite rules have to be expressed as
+(conditional) meta-equalities. This form is derived automatically
+from object-level equations that are supplied by the user. Another
+source of rewrite rules are \emph{simplification procedures}, that is
+\ML\ functions that produce suitable theorems on demand, depending on
+the current redex. Congruences are a more advanced feature; see
+{\S}\ref{sec:simp-congs}.
+
+\begin{ttdescription}
+
+\item[\ttindexbold{Addsimps} $thms$;] adds rewrite rules derived from
+ $thms$ to the current simpset.
+
+\item[\ttindexbold{Delsimps} $thms$;] deletes rewrite rules derived
+ from $thms$ from the current simpset.
+
+\item[\ttindexbold{Addsimprocs} $procs$;] adds simplification
+ procedures $procs$ to the current simpset.
+
+\item[\ttindexbold{Delsimprocs} $procs$;] deletes simplification
+ procedures $procs$ from the current simpset.
+
+\item[\ttindexbold{Addcongs} $thms$;] adds congruence rules to the
+ current simpset.
+
+\item[\ttindexbold{Delcongs} $thms$;] deletes congruence rules from the
+ current simpset.
+
+\item[\ttindexbold{Addsplits} $thms$;] adds splitting rules to the
+ current simpset.
+
+\item[\ttindexbold{Delsplits} $thms$;] deletes splitting rules from the
+ current simpset.
+
+\end{ttdescription}
+
+When a new theory is built, its implicit simpset is initialized by the union
+of the respective simpsets of its parent theories. In addition, certain
+theory definition constructs (e.g.\ \ttindex{datatype} and \ttindex{primrec}
+in HOL) implicitly augment the current simpset. Ordinary definitions are not
+added automatically!
+
+It is up to the user to manipulate the current simpset further by
+explicitly adding or deleting theorems and simplification procedures.
+
+\medskip
+
+Good simpsets are hard to design. Rules that obviously simplify,
+like $\Var{n}+0 = \Var{n}$, should be added to the current simpset right after
+they have been proved. More specific ones (such as distributive laws, which
+duplicate subterms) should be added only for specific proofs and deleted
+afterwards. Conversely, sometimes a rule needs
+to be removed for a certain proof and restored afterwards. The need for
+frequent additions or deletions may indicate a badly designed
+simpset.
+
+\begin{warn}
+ The union of the parent simpsets (as described above) is not always
+ a good starting point for the new theory. If some ancestors have
+ deleted simplification rules because they are no longer wanted,
+ while others have left those rules in, then the union will contain
+ the unwanted rules. After this union is formed, changes to
+ a parent simpset have no effect on the child simpset.
+\end{warn}
+
+
+\section{Simplification sets}\index{simplification sets}
+
+The simplifier is controlled by information contained in {\bf
+ simpsets}. These consist of several components, including rewrite
+rules, simplification procedures, congruence rules, and the subgoaler,
+solver and looper tactics. The simplifier should be set up with
+sensible defaults so that most simplifier calls specify only rewrite
+rules or simplification procedures. Experienced users can exploit the
+other components to streamline proofs in more sophisticated manners.
+
+\subsection{Inspecting simpsets}
+\begin{ttbox}
+print_ss : simpset -> unit
+rep_ss : simpset -> \{mss : meta_simpset,
+ subgoal_tac: simpset -> int -> tactic,
+ loop_tacs : (string * (int -> tactic))list,
+ finish_tac : solver list,
+ unsafe_finish_tac : solver list\}
+\end{ttbox}
+\begin{ttdescription}
+
+\item[\ttindexbold{print_ss} $ss$;] displays the printable contents of
+ simpset $ss$. This includes the rewrite rules and congruences in
+ their internal form expressed as meta-equalities. The names of the
+ simplification procedures and the patterns they are invoked on are
+ also shown. The other parts, functions and tactics, are
+ non-printable.
+
+\item[\ttindexbold{rep_ss} $ss$;] decomposes $ss$ as a record of its internal
+ components, namely the meta_simpset, the subgoaler, the loop tactics,
+ and the safe and unsafe solvers.
+
+\end{ttdescription}
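+
+For example, the printable part of the current theory's simpset can be
+inspected as follows:
+\begin{ttbox}
+print_ss (simpset());
+\end{ttbox}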
+
+
+\subsection{Building simpsets}
+\begin{ttbox}
+empty_ss : simpset
+merge_ss : simpset * simpset -> simpset
+\end{ttbox}
+\begin{ttdescription}
+
+\item[\ttindexbold{empty_ss}] is the empty simpset. This is not very useful
+ under normal circumstances because it doesn't contain suitable tactics
+ (subgoaler etc.). When setting up the simplifier for a particular
+ object-logic, one will typically define a more appropriate ``almost empty''
+ simpset. For example, in HOL this is called \ttindexbold{HOL_basic_ss}.
+
+\item[\ttindexbold{merge_ss} ($ss@1$, $ss@2$)] merges simpsets $ss@1$
+ and $ss@2$ by building the union of their respective rewrite rules,
+ simplification procedures and congruences. The other components
+ (tactics etc.) cannot be merged, though; they are taken from either
+ simpset\footnote{Actually from $ss@1$, but it would be unwise to count
+ on that.}.
+
+\end{ttdescription}
+
+
+\subsection{Rewrite rules}
+\begin{ttbox}
+addsimps : simpset * thm list -> simpset \hfill{\bf infix 4}
+delsimps : simpset * thm list -> simpset \hfill{\bf infix 4}
+\end{ttbox}
+
+\index{rewrite rules|(} Rewrite rules are theorems expressing some
+form of equality, for example:
+\begin{eqnarray*}
+ Suc(\Var{m}) + \Var{n} &=& \Var{m} + Suc(\Var{n}) \\
+ \Var{P}\conj\Var{P} &\bimp& \Var{P} \\
+ \Var{A} \un \Var{B} &\equiv& \{x.x\in \Var{A} \disj x\in \Var{B}\}
+\end{eqnarray*}
+Conditional rewrites such as $\Var{m}<\Var{n} \Imp \Var{m}/\Var{n} =
+0$ are also permitted; the conditions can be arbitrary formulas.
+
+Internally, all rewrite rules are translated into meta-equalities,
+theorems with conclusion $lhs \equiv rhs$. Each simpset contains a
+function for extracting equalities from arbitrary theorems. For
+example, $\neg(\Var{x}\in \{\})$ could be turned into $\Var{x}\in \{\}
+\equiv False$. This function can be installed using
+\ttindex{setmksimps} but only the definer of a logic should need to do
+this; see {\S}\ref{sec:setmksimps}. The function processes theorems
+added by \texttt{addsimps} as well as local assumptions.
+
+\begin{ttdescription}
+
+\item[$ss$ \ttindexbold{addsimps} $thms$] adds rewrite rules derived
+ from $thms$ to the simpset $ss$.
+
+\item[$ss$ \ttindexbold{delsimps} $thms$] deletes rewrite rules
+ derived from $thms$ from the simpset $ss$.
+
+\end{ttdescription}
+
+\begin{warn}
+ The simplifier will accept all standard rewrite rules: those where
+ all unknowns are of base type. Hence ${\Var{i}+(\Var{j}+\Var{k})} =
+ {(\Var{i}+\Var{j})+\Var{k}}$ is OK.
+
+ It will also deal gracefully with all rules whose left-hand sides
+ are so-called {\em higher-order patterns}~\cite{nipkow-patterns}.
+ \indexbold{higher-order pattern}\indexbold{pattern, higher-order}
+ These are terms in $\beta$-normal form (this will always be the case
+ unless you have done something strange) where each occurrence of an
+ unknown is of the form $\Var{F}(x@1,\dots,x@n)$, where the $x@i$ are
+ distinct bound variables. Hence $(\forall x.\Var{P}(x) \land
+ \Var{Q}(x)) \bimp (\forall x.\Var{P}(x)) \land (\forall
+ x.\Var{Q}(x))$ is also OK, in both directions.
+
+ In some rare cases the rewriter will even deal with quite general
+ rules: for example ${\Var{f}(\Var{x})\in range(\Var{f})} = True$
+ rewrites $g(a) \in range(g)$ to $True$, but will fail to match
+ $g(h(b)) \in range(\lambda x.g(h(x)))$. However, you can replace
+ the offending subterms (in our case $\Var{f}(\Var{x})$, which is not
+ a pattern) by adding new variables and conditions: $\Var{y} =
+ \Var{f}(\Var{x}) \Imp \Var{y}\in range(\Var{f}) = True$ is
+ acceptable as a conditional rewrite rule since conditions can be
+ arbitrary terms.
+
+ There is basically no restriction on the form of the right-hand
+ sides. They may not contain extraneous term or type variables,
+ though.
+\end{warn}
+\index{rewrite rules|)}
+
+
+\subsection{*The subgoaler}\label{sec:simp-subgoaler}
+\begin{ttbox}
+setsubgoaler :
+ simpset * (simpset -> int -> tactic) -> simpset \hfill{\bf infix 4}
+prems_of_ss : simpset -> thm list
+\end{ttbox}
+
+The subgoaler is the tactic used to solve subgoals arising out of
+conditional rewrite rules or congruence rules. The default should be
+simplification itself. Occasionally this strategy needs to be
+changed. For example, if the premise of a conditional rule is an
+instance of its conclusion, as in $Suc(\Var{m}) < \Var{n} \Imp \Var{m}
+< \Var{n}$, the default strategy could loop.
+
+\begin{ttdescription}
+
+\item[$ss$ \ttindexbold{setsubgoaler} $tacf$] sets the subgoaler of
+ $ss$ to $tacf$. The function $tacf$ will be applied to the current
+ simplifier context expressed as a simpset.
+
+\item[\ttindexbold{prems_of_ss} $ss$] retrieves the current set of
+ premises from simplifier context $ss$. This may be non-empty only
+ if the simplifier has been told to utilize local assumptions in the
+ first place, e.g.\ if invoked via \texttt{asm_simp_tac}.
+
+\end{ttdescription}
+
+As an example, consider the following subgoaler:
+\begin{ttbox}
+fun subgoaler ss =
+ assume_tac ORELSE'
+ resolve_tac (prems_of_ss ss) ORELSE'
+ asm_simp_tac ss;
+\end{ttbox}
+This tactic first tries to solve the subgoal by assumption or by
+resolving with one of the premises, calling simplification only
+if that fails.
+
+
+\subsection{*The solver}\label{sec:simp-solver}
+\begin{ttbox}
+mk_solver : string -> (thm list -> int -> tactic) -> solver
+setSolver : simpset * solver -> simpset \hfill{\bf infix 4}
+addSolver : simpset * solver -> simpset \hfill{\bf infix 4}
+setSSolver : simpset * solver -> simpset \hfill{\bf infix 4}
+addSSolver : simpset * solver -> simpset \hfill{\bf infix 4}
+\end{ttbox}
+
+A solver is a tactic that attempts to solve a subgoal after
+simplification. Typically it just proves trivial subgoals such as
+\texttt{True} and $t=t$. It could use sophisticated means such as {\tt
+ blast_tac}, though that could make simplification expensive.
+To keep things more abstract, solvers are packaged up in type
+\texttt{solver}. The only way to create a solver is via \texttt{mk_solver}.
+
+Rewriting does not instantiate unknowns. For example, rewriting
+cannot prove $a\in \Var{A}$ since this requires
+instantiating~$\Var{A}$. The solver, however, is an arbitrary tactic
+and may instantiate unknowns as it pleases. This is the only way the
+simplifier can handle a conditional rewrite rule whose condition
+contains extra variables. When a simplification tactic is to be
+combined with other provers, especially with the classical reasoner,
+it is important whether it can be considered safe or not. For this
+reason a simpset contains two solvers, a safe and an unsafe one.
+
+The standard simplification strategy solely uses the unsafe solver,
+which is appropriate in most cases. For special applications where
+the simplification process is not allowed to instantiate unknowns
+within the goal, simplification starts with the safe solver, but may
+still apply the ordinary unsafe one in nested simplifications for
+conditional rules or congruences. Note that in this way the overall
+tactic is not totally safe: it may instantiate unknowns that appear also
+in other subgoals.
+
+\begin{ttdescription}
+\item[\ttindexbold{mk_solver} $s$ $tacf$] converts $tacf$ into a new solver;
+ the string $s$ is only attached as a comment and has no other significance.
+
+\item[$ss$ \ttindexbold{setSSolver} $tacf$] installs $tacf$ as the
+ \emph{safe} solver of $ss$.
+
+\item[$ss$ \ttindexbold{addSSolver} $tacf$] adds $tacf$ as an
+ additional \emph{safe} solver; it will be tried after the solvers
+ which had already been present in $ss$.
+
+\item[$ss$ \ttindexbold{setSolver} $tacf$] installs $tacf$ as the
+ unsafe solver of $ss$.
+
+\item[$ss$ \ttindexbold{addSolver} $tacf$] adds $tacf$ as an
+ additional unsafe solver; it will be tried after the solvers which
+ had already been present in $ss$.
+
+\end{ttdescription}
+
+\medskip
+
+\index{assumptions!in simplification} The solver tactic is invoked
+with a list of theorems, namely assumptions that hold in the local
+context. This may be non-empty only if the simplifier has been told
+to utilize local assumptions in the first place, e.g.\ if invoked via
+\texttt{asm_simp_tac}. The solver is also presented the full goal
+including its assumptions in any case. Thus it can use these (e.g.\
+by calling \texttt{assume_tac}), even if the list of premises is not
+passed.
+
+\medskip
+
+As explained in {\S}\ref{sec:simp-subgoaler}, the subgoaler is also used
+to solve the premises of congruence rules. These are usually of the
+form $s = \Var{x}$, where $s$ needs to be simplified and $\Var{x}$
+needs to be instantiated with the result. Typically, the subgoaler
+will invoke the simplifier at some point, which will eventually call
+the solver. For this reason, solver tactics must be prepared to solve
+goals of the form $t = \Var{x}$, usually by reflexivity. In
+particular, reflexivity should be tried before any of the fancy
+tactics like \texttt{blast_tac}.
+
+It may even happen that due to simplification the subgoal is no longer
+an equality. For example $False \bimp \Var{Q}$ could be rewritten to
+$\neg\Var{Q}$. To cover this case, the solver could try resolving
+with the theorem $\neg False$.
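+
+A solver along these lines might first try reflexivity, then the
+premises, then assumption (a minimal sketch, not the solver of any
+particular object-logic):
+\begin{ttbox}
+val my_solver = mk_solver "my_solver"
+  (fn prems => FIRST' [rtac refl, resolve_tac prems, atac]);
+val ss' = ss setSolver my_solver;
+\end{ttbox}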
+
+\medskip
+
+\begin{warn}
+ If a premise of a congruence rule cannot be proved, then the
+ congruence is ignored. This should only happen if the rule is
+ \emph{conditional} --- that is, contains premises not of the form $t
+ = \Var{x}$; otherwise it indicates that some congruence rule, or
+ possibly the subgoaler or solver, is faulty.
+\end{warn}
+
+
+\subsection{*The looper}\label{sec:simp-looper}
+\begin{ttbox}
+setloop : simpset * (int -> tactic) -> simpset \hfill{\bf infix 4}
+addloop : simpset * (string * (int -> tactic)) -> simpset \hfill{\bf infix 4}
+delloop : simpset * string -> simpset \hfill{\bf infix 4}
+addsplits : simpset * thm list -> simpset \hfill{\bf infix 4}
+delsplits : simpset * thm list -> simpset \hfill{\bf infix 4}
+\end{ttbox}
+
+The looper is a list of tactics that are applied after simplification, in case
+the solver failed to solve the simplified goal. If the looper
+succeeds, the simplification process is started all over again. Each
+of the subgoals generated by the looper is attacked in turn, in
+reverse order.
+
+A typical looper is case splitting\index{case splitting}: the
+expansion of a conditional.
+Another possibility is to apply an elimination rule on the
+assumptions. More adventurous loopers could start an induction.
+
+\begin{ttdescription}
+
+\item[$ss$ \ttindexbold{setloop} $tacf$] installs $tacf$ as the only looper
+ tactic of $ss$.
+
+\item[$ss$ \ttindexbold{addloop} $(name,tacf)$] adds $tacf$ as an additional
+ looper tactic with name $name$; it will be tried after the looper tactics
+ that had already been present in $ss$.
+
+\item[$ss$ \ttindexbold{delloop} $name$] deletes the looper tactic $name$
+ from $ss$.
+
+\item[$ss$ \ttindexbold{addsplits} $thms$] adds
+ split tactics for $thms$ as additional looper tactics of $ss$.
+
+\item[$ss$ \ttindexbold{delsplits} $thms$] deletes the
+ split tactics for $thms$ from the looper tactics of $ss$.
+
+\end{ttdescription}
+
+The splitter replaces applications of a given function; the right-hand side
+of the replacement can be anything. For example, here is a splitting rule
+for conditional expressions:
+\[ \Var{P}(if(\Var{Q},\Var{x},\Var{y})) \bimp (\Var{Q} \imp \Var{P}(\Var{x}))
+\conj (\neg\Var{Q} \imp \Var{P}(\Var{y}))
+\]
+Another example is the elimination operator for Cartesian products (which
+happens to be called~$split$):
+\[ \Var{P}(split(\Var{f},\Var{p})) \bimp (\forall a~b. \Var{p} =
+\langle a,b\rangle \imp \Var{P}(\Var{f}(a,b)))
+\]
+
+For technical reasons, there is a distinction between case splitting in the
+conclusion and in the premises of a subgoal. The former is done by
+\texttt{split_tac} with rules like \texttt{split_if} or \texttt{option.split},
+which do not split the subgoal, while the latter is done by
+\texttt{split_asm_tac} with rules like \texttt{split_if_asm} or
+\texttt{option.split_asm}, which split the subgoal.
+The operator \texttt{addsplits} automatically takes care of which tactic to
+call, analyzing the form of the rules given as argument.
+\begin{warn}
+Due to \texttt{split_asm_tac}, the simplifier may split subgoals!
+\end{warn}
+
+Case splits should be allowed only when necessary; they are expensive
+and hard to control. Here is an example of use, where \texttt{split_if}
+is the first rule above:
+\begin{ttbox}
+by (simp_tac (simpset()
+ addloop ("split if", split_tac [split_if])) 1);
+\end{ttbox}
+Users would usually prefer the following shortcut using \texttt{addsplits}:
+\begin{ttbox}
+by (simp_tac (simpset() addsplits [split_if]) 1);
+\end{ttbox}
+Case-splitting on conditional expressions is usually beneficial, so it is
+enabled by default in the object-logics \texttt{HOL} and \texttt{FOL}.
+
+
+\section{The simplification tactics}\label{simp-tactics}
+\index{simplification!tactics}\index{tactics!simplification}
+\begin{ttbox}
+generic_simp_tac : bool -> bool * bool * bool ->
+ simpset -> int -> tactic
+simp_tac : simpset -> int -> tactic
+asm_simp_tac : simpset -> int -> tactic
+full_simp_tac : simpset -> int -> tactic
+asm_full_simp_tac : simpset -> int -> tactic
+safe_asm_full_simp_tac : simpset -> int -> tactic
+\end{ttbox}
+
+\texttt{generic_simp_tac} is the basic tactic that is underlying any actual
+simplification work. The others are just instantiations of it. The rewriting
+strategy is always strictly bottom up, except for congruence rules,
+which are applied while descending into a term. Conditions in conditional
+rewrite rules are solved recursively before the rewrite rule is applied.
+
+\begin{ttdescription}
+
+\item[\ttindexbold{generic_simp_tac} $safe$ ($simp\_asm$, $use\_asm$, $mutual$)]
+ gives direct access to the various simplification modes:
+ \begin{itemize}
+ \item if $safe$ is {\tt true}, the safe solver is used as explained in
+ {\S}\ref{sec:simp-solver},
+ \item $simp\_asm$ determines whether the local assumptions are simplified,
+ \item $use\_asm$ determines whether the assumptions are used as local rewrite
+ rules, and
+ \item $mutual$ determines whether assumptions can simplify each other rather
+ than being processed from left to right.
+ \end{itemize}
+ This generic interface is intended
+ for building special tools, e.g.\ for combining the simplifier with the
+ classical reasoner. It is rarely used directly.
+
+\item[\ttindexbold{simp_tac}, \ttindexbold{asm_simp_tac},
+ \ttindexbold{full_simp_tac}, \ttindexbold{asm_full_simp_tac}] are
+ the basic simplification tactics that work exactly like their
+ namesakes in {\S}\ref{sec:simp-for-dummies}, except that they are
+ explicitly supplied with a simpset.
+
+\end{ttdescription}
+
+\medskip
+
+Local modifications of simpsets within a proof are often expressed much
+more cleanly by using the above tactics in conjunction with explicit
+simpsets, rather than their capitalized counterparts. For example
+\begin{ttbox}
+Addsimps \(thms\);
+by (Simp_tac \(i\));
+Delsimps \(thms\);
+\end{ttbox}
+can be expressed more appropriately as
+\begin{ttbox}
+by (simp_tac (simpset() addsimps \(thms\)) \(i\));
+\end{ttbox}
+
+\medskip
+
+Also note that functions depending implicitly on the current theory
+context (like capital \texttt{Simp_tac} and the other commands of
+{\S}\ref{sec:simp-for-dummies}) should be considered harmful outside of
+actual proof scripts. In particular, ML programs like theory
+definition packages or special tactics should refer to simpsets only
+explicitly, via the above tactics used in conjunction with
+\texttt{simpset_of} or the \texttt{SIMPSET} tacticals.
+
+
+\section{Forward rules and conversions}
+\index{simplification!forward rules}\index{simplification!conversions}
+\begin{ttbox}\index{*simplify}\index{*asm_simplify}\index{*full_simplify}\index{*asm_full_simplify}\index{*Simplifier.rewrite}\index{*Simplifier.asm_rewrite}\index{*Simplifier.full_rewrite}\index{*Simplifier.asm_full_rewrite}
+simplify : simpset -> thm -> thm
+asm_simplify : simpset -> thm -> thm
+full_simplify : simpset -> thm -> thm
+asm_full_simplify : simpset -> thm -> thm\medskip
+Simplifier.rewrite : simpset -> cterm -> thm
+Simplifier.asm_rewrite : simpset -> cterm -> thm
+Simplifier.full_rewrite : simpset -> cterm -> thm
+Simplifier.asm_full_rewrite : simpset -> cterm -> thm
+\end{ttbox}
+
+The first four of these functions provide \emph{forward} rules for
+simplification. Their effect is analogous to the corresponding
+tactics described in {\S}\ref{simp-tactics}, but affect the whole
+theorem instead of just a certain subgoal. Also note that the
+looper/solver process as described in {\S}\ref{sec:simp-looper} and
+{\S}\ref{sec:simp-solver} is omitted in forward simplification.
+
+The latter four are \emph{conversions}, establishing proven equations
+of the form $t \equiv u$ where the l.h.s.\ $t$ has been given as
+argument.
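+
+For example, an existing theorem bound to the hypothetical \ML{}
+identifier \texttt{my_thm} can be normalized with respect to the
+current simpset in a forward manner:
+\begin{ttbox}
+val my_thm' = asm_full_simplify (simpset()) my_thm;
+\end{ttbox}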
+
+\begin{warn}
+ Forward simplification rules and conversions should be used rarely
+ in ordinary proof scripts. The main intention is to provide an
+ internal interface to the simplifier for special utilities.
+\end{warn}
+
+
+\section{Permutative rewrite rules}
+\index{rewrite rules!permutative|(}
+
+A rewrite rule is {\bf permutative} if the left-hand side and right-hand
+side are the same up to renaming of variables. The most common permutative
+rule is commutativity: $x+y = y+x$. Other examples include $(x-y)-z =
+(x-z)-y$ in arithmetic and $insert(x,insert(y,A)) = insert(y,insert(x,A))$
+for sets. Such rules are common enough to merit special attention.
+
+Because ordinary rewriting loops given such rules, the simplifier
+employs a special strategy, called {\bf ordered
+ rewriting}\index{rewriting!ordered}. There is a standard
+lexicographic ordering on terms. This should be perfectly OK in most
+cases, but can be changed for special applications.
+
+\begin{ttbox}
+settermless : simpset * (term * term -> bool) -> simpset \hfill{\bf infix 4}
+\end{ttbox}
+\begin{ttdescription}
+
+\item[$ss$ \ttindexbold{settermless} $rel$] installs relation $rel$ as
+ term order in simpset $ss$.
+
+\end{ttdescription}
+
+\medskip
+
+A permutative rewrite rule is applied only if it decreases the given
+term with respect to this ordering. For example, commutativity
+rewrites~$b+a$ to $a+b$, but then stops because $a+b$ is strictly less
+than $b+a$. The Boyer-Moore theorem prover~\cite{bm88book} also
+employs ordered rewriting.
+
+Permutative rewrite rules are added to simpsets just like other
+rewrite rules; the simplifier recognizes their special status
+automatically. They are most effective in the case of
+associative-commutative operators. (Associativity by itself is not
+permutative.) When dealing with an AC-operator~$f$, keep the
+following points in mind:
+\begin{itemize}\index{associative-commutative operators}
+
+\item The associative law must always be oriented from left to right,
+ namely $f(f(x,y),z) = f(x,f(y,z))$. The opposite orientation, if
+ used with commutativity, leads to looping in conjunction with the
+ standard term order.
+
+\item To complete your set of rewrite rules, you must add not just
+ associativity~(A) and commutativity~(C) but also a derived rule, {\bf
+ left-com\-mut\-ativ\-ity} (LC): $f(x,f(y,z)) = f(y,f(x,z))$.
+\end{itemize}
+Ordered rewriting with the combination of A, C, and~LC sorts a term
+lexicographically:
+\[\def\maps#1{\stackrel{#1}{\longmapsto}}
+ (b+c)+a \maps{A} b+(c+a) \maps{C} b+(a+c) \maps{LC} a+(b+c) \]
+Martin and Nipkow~\cite{martin-nipkow} discuss the theory and give many
+examples; other algebraic structures are amenable to ordered rewriting,
+such as boolean rings.
+
+\subsection{Example: sums of natural numbers}
+
+This example is again set in HOL (see \texttt{HOL/ex/NatSum}). Theory
+\thydx{Arith} contains natural numbers arithmetic. Its associated simpset
+contains many arithmetic laws including distributivity of~$\times$ over~$+$,
+while \texttt{add_ac} is a list consisting of the A, C and LC laws for~$+$ on
+type \texttt{nat}. Let us prove the theorem
+\[ \sum@{i=1}^n i = n\times(n+1)/2. \]
+%
+A functional~\texttt{sum} represents the summation operator under the
+interpretation $\texttt{sum} \, f \, (n + 1) = \sum@{i=0}^n f\,i$. We
+extend \texttt{Arith} as follows:
+\begin{ttbox}
+NatSum = Arith +
+consts sum :: [nat=>nat, nat] => nat
+primrec
+ "sum f 0 = 0"
+ "sum f (Suc n) = f(n) + sum f n"
+end
+\end{ttbox}
+The \texttt{primrec} declaration automatically adds rewrite rules for
+\texttt{sum} to the default simpset. We now remove the
+\texttt{nat_cancel} simplification procedures (in order not to spoil
+the example) and insert the AC-rules for~$+$:
+\begin{ttbox}
+Delsimprocs nat_cancel;
+Addsimps add_ac;
+\end{ttbox}
+Our desired theorem now reads $\texttt{sum} \, (\lambda i.i) \, (n+1) =
+n\times(n+1)/2$. The Isabelle goal has both sides multiplied by~$2$:
+\begin{ttbox}
+Goal "2 * sum (\%i.i) (Suc n) = n * Suc n";
+{\out Level 0}
+{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
+{\out 1. 2 * sum (\%i. i) (Suc n) = n * Suc n}
+\end{ttbox}
+Induction should not be applied until the goal is in the simplest
+form:
+\begin{ttbox}
+by (Simp_tac 1);
+{\out Level 1}
+{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
+{\out 1. n + (sum (\%i. i) n + sum (\%i. i) n) = n * n}
+\end{ttbox}
+Ordered rewriting has sorted the terms in the left-hand side. The
+subgoal is now ready for induction:
+\begin{ttbox}
+by (induct_tac "n" 1);
+{\out Level 2}
+{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
+{\out 1. 0 + (sum (\%i. i) 0 + sum (\%i. i) 0) = 0 * 0}
+\ttbreak
+{\out 2. !!n. n + (sum (\%i. i) n + sum (\%i. i) n) = n * n}
+{\out ==> Suc n + (sum (\%i. i) (Suc n) + sum (\%i.\,i) (Suc n)) =}
+{\out Suc n * Suc n}
+\end{ttbox}
+Simplification proves both subgoals immediately:\index{*ALLGOALS}
+\begin{ttbox}
+by (ALLGOALS Asm_simp_tac);
+{\out Level 3}
+{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
+{\out No subgoals!}
+\end{ttbox}
+Simplification cannot prove the induction step if we omit \texttt{add_ac} from
+the simpset. Observe that like terms have not been collected:
+\begin{ttbox}
+{\out Level 3}
+{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
+{\out 1. !!n. n + sum (\%i. i) n + (n + sum (\%i. i) n) = n + n * n}
+{\out ==> n + (n + sum (\%i. i) n) + (n + (n + sum (\%i.\,i) n)) =}
+{\out n + (n + (n + n * n))}
+\end{ttbox}
+Ordered rewriting proves this by sorting the left-hand side. Proving
+arithmetic theorems without ordered rewriting requires explicit use of
+commutativity. This is tedious; try it and see!
+
+Ordered rewriting is equally successful in proving
+$\sum@{i=1}^n i^3 = n^2\times(n+1)^2/4$.
+
+
+\subsection{Re-orienting equalities}
+Ordered rewriting with the derived rule \texttt{symmetry} can reverse
+equations:
+\begin{ttbox}
+val symmetry = prove_goal HOL.thy "(x=y) = (y=x)"
+ (fn _ => [Blast_tac 1]);
+\end{ttbox}
+This is frequently useful. Assumptions of the form $s=t$, where $t$ occurs
+in the conclusion but not~$s$, can often be brought into the right form.
+For example, ordered rewriting with \texttt{symmetry} can prove the goal
+\[ f(a)=b \conj f(a)=c \imp b=c. \]
+Here \texttt{symmetry} reverses both $f(a)=b$ and $f(a)=c$
+because $f(a)$ is lexicographically greater than $b$ and~$c$. These
+re-oriented equations, as rewrite rules, replace $b$ and~$c$ in the
+conclusion by~$f(a)$.
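+For instance, here is how this example might run interactively (a sketch,
+assuming a HOL context; the equations are stated as meta-level assumptions
+so that \texttt{asm_full_simp_tac} can re-orient them):
+\begin{ttbox}
+Goal "!!a b c. [| f(a)=b; f(a)=c |] ==> b=c";
+by (asm_full_simp_tac (simpset() addsimps [symmetry]) 1);
+\end{ttbox}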
+
+Another example is the goal $\neg(t=u) \imp \neg(u=t)$.
+The differing orientations make this appear difficult to prove. Ordered
+rewriting with \texttt{symmetry} makes the equalities agree. (Without
+knowing more about~$t$ and~$u$ we cannot say whether they both go to $t=u$
+or~$u=t$.) Then the simplifier can prove the goal outright.
+
+\index{rewrite rules!permutative|)}
+
+
+\section{*Setting up the Simplifier}\label{sec:setting-up-simp}
+\index{simplification!setting up}
+
+Setting up the simplifier for new logics is complicated in the general case.
+This section describes how the simplifier is installed for intuitionistic
+first-order logic; the code is largely taken from {\tt FOL/simpdata.ML} of the
+Isabelle sources.
+
+The case splitting tactic, which resides in a separate file, is not part of
+Pure Isabelle. It needs to be loaded explicitly by the object-logic as
+follows (below \texttt{\~\relax\~\relax} refers to \texttt{\$ISABELLE_HOME}):
+\begin{ttbox}
+use "\~\relax\~\relax/src/Provers/splitter.ML";
+\end{ttbox}
+
+Simplification requires converting object-equalities to meta-level rewrite
+rules. This demands rules stating that equal terms and equivalent formulae
+are also equal at the meta-level. The rule declaration part of the file
+\texttt{FOL/IFOL.thy} contains the two lines
+\begin{ttbox}\index{*eq_reflection theorem}\index{*iff_reflection theorem}
+eq_reflection "(x=y) ==> (x==y)"
+iff_reflection "(P<->Q) ==> (P==Q)"
+\end{ttbox}
+Of course, you should only assert such rules if they are true for your
+particular logic. In Constructive Type Theory, equality is a ternary
+relation of the form $a=b\in A$; the type~$A$ determines the meaning
+of the equality essentially as a partial equivalence relation. The
+present simplifier cannot be used there. Rewriting in \texttt{CTT} uses
+another simplifier, which resides in the file {\tt
+ Provers/typedsimp.ML} and is not documented. Even this does not
+work for later variants of Constructive Type Theory that use
+intensional equality~\cite{nordstrom90}.
+
+
+\subsection{A collection of standard rewrite rules}
+
+We first prove lots of standard rewrite rules about the logical
+connectives. These include cancellation and associative laws. We
+define a function that echoes the desired law and then supplies it to the
+prover for intuitionistic FOL:
+\begin{ttbox}
+fun int_prove_fun s =
+ (writeln s;
+ prove_goal IFOL.thy s
+ (fn prems => [ (cut_facts_tac prems 1),
+ (IntPr.fast_tac 1) ]));
+\end{ttbox}
+The following rewrite rules about conjunction are a selection of those
+proved in \texttt{FOL/simpdata.ML}. Later, these will be supplied to the
+standard simpset.
+\begin{ttbox}
+val conj_simps = map int_prove_fun
+ ["P & True <-> P", "True & P <-> P",
+ "P & False <-> False", "False & P <-> False",
+ "P & P <-> P",
+ "P & ~P <-> False", "~P & P <-> False",
+ "(P & Q) & R <-> P & (Q & R)"];
+\end{ttbox}
+The file also proves some distributive laws. As they can cause exponential
+blowup, they will not be included in the standard simpset. Instead they
+are merely bound to an \ML{} identifier, for user reference.
+\begin{ttbox}
+val distrib_simps = map int_prove_fun
+ ["P & (Q | R) <-> P&Q | P&R",
+ "(Q | R) & P <-> Q&P | R&P",
+ "(P | Q --> R) <-> (P --> R) & (Q --> R)"];
+\end{ttbox}
+
+
+\subsection{Functions for preprocessing the rewrite rules}
+\label{sec:setmksimps}
+\begin{ttbox}\indexbold{*setmksimps}
+setmksimps : simpset * (thm -> thm list) -> simpset \hfill{\bf infix 4}
+\end{ttbox}
+The next step is to define the function for preprocessing rewrite rules.
+This will be installed by calling \texttt{setmksimps} below. Preprocessing
+occurs whenever rewrite rules are added, whether by user command or
+automatically. Preprocessing involves extracting atomic rewrites at the
+object-level, then reflecting them to the meta-level.
+
+To start, the function \texttt{gen_all} strips any meta-level
+quantifiers from the front of the given theorem.
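+It might be defined as follows (a sketch using the primitive
+\texttt{forall_elim_vars}, which replaces outer parameters by unknowns):
+\begin{ttbox}
+fun gen_all th = forall_elim_vars (#maxidx (rep_thm th) + 1) th;
+\end{ttbox}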
+
+The function \texttt{atomize} analyses a theorem in order to extract
+atomic rewrite rules. The head of all the patterns, matched by the
+wildcard~\texttt{_}, is the coercion function \texttt{Trueprop}.
+\begin{ttbox}
+fun atomize th = case concl_of th of
+ _ $ (Const("op &",_) $ _ $ _) => atomize(th RS conjunct1) \at
+ atomize(th RS conjunct2)
+ | _ $ (Const("op -->",_) $ _ $ _) => atomize(th RS mp)
+ | _ $ (Const("All",_) $ _) => atomize(th RS spec)
+ | _ $ (Const("True",_)) => []
+ | _ $ (Const("False",_)) => []
+ | _ => [th];
+\end{ttbox}
+There are several cases, depending upon the form of the conclusion:
+\begin{itemize}
+\item Conjunction: extract rewrites from both conjuncts.
+\item Implication: convert $P\imp Q$ to the meta-implication $P\Imp Q$ and
+ extract rewrites from~$Q$; these will be conditional rewrites with the
+ condition~$P$.
+\item Universal quantification: remove the quantifier, replacing the bound
+ variable by a schematic variable, and extract rewrites from the body.
+\item \texttt{True} and \texttt{False} contain no useful rewrites.
+\item Anything else: return the theorem in a singleton list.
+\end{itemize}
+The resulting theorems are not literally atomic --- they could be
+disjunctive, for example --- but are broken down as much as possible.
+See the file \texttt{ZF/simpdata.ML} for a sophisticated translation of
+set-theoretic formulae into rewrite rules.
+
+For standard situations like the above,
+there is a generic auxiliary function \ttindexbold{mk_atomize}. It takes a
+list of pairs $(name, thms)$, where $name$ is an operator name and
+$thms$ is a list of theorems to resolve with when the pattern matches,
+and returns a suitable \texttt{atomize} function.
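+For instance (a sketch; the operator names are those of FOL), an
+\texttt{atomize} function similar to the one above might be obtained by
+\begin{ttbox}
+val atomize = mk_atomize [("op &",   [conjunct1, conjunct2]),
+                          ("op -->", [mp]),
+                          ("All",    [spec])];
+\end{ttbox}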
+
+
+The simplified rewrites must now be converted into meta-equalities. The
+rule \texttt{eq_reflection} converts equality rewrites, while {\tt
+ iff_reflection} converts if-and-only-if rewrites. The latter possibility
+can arise in two other ways: the negative theorem~$\neg P$ is converted to
+$P\equiv\texttt{False}$, and any other theorem~$P$ is converted to
+$P\equiv\texttt{True}$. The rules \texttt{iff_reflection_F} and {\tt
+ iff_reflection_T} accomplish this conversion.
+\begin{ttbox}
+val P_iff_F = int_prove_fun "~P ==> (P <-> False)";
+val iff_reflection_F = P_iff_F RS iff_reflection;
+\ttbreak
+val P_iff_T = int_prove_fun "P ==> (P <-> True)";
+val iff_reflection_T = P_iff_T RS iff_reflection;
+\end{ttbox}
+The function \texttt{mk_eq} converts a theorem to a meta-equality
+using the case analysis described above.
+\begin{ttbox}
+fun mk_eq th = case concl_of th of
+ _ $ (Const("op =",_)$_$_) => th RS eq_reflection
+ | _ $ (Const("op <->",_)$_$_) => th RS iff_reflection
+ | _ $ (Const("Not",_)$_) => th RS iff_reflection_F
+ | _ => th RS iff_reflection_T;
+\end{ttbox}
+The
+three functions \texttt{gen_all}, \texttt{atomize} and \texttt{mk_eq}
+will be composed together and supplied below to \texttt{setmksimps}.
+
+
+\subsection{Making the initial simpset}
+
+It is time to assemble these items. The list \texttt{IFOL_simps} contains the
+default rewrite rules for intuitionistic first-order logic. The first of
+these is the reflexive law expressed as the equivalence
+$(a=a)\bimp\texttt{True}$; the rewrite rule $a=a$ is clearly useless.
+\begin{ttbox}
+val IFOL_simps =
+ [refl RS P_iff_T] \at conj_simps \at disj_simps \at not_simps \at
+ imp_simps \at iff_simps \at quant_simps;
+\end{ttbox}
+The list \texttt{triv_rls} contains trivial theorems for the solver. Any
+subgoal that is simplified to one of these will be removed.
+\begin{ttbox}
+val notFalseI = int_prove_fun "~False";
+val triv_rls = [TrueI,refl,iff_refl,notFalseI];
+\end{ttbox}
+We also define the function \ttindex{mk_meta_cong} to convert the conclusion
+of congruence rules into meta-equalities.
+\begin{ttbox}
+fun mk_meta_cong rl = standard (mk_meta_eq (mk_meta_prems rl));
+\end{ttbox}
+%
+The basic simpset for intuitionistic FOL is \ttindexbold{FOL_basic_ss}. It
+preprocesses rewrite rules using
+\texttt{gen_all}, \texttt{atomize} and \texttt{mk_eq}.
+It solves simplified subgoals using \texttt{triv_rls} and assumptions, and by
+detecting contradictions. It uses \ttindex{asm_simp_tac} to tackle subgoals
+of conditional rewrites.
+
+Other simpsets built from \texttt{FOL_basic_ss} will inherit these items.
+In particular, \ttindexbold{IFOL_ss} introduces {\tt
+ IFOL_simps} as rewrite rules; \ttindexbold{FOL_ss} will later
+extend \texttt{IFOL_ss} with classical rewrite rules such as $\neg\neg
+P\bimp P$.
+\index{*setmksimps}\index{*setSSolver}\index{*setSolver}\index{*setsubgoaler}
+\index{*addsimps}\index{*addcongs}
+\begin{ttbox}
+fun unsafe_solver prems = FIRST'[resolve_tac (triv_rls {\at} prems),
+ atac, etac FalseE];
+
+fun safe_solver prems = FIRST'[match_tac (triv_rls {\at} prems),
+ eq_assume_tac, ematch_tac [FalseE]];
+
+val FOL_basic_ss =
+ empty_ss setsubgoaler asm_simp_tac
+ addsimprocs [defALL_regroup, defEX_regroup]
+ setSSolver safe_solver
+ setSolver unsafe_solver
+ setmksimps (map mk_eq o atomize o gen_all)
+ setmkcong mk_meta_cong;
+
+val IFOL_ss =
+ FOL_basic_ss addsimps (IFOL_simps {\at}
+ int_ex_simps {\at} int_all_simps)
+ addcongs [imp_cong];
+\end{ttbox}
+This simpset takes \texttt{imp_cong} as a congruence rule in order to use
+contextual information to simplify the conclusions of implications:
+\[ \List{\Var{P}\bimp\Var{P'};\; \Var{P'} \Imp \Var{Q}\bimp\Var{Q'}} \Imp
+ (\Var{P}\imp\Var{Q}) \bimp (\Var{P'}\imp\Var{Q'})
+\]
+By adding the congruence rule \texttt{conj_cong}, we could obtain a similar
+effect for conjunctions.
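+For instance (a sketch), such a simpset could be declared by
+\begin{ttbox}
+val IFOL_cong_ss = IFOL_ss addcongs [conj_cong];
+\end{ttbox}
+Then the second conjunct of $P\conj Q$ would be simplified under the
+assumption~$P$.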
+
+
+\index{simplification|)}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/substitution.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,217 @@
+
+\chapter{Substitution Tactics} \label{substitution}
+\index{tactics!substitution|(}\index{equality|(}
+
+Replacing equals by equals is a basic form of reasoning. Isabelle supports
+several kinds of equality reasoning. {\bf Substitution} means replacing
+free occurrences of~$t$ by~$u$ in a subgoal. This is easily done, given an
+equality $t=u$, provided the logic possesses the appropriate rule. The
+tactic \texttt{hyp_subst_tac} performs substitution even in the assumptions.
+But it works via object-level implication, and therefore must be specially
+set up for each suitable object-logic.
+
+Substitution should not be confused with object-level {\bf rewriting}.
+Given equalities of the form $t=u$, rewriting replaces instances of~$t$ by
+corresponding instances of~$u$, and continues until it reaches a normal
+form. Substitution handles `one-off' replacements by particular
+equalities while rewriting handles general equations.
+Chapter~\ref{chap:simplification} discusses Isabelle's rewriting tactics.
+
+
+\section{Substitution rules}
+\index{substitution!rules}\index{*subst theorem}
+Many logics include a substitution rule of the form
+$$
+\List{\Var{a}=\Var{b}; \Var{P}(\Var{a})} \Imp
+\Var{P}(\Var{b}) \eqno(subst)
+$$
+In backward proof, this may seem difficult to use: the conclusion
+$\Var{P}(\Var{b})$ admits far too many unifiers. But, if the theorem {\tt
+eqth} asserts $t=u$, then \hbox{\tt eqth RS subst} is the derived rule
+\[ \Var{P}(t) \Imp \Var{P}(u). \]
+Provided $u$ is not an unknown, resolution with this rule is
+well-behaved.\footnote{Unifying $\Var{P}(u)$ with a formula~$Q$
+expresses~$Q$ in terms of its dependence upon~$u$. There are still $2^k$
+unifiers, if $Q$ has $k$ occurrences of~$u$, but Isabelle ensures that
+the first unifier includes all the occurrences.} To replace $u$ by~$t$ in
+subgoal~$i$, use
+\begin{ttbox}
+resolve_tac [eqth RS subst] \(i\){\it.}
+\end{ttbox}
+To replace $t$ by~$u$ in
+subgoal~$i$, use
+\begin{ttbox}
+resolve_tac [eqth RS ssubst] \(i\){\it,}
+\end{ttbox}
+where \tdxbold{ssubst} is the `swapped' substitution rule
+$$
+\List{\Var{a}=\Var{b}; \Var{P}(\Var{b})} \Imp
+\Var{P}(\Var{a}). \eqno(ssubst)
+$$
+If \tdx{sym} denotes the symmetry rule
+\(\Var{a}=\Var{b}\Imp\Var{b}=\Var{a}\), then \texttt{ssubst} is just
+\hbox{\tt sym RS subst}. Many logics with equality include the rules {\tt
+subst} and \texttt{ssubst}, as well as \texttt{refl}, \texttt{sym} and \texttt{trans}
+(for the usual equality laws). Examples include \texttt{FOL} and \texttt{HOL},
+but not \texttt{CTT} (Constructive Type Theory).
+
+Elim-resolution is well-behaved with assumptions of the form $t=u$.
+To replace $u$ by~$t$ or $t$ by~$u$ in subgoal~$i$, use
+\begin{ttbox}
+eresolve_tac [subst] \(i\) {\rm or} eresolve_tac [ssubst] \(i\){\it.}
+\end{ttbox}
+
+Logics HOL, FOL and ZF define the tactic \ttindexbold{stac} by
+\begin{ttbox}
+fun stac eqth = CHANGED o rtac (eqth RS ssubst);
+\end{ttbox}
+Now \texttt{stac~eqth} is like \texttt{resolve_tac [eqth RS ssubst]} but with the
+valuable property of failing if the substitution has no effect.
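+For example (a sketch in HOL, assuming the usual commutativity theorem
+\texttt{add_commute}), the call
+\begin{ttbox}
+by (stac add_commute 1);
+\end{ttbox}
+replaces some instance of $\Var{m}+\Var{n}$ by $\Var{n}+\Var{m}$ in
+subgoal~1, and fails if the subgoal contains no such instance.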
+
+
+\section{Substitution in the hypotheses}
+\index{assumptions!substitution in}
+Substitution rules, like other rules of natural deduction, do not affect
+the assumptions. This can be inconvenient. Consider proving the subgoal
+\[ \List{c=a; c=b} \Imp a=b. \]
+Calling \texttt{eresolve_tac\ts[ssubst]\ts\(i\)} simply discards the
+assumption~$c=a$, since $c$ does not occur in~$a=b$. Of course, we can
+work out a solution. First apply \texttt{eresolve_tac\ts[subst]\ts\(i\)},
+replacing~$a$ by~$c$:
+\[ c=b \Imp c=b \]
+The resulting subgoal is trivially proved by assumption.
+Equality reasoning can be difficult, but this trivial proof requires
+nothing more sophisticated than substitution in the assumptions.
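+Spelt out as a little session (a sketch, in FOL say), the proof might read:
+\begin{ttbox}
+Goal "!!a b c. [| c=a; c=b |] ==> a=b";
+by (etac subst 1);    (*replace a by c, consuming c=a*)
+by (assume_tac 1);    (*prove c=b from the assumption c=b*)
+\end{ttbox}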
+Object-logics that include the rule~$(subst)$ provide tactics for this
+purpose:
+\begin{ttbox}
+hyp_subst_tac : int -> tactic
+bound_hyp_subst_tac : int -> tactic
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{hyp_subst_tac} {\it i}]
+ selects an equality assumption of the form $t=u$ or $u=t$, where $t$ is a
+ free variable or parameter. Deleting this assumption, it replaces $t$
+ by~$u$ throughout subgoal~$i$, including the other assumptions.
+
+\item[\ttindexbold{bound_hyp_subst_tac} {\it i}]
+ is similar but only substitutes for parameters (bound variables).
+ Uses for this are discussed below.
+\end{ttdescription}
+The term being replaced must be a free variable or parameter. Substitution
+for constants is usually unhelpful, since they may appear in other
+theorems. For instance, the best way to use the assumption $0=1$ is to
+contradict a theorem that states $0\not=1$, rather than to replace 0 by~1
+in the subgoal!
+
+Substitution for unknowns, such as $\Var{x}=0$, is a bad idea: we might prove
+the subgoal more easily by instantiating~$\Var{x}$ to~1.
+Substitution for free variables is unhelpful if they appear in the
+premises of a rule being derived: the substitution affects object-level
+assumptions, not meta-level assumptions. For instance, replacing~$a$
+by~$b$ could make the premise~$P(a)$ worthless. To avoid this problem, use
+\texttt{bound_hyp_subst_tac}; alternatively, call \ttindex{cut_facts_tac} to
+insert the atomic premises as object-level assumptions.
+
+
+\section{Setting up the package}
+Many Isabelle object-logics, such as \texttt{FOL}, \texttt{HOL} and their
+descendants, come with \texttt{hyp_subst_tac} already defined. A few others,
+such as \texttt{CTT}, do not support this tactic because they lack the
+rule~$(subst)$. When defining a new logic that includes a substitution
+rule and implication, you must set up \texttt{hyp_subst_tac} yourself. It
+is packaged as the \ML{} functor \ttindex{HypsubstFun}, which takes the
+argument signature~\texttt{HYPSUBST_DATA}:
+\begin{ttbox}
+signature HYPSUBST_DATA =
+ sig
+ structure Simplifier : SIMPLIFIER
+ val dest_Trueprop : term -> term
+ val dest_eq : term -> (term*term)*typ
+ val dest_imp : term -> term*term
+ val eq_reflection : thm (* a=b ==> a==b *)
+ val rev_eq_reflection: thm (* a==b ==> a=b *)
+ val imp_intr : thm (*(P ==> Q) ==> P-->Q *)
+ val rev_mp : thm (* [| P; P-->Q |] ==> Q *)
+ val subst : thm (* [| a=b; P(a) |] ==> P(b) *)
+ val sym : thm (* a=b ==> b=a *)
+ val thin_refl : thm (* [|x=x; P|] ==> P *)
+ end;
+\end{ttbox}
+Thus, the functor requires the following items:
+\begin{ttdescription}
+\item[Simplifier] should be an instance of the simplifier (see
+ Chapter~\ref{chap:simplification}).
+
+\item[\ttindexbold{dest_Trueprop}] should coerce a meta-level formula to the
+ corresponding object-level one. Typically, it should return $P$ when
+ applied to the term $\texttt{Trueprop}\,P$ (see example below).
+
+\item[\ttindexbold{dest_eq}] should return the triple~$((t,u),T)$, where $T$ is
+ the type of~$t$ and~$u$, when applied to the \ML{} term that
+ represents~$t=u$. For other terms, it should raise an exception.
+
+\item[\ttindexbold{dest_imp}] should return the pair~$(P,Q)$ when applied to
+ the \ML{} term that represents the implication $P\imp Q$. For other terms,
+ it should raise an exception.
+
+\item[\tdxbold{eq_reflection}] is the theorem discussed
+ in~\S\ref{sec:setting-up-simp}.
+
+\item[\tdxbold{rev_eq_reflection}] is the reverse of \texttt{eq_reflection}.
+
+\item[\tdxbold{imp_intr}] should be the implies introduction
+rule $(\Var{P}\Imp\Var{Q})\Imp \Var{P}\imp\Var{Q}$.
+
+\item[\tdxbold{rev_mp}] should be the `reversed' implies elimination
+rule $\List{\Var{P}; \;\Var{P}\imp\Var{Q}} \Imp \Var{Q}$.
+
+\item[\tdxbold{subst}] should be the substitution rule
+$\List{\Var{a}=\Var{b};\; \Var{P}(\Var{a})} \Imp \Var{P}(\Var{b})$.
+
+\item[\tdxbold{sym}] should be the symmetry rule
+$\Var{a}=\Var{b}\Imp\Var{b}=\Var{a}$.
+
+\item[\tdxbold{thin_refl}] should be the rule
+$\List{\Var{a}=\Var{a};\; \Var{P}} \Imp \Var{P}$, which is used to erase
+trivial equalities.
+\end{ttdescription}
+%
+The functor resides in file \texttt{Provers/hypsubst.ML} in the Isabelle
+distribution directory. It is not sensitive to the precise formalization
+of the object-logic. It is not concerned with the names of the equality
+and implication symbols, or the types of formulae and terms.
+
+Coding the functions \texttt{dest_Trueprop}, \texttt{dest_eq} and
+\texttt{dest_imp} requires knowledge of Isabelle's representation of terms.
+For \texttt{FOL}, they are declared by
+\begin{ttbox}
+fun dest_Trueprop (Const ("Trueprop", _) $ P) = P
+ | dest_Trueprop t = raise TERM ("dest_Trueprop", [t]);
+
+fun dest_eq (Const("op =",T) $ t $ u) = ((t, u), domain_type T)
+ | dest_eq t = raise TERM ("dest_eq", [t]);
+
+fun dest_imp (Const("op -->",_) $ A $ B) = (A, B)
+ | dest_imp t = raise TERM ("dest_imp", [t]);
+\end{ttbox}
+Recall that \texttt{Trueprop} is the coercion from type~$o$ to type~$prop$,
+while \hbox{\tt op =} is the internal name of the infix operator~\texttt{=}.
+Function \ttindexbold{domain_type}, given the function type $S\To T$, returns
+the type~$S$. Pattern-matching expresses the function concisely, using
+wildcards~({\tt_}) for the types.
+
+The tactic \texttt{hyp_subst_tac} works as follows. First, it identifies a
+suitable equality assumption, possibly re-orienting it using~\texttt{sym}.
+Next, it moves the other assumptions into the conclusion of the goal, by
+repeatedly calling \texttt{etac~rev_mp}. It then uses
+\texttt{asm_full_simp_tac} or \texttt{ssubst} to substitute throughout the
+subgoal. (If the equality involves unknowns then it must use
+\texttt{ssubst}.) After deleting the equality, it finally moves the
+assumptions back to their original positions by calling
+\hbox{\tt resolve_tac\ts[imp_intr]}.
+
+\index{equality|)}\index{tactics!substitution|)}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/syntax.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,240 @@
+
+\chapter{Syntax Transformations} \label{chap:syntax}
+\newcommand\ttapp{\mathrel{\hbox{\tt\$}}}
+\newcommand\mtt[1]{\mbox{\tt #1}}
+\newcommand\ttfct[1]{\mathop{\mtt{#1}}\nolimits}
+\newcommand\Constant{\ttfct{Constant}}
+\newcommand\Variable{\ttfct{Variable}}
+\newcommand\Appl[1]{\ttfct{Appl}\,[#1]}
+\index{syntax!transformations|(}
+
+
+\section{Transforming parse trees to ASTs}\label{sec:astofpt}
+\index{ASTs!made from parse trees}
+\newcommand\astofpt[1]{\lbrakk#1\rbrakk}
+
+The parse tree is the raw output of the parser. Translation functions,
+called {\bf parse AST translations}\indexbold{translations!parse AST},
+transform the parse tree into an abstract syntax tree.
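+Abstract syntax trees are represented in \ML{} by a recursive datatype
+along these lines (a sketch of the \texttt{ast} type of module
+\texttt{Syntax}):
+\begin{ttbox}
+datatype ast =
+    Constant of string    (*fixed constants and delimiters*)
+  | Variable of string    (*identifiers and unknowns*)
+  | Appl of ast list;     (*applications; at least two elements*)
+\end{ttbox}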
+
+The parse tree is constructed by nesting the right-hand sides of the
+productions used to recognize the input. Such parse trees are simply lists
+of tokens and constituent parse trees, the latter representing the
+nonterminals of the productions. Let us refer to the actual productions in
+the form displayed by {\tt print_syntax} (see \S\ref{sec:inspct-thy} for an
+example).
+
+Ignoring parse \AST{} translations, parse trees are transformed to \AST{}s
+by stripping out delimiters and copy productions. More precisely, the
+mapping $\astofpt{-}$ is derived from the productions as follows:
+\begin{itemize}
+\item Name tokens: $\astofpt{t} = \Variable s$, where $t$ is an \ndx{id},
+ \ndx{var}, \ndx{tid}, \ndx{tvar}, \ndx{num}, \ndx{xnum} or \ndx{xstr} token,
+ and $s$ its associated string. Note that for {\tt xstr} this does not
+ include the quotes.
+
+\item Copy productions:\index{productions!copy}
+ $\astofpt{\ldots P \ldots} = \astofpt{P}$. Here $\ldots$ stands for
+ strings of delimiters, which are discarded. $P$ stands for the single
+ constituent that is not a delimiter; it is either a nonterminal symbol or
+ a name token.
+
+ \item 0-ary productions: $\astofpt{\ldots \mtt{=>} c} = \Constant c$.
+ Here there are no constituents other than delimiters, which are
+ discarded.
+
+ \item $n$-ary productions, where $n \ge 1$: delimiters are discarded and
+ the remaining constituents $P@1$, \ldots, $P@n$ are built into an
+ application whose head constant is~$c$:
+ \[ \astofpt{\ldots P@1 \ldots P@n \ldots \mtt{=>} c} =
+ \Appl{\Constant c, \astofpt{P@1}, \ldots, \astofpt{P@n}}
+ \]
+\end{itemize}
+Figure~\ref{fig:parse_ast} presents some simple examples, where {\tt ==},
+{\tt _appl}, {\tt _args}, and so forth name productions of the Pure syntax.
+These examples illustrate the need for further translations to make \AST{}s
+closer to the typed $\lambda$-calculus. The Pure syntax provides
+predefined parse \AST{} translations\index{translations!parse AST} for
+ordinary applications, type applications, nested abstractions, meta
+implications and function types. Figure~\ref{fig:parse_ast_tr} shows their
+effect on some representative input strings.
+
+
+\begin{figure}
+\begin{center}
+\tt\begin{tabular}{ll}
+\rm input string & \rm \AST \\\hline
+"f" & f \\
+"'a" & 'a \\
+"t == u" & ("==" t u) \\
+"f(x)" & ("_appl" f x) \\
+"f(x, y)" & ("_appl" f ("_args" x y)) \\
+"f(x, y, z)" & ("_appl" f ("_args" x ("_args" y z))) \\
+"\%x y.\ t" & ("_lambda" ("_idts" x y) t) \\
+\end{tabular}
+\end{center}
+\caption{Parsing examples using the Pure syntax}\label{fig:parse_ast}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\tt\begin{tabular}{ll}
+\rm input string & \rm \AST{} \\\hline
+"f(x, y, z)" & (f x y z) \\
+"'a ty" & (ty 'a) \\
+"('a, 'b) ty" & (ty 'a 'b) \\
+"\%x y z.\ t" & ("_abs" x ("_abs" y ("_abs" z t))) \\
+"\%x ::\ 'a.\ t" & ("_abs" ("_constrain" x 'a) t) \\
+"[| P; Q; R |] => S" & ("==>" P ("==>" Q ("==>" R S))) \\
+"['a, 'b, 'c] => 'd" & ("fun" 'a ("fun" 'b ("fun" 'c 'd)))
+\end{tabular}
+\end{center}
+\caption{Built-in parse \AST{} translations}\label{fig:parse_ast_tr}
+\end{figure}
+
+The names of constant heads in the \AST{} control the translation process.
+The list of constants invoking parse \AST{} translations appears in the
+output of {\tt print_syntax} under {\tt parse_ast_translation}.
+
+
+\section{Transforming ASTs to terms}\label{sec:termofast}
+\index{terms!made from ASTs}
+\newcommand\termofast[1]{\lbrakk#1\rbrakk}
+
+The \AST{}, after application of macros (see \S\ref{sec:macros}), is
+transformed into a term. This term is probably ill-typed since type
+inference has not occurred yet. The term may contain type constraints
+consisting of applications with head {\tt "_constrain"}; the second
+argument is a type encoded as a term. Type inference later introduces
+correct types or rejects the input.
+
+Another set of translation functions, namely parse
+translations\index{translations!parse}, may affect this process. If we
+ignore parse translations for the time being, then \AST{}s are transformed
+to terms by mapping \AST{} constants to constants, \AST{} variables to
+schematic or free variables and \AST{} applications to applications.
+
+More precisely, the mapping $\termofast{-}$ is defined by
+\begin{itemize}
+\item Constants: $\termofast{\Constant x} = \ttfct{Const} (x,
+ \mtt{dummyT})$.
+
+\item Schematic variables: $\termofast{\Variable \mtt{"?}xi\mtt"} =
+ \ttfct{Var} ((x, i), \mtt{dummyT})$, where $x$ is the base name and $i$
+ the index extracted from~$xi$.
+
+\item Free variables: $\termofast{\Variable x} = \ttfct{Free} (x,
+ \mtt{dummyT})$.
+
+\item Function applications with $n$ arguments:
+ \[ \termofast{\Appl{f, x@1, \ldots, x@n}} =
+ \termofast{f} \ttapp
+ \termofast{x@1} \ttapp \ldots \ttapp \termofast{x@n}
+ \]
+\end{itemize}
+Here \ttindex{Const}, \ttindex{Var}, \ttindex{Free} and
+\verb|$|\index{$@{\tt\$}} are constructors of the datatype \mltydx{term},
+while \ttindex{dummyT} stands for some dummy type that is ignored during
+type inference.
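+For example, the \AST{} $\Appl{\Variable \mtt{"f"}, \Variable \mtt{"x"}}$,
+which results from parsing \texttt{"f(x)"}, becomes the \ML{} term
+\begin{ttbox}
+Free ("f", dummyT) $ Free ("x", dummyT)
+\end{ttbox}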
+
+So far the outcome is still a first-order term. Abstractions and bound
+variables (constructors \ttindex{Abs} and \ttindex{Bound}) are introduced
+by parse translations. Such translations are attached to {\tt "_abs"},
+{\tt "!!"} and user-defined binders.
+
+
+\section{Printing of terms}
+\newcommand\astofterm[1]{\lbrakk#1\rbrakk}\index{ASTs!made from terms}
+
+The output phase is essentially the inverse of the input phase. Terms are
+translated via abstract syntax trees into strings. Finally the strings are
+pretty printed.
+
+Print translations (\S\ref{sec:tr_funs}) may affect the transformation of
+terms into \AST{}s. Ignoring those, the transformation maps
+term constants, variables and applications to the corresponding constructs
+on \AST{}s. Abstractions are mapped to applications of the special
+constant {\tt _abs}.
+
+More precisely, the mapping $\astofterm{-}$ is defined as follows:
+\begin{itemize}
+ \item $\astofterm{\ttfct{Const} (x, \tau)} = \Constant x$.
+
+ \item $\astofterm{\ttfct{Free} (x, \tau)} = constrain (\Variable x,
+ \tau)$.
+
+ \item $\astofterm{\ttfct{Var} ((x, i), \tau)} = constrain (\Variable
+ \mtt{"?}xi\mtt", \tau)$, where $\mtt?xi$ is the string representation of
+ the {\tt indexname} $(x, i)$.
+
+ \item For the abstraction $\lambda x::\tau.t$, let $x'$ be a variant
+ of~$x$ renamed to differ from all names occurring in~$t$, and let $t'$
+ be obtained from~$t$ by replacing all bound occurrences of~$x$ by
+ the free variable $x'$. This replaces corresponding occurrences of the
+ constructor \ttindex{Bound} by the term $\ttfct{Free} (x',
+ \mtt{dummyT})$:
+ \[ \astofterm{\ttfct{Abs} (x, \tau, t)} =
+ \Appl{\Constant \mtt{"_abs"},
+ constrain(\Variable x', \tau), \astofterm{t'}}
+ \]
+
+ \item $\astofterm{\ttfct{Bound} i} = \Variable \mtt{"B.}i\mtt"$.
+ Constructor \ttindex{Bound} should never occur
+ when printing well-typed terms; it indicates a loose de Bruijn index,
+ one with no matching abstraction.
+
+ \item Where $f$ is not an application,
+ \[ \astofterm{f \ttapp x@1 \ttapp \ldots \ttapp x@n} =
+ \Appl{\astofterm{f}, \astofterm{x@1}, \ldots,\astofterm{x@n}}
+ \]
+\end{itemize}
+%
+Type constraints\index{type constraints} are inserted to allow the printing
+of types. This is governed by the boolean variable \ttindex{show_types}:
+\begin{itemize}
+ \item $constrain(x, \tau) = x$ \ if $\tau = \mtt{dummyT}$ \index{*dummyT} or
+ \ttindex{show_types} is set to {\tt false}.
+
+ \item $constrain(x, \tau) = \Appl{\Constant \mtt{"_constrain"}, x,
+ \astofterm{\tau}}$ \ otherwise.
+
+ Here, $\astofterm{\tau}$ is the \AST{} encoding of $\tau$: type
+ constructors go to {\tt Constant}s; type identifiers go to {\tt
+ Variable}s; type applications go to {\tt Appl}s with the type
+ constructor as the first element. If \ttindex{show_sorts} is set to
+ {\tt true}, some type variables are decorated with an \AST{} encoding
+ of their sort.
+\end{itemize}
+%
+The \AST{}, after application of macros (see \S\ref{sec:macros}), is
+transformed into the final output string. The built-in {\bf print AST
+ translations}\indexbold{translations!print AST} reverse the
+parse \AST{} translations of Fig.\ts\ref{fig:parse_ast_tr}.
+
+For the actual printing process, the names attached to productions
+of the form $\ldots A^{(p@1)}@1 \ldots A^{(p@n)}@n \ldots \mtt{=>} c$ play
+a vital role. Each \AST{} with constant head $c$, namely $\mtt"c\mtt"$ or
+$(\mtt"c\mtt"~ x@1 \ldots x@n)$, is printed according to the production
+for~$c$. Each argument~$x@i$ is converted to a string, and put in
+parentheses if its priority~$(p@i)$ requires this. The resulting strings
+and their syntactic sugar (denoted by \dots{} above) are joined to make a
+single string.
+
+If an application $(\mtt"c\mtt"~ x@1 \ldots x@m)$ has more arguments
+than the corresponding production, it is first split into
+$((\mtt"c\mtt"~ x@1 \ldots x@n) ~ x@{n+1} \ldots x@m)$. Applications
+with too few arguments or with non-constant head or without a
+corresponding production are printed as $f(x@1, \ldots, x@l)$ or
+$(\alpha@1, \ldots, \alpha@l) ty$. Multiple productions associated
+with some name $c$ are tried in order of appearance. An occurrence of
+$\Variable x$ is simply printed as~$x$.
+
+Blanks are {\em not\/} inserted automatically. If blanks are required to
+separate tokens, specify them in the mixfix declaration, possibly preceded
+by a slash~({\tt/}) to allow a line break.
+\index{ASTs|)}
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/tactic.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,173 @@
+
+\chapter{Tactics} \label{tactics}
+\index{tactics|(}
+
+\section{Other basic tactics}
+
+\subsection{Inserting premises and facts}\label{cut_facts_tac}
+\index{tactics!for inserting facts}\index{assumptions!inserting}
+\begin{ttbox}
+cut_facts_tac : thm list -> int -> tactic
+\end{ttbox}
+This tactic adds assumptions to a subgoal.
+\begin{ttdescription}
+\item[\ttindexbold{cut_facts_tac} {\it thms} {\it i}]
+ adds the {\it thms} as new assumptions to subgoal~$i$. Once they have
+ been inserted as assumptions, they become subject to tactics such as {\tt
+ eresolve_tac} and {\tt rewrite_goals_tac}. Only rules with no premises
+ are inserted: Isabelle cannot use assumptions that contain $\Imp$
+ or~$\Forall$. Sometimes the theorems are premises of a rule being
+ derived, returned by~{\tt goal}; instead of calling this tactic, you
+ could state the goal with an outermost meta-quantifier.
+
+\end{ttdescription}
+
+
+\subsection{Composition: resolution without lifting}
+\index{tactics!for composition}
+\begin{ttbox}
+compose_tac: (bool * thm * int) -> int -> tactic
+\end{ttbox}
+{\bf Composing} two rules means resolving them without prior lifting or
+renaming of unknowns. This low-level operation, which underlies the
+resolution tactics, may occasionally be useful for special effects.
+A typical application is \ttindex{res_inst_tac}, which lifts and instantiates a
+rule, then passes the result to {\tt compose_tac}.
+\begin{ttdescription}
+\item[\ttindexbold{compose_tac} ($flag$, $rule$, $m$) $i$]
+refines subgoal~$i$ using $rule$, without lifting. The $rule$ is taken to
+have the form $\List{\psi@1; \ldots; \psi@m} \Imp \psi$, where $\psi$ need
+not be atomic; thus $m$ determines the number of new subgoals. If
+$flag$ is {\tt true} then it performs elim-resolution --- it solves the
+first premise of~$rule$ by assumption and deletes that assumption.
+\end{ttdescription}
+
+
+\section{*Managing lots of rules}
+These operations are not intended for interactive use. They are concerned
+with the processing of large numbers of rules in automatic proof
+strategies. Higher-order resolution involving a long list of rules is
+slow. Filtering techniques can shorten the list of rules given to
+resolution, and can also detect whether a subgoal is too flexible,
+with too many rules applicable.
+
+\subsection{Combined resolution and elim-resolution} \label{biresolve_tac}
+\index{tactics!resolution}
+\begin{ttbox}
+biresolve_tac : (bool*thm)list -> int -> tactic
+bimatch_tac : (bool*thm)list -> int -> tactic
+subgoals_of_brl : bool*thm -> int
+lessb : (bool*thm) * (bool*thm) -> bool
+\end{ttbox}
+{\bf Bi-resolution} takes a list of $\it (flag,rule)$ pairs. For each
+pair, it applies resolution if the flag is~{\tt false} and
+elim-resolution if the flag is~{\tt true}. A single tactic call handles a
+mixture of introduction and elimination rules.
+
+\begin{ttdescription}
+\item[\ttindexbold{biresolve_tac} {\it brls} {\it i}]
+refines the proof state by resolution or elim-resolution on each rule, as
+indicated by its flag. It affects subgoal~$i$ of the proof state.
+
+\item[\ttindexbold{bimatch_tac}]
+is like {\tt biresolve_tac}, but performs matching: unknowns in the
+proof state are never updated (see~{\S}\ref{match_tac}).
+
+\item[\ttindexbold{subgoals_of_brl}({\it flag},{\it rule})]
+returns the number of new subgoals that bi-res\-o\-lu\-tion would yield for the
+pair (if applied to a suitable subgoal). This is $n$ if the flag is
+{\tt false} and $n-1$ if the flag is {\tt true}, where $n$ is the number
+of premises of the rule. Elim-resolution yields one fewer subgoal than
+ordinary resolution because it solves the major premise by assumption.
+
+\item[\ttindexbold{lessb} ({\it brl1},{\it brl2})]
+returns the result of
+\begin{ttbox}
+subgoals_of_brl {\it brl1} < subgoals_of_brl {\it brl2}
+\end{ttbox}
+\end{ttdescription}
+Note that \hbox{\tt sort lessb {\it brls}} sorts a list of $\it
+(flag,rule)$ pairs by the number of new subgoals they will yield. Thus,
+those that yield the fewest subgoals should be tried first.
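+A sketch of the usual idiom, with a hypothetical list of rules:
+\begin{ttbox}
+val brls = [(false, conjI), (true, conjE), (true, disjE)];
+by (biresolve_tac (sort lessb brls) 1);
+\end{ttbox}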
+
+
+\subsection{Discrimination nets for fast resolution}\label{filt_resolve_tac}
+\index{discrimination nets|bold}
+\index{tactics!resolution}
+\begin{ttbox}
+net_resolve_tac : thm list -> int -> tactic
+net_match_tac : thm list -> int -> tactic
+net_biresolve_tac: (bool*thm) list -> int -> tactic
+net_bimatch_tac : (bool*thm) list -> int -> tactic
+filt_resolve_tac : thm list -> int -> int -> tactic
+could_unify : term*term->bool
+filter_thms : (term*term->bool) -> int*term*thm list -> thm{\ts}list
+\end{ttbox}
+The module {\tt Net} implements a discrimination net data structure for
+fast selection of rules \cite[Chapter 14]{charniak80}. A term is
+classified by the symbol list obtained by flattening it in preorder.
+The flattening takes account of function applications, constants, and free
+and bound variables; it identifies all unknowns and also regards
+\index{lambda abs@$\lambda$-abstractions}
+$\lambda$-abstractions as unknowns, since they could $\eta$-contract to
+anything.
+
+A discrimination net serves as a polymorphic dictionary indexed by terms.
+The module provides various functions for inserting and removing items from
+nets. It provides functions for returning all items whose term could match
+or unify with a target term. The matching and unification tests are
+overly lax (due to the identifications mentioned above) but they serve as
+useful filters.
+
+A net can store introduction rules indexed by their conclusion, and
+elimination rules indexed by their major premise. Isabelle provides
+several functions for `compiling' long lists of rules into fast
+resolution tactics. When supplied with a list of theorems, these functions
+build a discrimination net; the net is used when the tactic is applied to a
+goal. To avoid repeatedly constructing the nets, use currying: bind the
+resulting tactics to \ML{} identifiers.
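+For example (a sketch with a hypothetical list of introduction rules):
+\begin{ttbox}
+val intr_rls = [conjI, disjI1, disjI2, impI];
+val fast_intr_tac = net_resolve_tac intr_rls;  (*net is built once, here*)
+by (fast_intr_tac 1);                          (*each call reuses the net*)
+\end{ttbox}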
+
+\begin{ttdescription}
+\item[\ttindexbold{net_resolve_tac} {\it thms}]
+builds a discrimination net to obtain the effect of a similar call to {\tt
+resolve_tac}.
+
+\item[\ttindexbold{net_match_tac} {\it thms}]
+builds a discrimination net to obtain the effect of a similar call to {\tt
+match_tac}.
+
+\item[\ttindexbold{net_biresolve_tac} {\it brls}]
+builds a discrimination net to obtain the effect of a similar call to {\tt
+biresolve_tac}.
+
+\item[\ttindexbold{net_bimatch_tac} {\it brls}]
+builds a discrimination net to obtain the effect of a similar call to {\tt
+bimatch_tac}.
+
+\item[\ttindexbold{filt_resolve_tac} {\it thms} {\it maxr} {\it i}]
+uses discrimination nets to extract the {\it thms} that are applicable to
+subgoal~$i$. If more than {\it maxr\/} theorems are applicable then the
+tactic fails. Otherwise it calls {\tt resolve_tac}.
+
+This tactic helps avoid runaway instantiation of unknowns, for example in
+type inference.
+
+\item[\ttindexbold{could_unify} ({\it t},{\it u})]
+returns {\tt false} if~$t$ and~$u$ are `obviously' non-unifiable, and
+otherwise returns~{\tt true}. It assumes all variables are distinct,
+reporting that {\tt ?a=?a} may unify with {\tt 0=1}.
+
+\item[\ttindexbold{filter_thms} $could\; (limit,prem,thms)$]
+returns the list of potentially resolvable rules (in {\it thms\/}) for the
+subgoal {\it prem}, using the predicate {\it could\/} to compare the
+conclusion of the subgoal with the conclusion of each rule. The resulting list
+is no longer than {\it limit}.
+\end{ttdescription}
+
+\index{tactics|)}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/doc-src/Ref/document/thm.tex Mon Aug 27 17:24:01 2012 +0200
@@ -0,0 +1,636 @@
+
+\chapter{Theorems and Forward Proof}
+\index{theorems|(}
+
+Theorems, which represent the axioms, theorems and rules of
+object-logics, have type \mltydx{thm}. This chapter describes
+operations that join theorems in forward proof. Most theorem
+operations are intended for advanced applications, such as programming
+new proof procedures.
+
+
+\subsection{Instantiating unknowns in a theorem} \label{sec:instantiate}
+\index{instantiation}
+\begin{alltt}\footnotesize
+read_instantiate : (string*string) list -> thm -> thm
+read_instantiate_sg : Sign.sg -> (string*string) list -> thm -> thm
+cterm_instantiate : (cterm*cterm) list -> thm -> thm
+instantiate' : ctyp option list -> cterm option list -> thm -> thm
+\end{alltt}
+These meta-rules instantiate type and term unknowns in a theorem. They are
+occasionally useful: they can prevent difficulties with higher-order
+unification and can define specialized versions of rules.
+\begin{ttdescription}
+\item[\ttindexbold{read_instantiate} {\it insts} {\it thm}]
+processes the instantiations {\it insts} and instantiates the rule~{\it
+thm}. The processing of instantiations is described
+in \S\ref{res_inst_tac}, under {\tt res_inst_tac}.
+
+Use {\tt res_inst_tac}, not {\tt read_instantiate}, to instantiate a rule
+and refine a particular subgoal. The tactic allows instantiation by the
+subgoal's parameters, and reads the instantiations using the signature
+associated with the proof state.
+
+Use {\tt read_instantiate_sg} below if {\it insts\/} appears to be treated
+incorrectly.
+
+\item[\ttindexbold{read_instantiate_sg} {\it sg} {\it insts} {\it thm}]
+ is like \texttt{read_instantiate {\it insts}~{\it thm}}, but it reads
+ the instantiations under signature~{\it sg}. This is necessary to
+ instantiate a rule from a general theory, such as first-order logic,
+ using the notation of some specialized theory. Use the function {\tt
+ sign_of} to get a theory's signature.
+
+\item[\ttindexbold{cterm_instantiate} {\it ctpairs} {\it thm}]
+is similar to {\tt read_instantiate}, but the instantiations are provided
+as pairs of certified terms, not as strings to be read.
+
+\item[\ttindexbold{instantiate'} {\it ctyps} {\it cterms} {\it thm}]
+ instantiates {\it thm} according to the positional arguments {\it
+ ctyps} and {\it cterms}. Counting from left to right, schematic
+ variables $?x$ are either replaced by $t$ for any argument
+ \texttt{Some\(\;t\)}, or left unchanged in case of \texttt{None} or
+ if the end of the argument list is encountered. Types are
+ instantiated before terms.
+
+\end{ttdescription}
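+For example (a sketch in FOL, whose \texttt{refl} states
+$\Var{a}=\Var{a}$):
+\begin{ttbox}
+read_instantiate [("a","f(x)")] refl;
+(*val it = "f(x) = f(x)" : thm*)
+\end{ttbox}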
+
+
+\subsection{Miscellaneous forward rules}\label{MiscellaneousForwardRules}
+\index{theorems!standardizing}
+\begin{ttbox}
+standard : thm -> thm
+zero_var_indexes : thm -> thm
+make_elim : thm -> thm
+rule_by_tactic : tactic -> thm -> thm
+rotate_prems : int -> thm -> thm
+permute_prems : int -> int -> thm -> thm
+rearrange_prems : int list -> thm -> thm
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{standard} $thm$] puts $thm$ into the standard form
+ of object-rules. It discharges all meta-assumptions, replaces free
+ variables by schematic variables, renames schematic variables to
+ have subscript zero, also strips outer (meta) quantifiers and
+ removes dangling sort hypotheses.
+
+\item[\ttindexbold{zero_var_indexes} $thm$]
+makes all schematic variables have subscript zero, renaming them to avoid
+clashes.
+
+\item[\ttindexbold{make_elim} $thm$]
+\index{rules!converting destruction to elimination}
+converts $thm$, which should be a destruction rule of the form
+$\List{P@1;\ldots;P@m}\Imp
+Q$, to the elimination rule $\List{P@1; \ldots; P@m; Q\Imp R}\Imp R$. This
+is the basis for destruct-resolution: {\tt dresolve_tac}, etc.\ (see the
+sketch after this list).
+
+\item[\ttindexbold{rule_by_tactic} {\it tac} {\it thm}]
+ applies {\it tac\/} to the {\it thm}, freezing its variables first, then
+ yields the proof state returned by the tactic. In typical usage, the
+ {\it thm\/} represents an instance of a rule with several premises, some
+ with contradictory assumptions (because of the instantiation). The
+ tactic proves those subgoals and does whatever else it can, and returns
+ whatever is left.
+
+\item[\ttindexbold{rotate_prems} $k$ $thm$] rotates the premises of $thm$ to
+ the left by~$k$ positions (to the right if $k<0$). It simply calls
+ \texttt{permute_prems}, below, with $j=0$. Used with
+ \texttt{eresolve_tac}\index{*eresolve_tac!on other than first premise}, it
+ gives the effect of applying the tactic to some other premise of $thm$ than
+ the first.
+
+\item[\ttindexbold{permute_prems} $j$ $k$ $thm$] rotates the premises of $thm$
+ leaving the first $j$ premises unchanged. It
+ requires $0\leq j\leq n$, where $n$ is the number of premises. If $k$ is
+ positive then it rotates the remaining $n-j$ premises to the left; if $k$ is
+ negative then it rotates the premises to the right.
+
+\item[\ttindexbold{rearrange_prems} $ps$ $thm$] permutes the premises of $thm$
+ where the value at the $i$-th position (counting from $0$) in the list $ps$
+ gives the position within the original $thm$ to be transferred to position $i$.
+ Any remaining trailing positions are left unchanged.
+\end{ttdescription}
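+For instance (a sketch in FOL), applying \texttt{make_elim} to modus ponens
+yields a rule suitable for \texttt{eresolve_tac}:
+\begin{ttbox}
+make_elim mp;
+(*roughly:  [| ?P --> ?Q; ?P; ?Q ==> ?R |] ==> ?R*)
+\end{ttbox}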
+
+
+\subsection{Taking a theorem apart}
+\index{theorems!taking apart}
+\index{flex-flex constraints}
+\begin{ttbox}
+cprop_of : thm -> cterm
+concl_of : thm -> term
+prems_of : thm -> term list
+cprems_of : thm -> cterm list
+nprems_of : thm -> int
+tpairs_of : thm -> (term*term) list
+theory_of_thm : thm -> theory
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{cprop_of} $thm$] returns the statement of $thm$ as
+ a certified term.
+
+\item[\ttindexbold{concl_of} $thm$] returns the conclusion of $thm$ as
+ a term.
+
+\item[\ttindexbold{prems_of} $thm$] returns the premises of $thm$ as a
+ list of terms.
+
+\item[\ttindexbold{cprems_of} $thm$] returns the premises of $thm$ as
+ a list of certified terms.
+
+\item[\ttindexbold{nprems_of} $thm$]
+returns the number of premises in $thm$, and is equivalent to {\tt
+ length~(prems_of~$thm$)}.
+
+\item[\ttindexbold{tpairs_of} $thm$] returns the flex-flex constraints
+ of $thm$.
+
+\item[\ttindexbold{theory_of_thm} $thm$] returns the theory associated
+ with $thm$. Note that this does a lookup in Isabelle's global
+ database of loaded theories.
+
+\end{ttdescription}
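+For example (a sketch, assuming HOL's \texttt{conjI}, which states
+$\List{\Var{P};\Var{Q}}\Imp \Var{P}\conj\Var{Q}$):
+\begin{ttbox}
+nprems_of conjI;
+(*val it = 2 : int*)
+\end{ttbox}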
+
+
+\subsection{*Sort hypotheses} \label{sec:sort-hyps}
+\index{sort hypotheses}
+\begin{ttbox}
+strip_shyps : thm -> thm
+strip_shyps_warning : thm -> thm
+\end{ttbox}
+
+Isabelle's type variables are decorated with sorts, constraining them to
+certain ranges of types. This has little impact when sorts only serve for
+syntactic classification of types --- for example, FOL distinguishes between
+terms and other types. But when type classes are introduced through axioms,
+this may result in some sorts becoming {\em empty\/}: one cannot exhibit a
+type of such a sort because the corresponding set of axioms is unsatisfiable.
+
+If a theorem contains a type variable that is constrained by an empty
+sort, then that theorem has no instances. It is basically an instance
+of {\em ex falso quodlibet}. But what if it is used to prove another
+theorem that no longer involves that sort? The latter theorem holds
+only under an additional non-emptiness assumption.
+
+Therefore, Isabelle's theorems carry around sort hypotheses. The {\tt
+shyps} field is a list of sorts occurring in type variables in the current
+{\tt prop} and {\tt hyps} fields. It may also include sorts used in the
+theorem's proof that no longer appear in the {\tt prop} or {\tt hyps}
+fields --- so-called {\em dangling\/} sort constraints. These are the
+critical ones, asserting non-emptiness of the corresponding sorts.
+
+Isabelle automatically removes extraneous sorts from the {\tt shyps} field at
+the end of a proof, provided that non-emptiness can be established by looking
+at the theorem's signature: from the {\tt classes} and {\tt arities}
+information. This operation is performed by \texttt{strip_shyps} and
+\texttt{strip_shyps_warning}.
+
+\begin{ttdescription}
+
+\item[\ttindexbold{strip_shyps} $thm$] removes any extraneous sort hypotheses
+ that can be witnessed from the type signature.
+
+\item[\ttindexbold{strip_shyps_warning}] is like \texttt{strip_shyps}, but
+ issues a warning message for any pending sort hypotheses that do not have a
+ (syntactic) witness.
+
+\end{ttdescription}
+
+
+\subsection{Tracing flags for unification}
+\index{tracing!of unification}
+\begin{ttbox}
+Unify.trace_simp : bool ref \hfill\textbf{initially false}
+Unify.trace_types : bool ref \hfill\textbf{initially false}
+Unify.trace_bound : int ref \hfill\textbf{initially 10}
+Unify.search_bound : int ref \hfill\textbf{initially 20}
+\end{ttbox}
+Tracing the search may be useful when higher-order unification behaves
+unexpectedly. Letting {\tt res_inst_tac} circumvent the problem is easier,
+though.
+\begin{ttdescription}
+\item[set Unify.trace_simp;]
+causes tracing of the simplification phase.
+
+\item[set Unify.trace_types;]
+generates warnings of incompleteness, when unification is not considering
+all possible instantiations of type unknowns.
+
+\item[Unify.trace_bound := $n$;]
+causes unification to print tracing information once it reaches depth~$n$.
+Use $n=0$ for full tracing. At the default value of~10, tracing
+information is almost never printed.
+
+\item[Unify.search_bound := $n$;] prevents unification from
+ searching past the depth~$n$. Because of this bound, higher-order
+ unification cannot return an infinite sequence, though it can return
+ an exponentially long one. The search rarely approaches the default value
+ of~20. If the search is cut off, unification prints a warning
+ \texttt{Unification bound exceeded}.
+\end{ttdescription}
+
+
+\section{*Primitive meta-level inference rules}
+\index{meta-rules|(}
+
+\subsection{Logical equivalence rules}
+\index{meta-equality}
+\begin{ttbox}
+equal_intr : thm -> thm -> thm
+equal_elim : thm -> thm -> thm
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{equal_intr} $thm@1$ $thm@2$]
+applies $({\equiv}I)$ to $thm@1$ and~$thm@2$. It maps the premises~$\psi$
+and~$\phi$ to the conclusion~$\phi\equiv\psi$; the assumptions are those of
+the first premise with~$\phi$ removed, plus those of
+the second premise with~$\psi$ removed.
+
+\item[\ttindexbold{equal_elim} $thm@1$ $thm@2$]
+applies $({\equiv}E)$ to $thm@1$ and~$thm@2$. It maps the premises
+$\phi\equiv\psi$ and $\phi$ to the conclusion~$\psi$.
+\end{ttdescription}
+
+
+\subsection{Equality rules}
+\index{meta-equality}
+\begin{ttbox}
+reflexive : cterm -> thm
+symmetric : thm -> thm
+transitive : thm -> thm -> thm
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{reflexive} $ct$]
+makes the theorem \(ct\equiv ct\).
+
+\item[\ttindexbold{symmetric} $thm$]
+maps the premise $a\equiv b$ to the conclusion $b\equiv a$.
+
+\item[\ttindexbold{transitive} $thm@1$ $thm@2$]
+maps the premises $a\equiv b$ and $b\equiv c$ to the conclusion~${a\equiv c}$.
+\end{ttdescription}
+
+
+\subsection{The $\lambda$-conversion rules}
+\index{lambda calc@$\lambda$-calculus}
+\begin{ttbox}
+beta_conversion : cterm -> thm
+extensional : thm -> thm
+abstract_rule : string -> cterm -> thm -> thm
+combination : thm -> thm -> thm
+\end{ttbox}
+There is no rule for $\alpha$-conversion because Isabelle regards
+$\alpha$-convertible theorems as equal.
+\begin{ttdescription}
+\item[\ttindexbold{beta_conversion} $ct$]
+makes the theorem $((\lambda x.a)(b)) \equiv a[b/x]$, where $ct$ is the
+term $(\lambda x.a)(b)$.
+
+\item[\ttindexbold{extensional} $thm$]
+maps the premise $f(x) \equiv g(x)$ to the conclusion $f\equiv g$.
+Parameter~$x$ is taken from the premise. It may be an unknown or a free
+variable (provided it does not occur in the assumptions); it must not occur
+in $f$ or~$g$.
+
+\item[\ttindexbold{abstract_rule} $v$ $x$ $thm$]
+maps the premise $a\equiv b$ to the conclusion $(\lambda x.a) \equiv
+(\lambda x.b)$, abstracting over all occurrences (if any!) of~$x$.
+Parameter~$x$ is supplied as a cterm. It may be an unknown or a free
+variable (provided it does not occur in the assumptions). In the
+conclusion, the bound variable is named~$v$.
+
+\item[\ttindexbold{combination} $thm@1$ $thm@2$]
+maps the premises $f\equiv g$ and $a\equiv b$ to the conclusion~$f(a)\equiv
+g(b)$.
+\end{ttdescription}
+
+
+\section{Derived rules for goal-directed proof}
+Most of these rules have the sole purpose of implementing particular
+tactics. There are few occasions for applying them directly to a theorem.
+
+\subsection{Resolution}
+\index{resolution}
+\begin{ttbox}
+biresolution : bool -> (bool*thm)list -> int -> thm
+ -> thm Seq.seq
+\end{ttbox}
+\begin{ttdescription}
+\item[\ttindexbold{biresolution} $match$ $rules$ $i$ $state$]
+performs bi-resolution on subgoal~$i$ of $state$, using the list of $\it
+(flag,rule)$ pairs. For each pair, it applies resolution if the flag
+is~{\tt false} and elim-resolution if the flag is~{\tt true}. If $match$
+is~{\tt true}, the $state$ is not instantiated.
+\end{ttdescription}
+
+
+\subsection{Composition: resolution without lifting}
+\index{resolution!without lifting}
+\begin{ttbox}
+compose : thm * int * thm -> thm list
+COMP : thm * thm -> thm
+bicompose : bool -> bool * thm * int -> int -> thm
+ -> thm Seq.seq
+\end{ttbox}
+In forward proof, a typical use of composition is to regard an assertion of
+the form $\phi\Imp\psi$ as atomic. Schematic variables are not renamed, so
+beware of clashes!
+\begin{ttdescription}
+\item[\ttindexbold{compose} ($thm@1$, $i$, $thm@2$)]
+uses $thm@1$, regarded as an atomic formula, to solve premise~$i$
+of~$thm@2$. Let $thm@1$ and $thm@2$ be $\psi$ and $\List{\phi@1; \ldots;
+\phi@n} \Imp \phi$. For each $s$ that unifies~$\psi$ and $\phi@i$, the
+result list contains the theorem
+\[ (\List{\phi@1; \ldots; \phi@{i-1}; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s.
+\]
+
+\item[$thm@1$ \ttindexbold{COMP} $thm@2$]
+calls \hbox{\tt compose ($thm@1$, 1, $thm@2$)} and returns the result, if
+unique; otherwise, it raises exception~\xdx{THM}\@. It is
+analogous to {\tt RS}\@.
+
+For example, suppose that $thm@1$ is $a=b\Imp b=a$, a symmetry rule, and
+that $thm@2$ is $\List{P\Imp Q; \neg Q} \Imp\neg P$, which is the
+principle of contrapositives. Then the result would be the
+derived rule $\neg(b=a)\Imp\neg(a=b)$.
+
+\item[\ttindexbold{bicompose} $match$ ($flag$, $rule$, $m$) $i$ $state$]
+refines subgoal~$i$ of $state$ using $rule$, without lifting. The $rule$
+is taken to have the form $\List{\psi@1; \ldots; \psi@m} \Imp \psi$, where
+$\psi$ need not be atomic; thus $m$ determines the number of new
+subgoals. If $flag$ is {\tt true} then it performs elim-resolution --- it
+solves the first premise of~$rule$ by assumption and deletes that
+assumption. If $match$ is~{\tt true}, the $state$ is not instantiated.
+\end{ttdescription}
+
+
+\subsection{Other meta-rules}
+\begin{ttbox}
+rename_params_rule : string list * int -> thm -> thm
+\end{ttbox}
+\begin{ttdescription}
+
+\item[\ttindexbold{rename_params_rule} ({\it names}, {\it i}) $thm$]
+uses the $names$ to rename the parameters of premise~$i$ of $thm$. The
+names must be distinct. If there are fewer names than parameters, then the
+rule renames the innermost parameters and may modify the remaining ones to
+ensure that all the parameters are distinct.
+\index{parameters!renaming}
+
+\end{ttdescription}
+\index{meta-rules|)}
+
+
+\section{Proof terms}\label{sec:proofObjects}
+\index{proof terms|(} Isabelle can record the full meta-level proof of each
+theorem. The proof term contains all logical inferences in detail.
+%while
+%omitting bookkeeping steps that have no logical meaning to an outside
+%observer. Rewriting steps are recorded in similar detail as the output of
+%simplifier tracing.
+Resolution and rewriting steps are broken down to primitive rules of the
+meta-logic. The proof term can be inspected by a separate proof-checker,
+for example.
+
+According to the well-known {\em Curry-Howard isomorphism}, a proof can
+be viewed as a $\lambda$-term. Following this idea, proofs
+in Isabelle are internally represented by a datatype similar to the one for
+terms described in \S\ref{sec:terms}.
+\begin{ttbox}
+infix 8 % %%;
+
+datatype proof =
+ PBound of int
+ | Abst of string * typ option * proof
+ | AbsP of string * term option * proof
+ | op % of proof * term option
+ | op %% of proof * proof
+ | Hyp of term
+ | PThm of (string * (string * string list) list) *
+ proof * term * typ list option
+ | PAxm of string * term * typ list option
+ | Oracle of string * term * typ list option
+ | MinProof of proof list;
+\end{ttbox}
+
+\begin{ttdescription}
+\item[\ttindexbold{Abst} ($a$, $\tau$, $prf$)] is the abstraction over
+a {\it term variable} of type $\tau$ in the body $prf$. Logically, this
+corresponds to $\bigwedge$ introduction. The name $a$ is used only for
+parsing and printing.
+\item[\ttindexbold{AbsP} ($a$, $\varphi$, $prf$)] is the abstraction
+over a {\it proof variable} standing for a proof of proposition $\varphi$
+in the body $prf$. This corresponds to $\Longrightarrow$ introduction.
+\item[$prf$ \% $t$] \index{\%@{\tt\%}|bold}
+is the application of proof $prf$ to term $t$
+which corresponds to $\bigwedge$ elimination.
+\item[$prf@1$ \%\% $prf@2$] \index{\%\%@{\tt\%\%}|bold}
+is the application of proof $prf@1$ to
+proof $prf@2$ which corresponds to $\Longrightarrow$ elimination.
+\item[\ttindexbold{PBound} $i$] is a {\em proof variable} with de Bruijn
+\cite{debruijn72} index $i$.
+\item[\ttindexbold{Hyp} $\varphi$] corresponds to the use of a meta level
+hypothesis $\varphi$.
+\item[\ttindexbold{PThm} (($name$, $tags$), $prf$, $\varphi$, $\overline{\tau}$)]
+stands for a pre-proved theorem, where $name$ is the name of the theorem,
+$prf$ is its actual proof, $\varphi$ is the proven proposition,
+and $\overline{\tau}$ is
+a type assignment for the type variables occurring in the proposition.
+\item[\ttindexbold{PAxm} ($name$, $\varphi$, $\overline{\tau}$)]
+corresponds to the use of an axiom with name $name$ and proposition
+$\varphi$, where $\overline{\tau}$ is a type assignment for the type
+variables occurring in the proposition.
+\item[\ttindexbold{Oracle} ($name$, $\varphi$, $\overline{\tau}$)]
+denotes the invocation of an oracle with name $name$ which produced
+a proposition $\varphi$, where $\overline{\tau}$ is a type assignment
+for the type variables occurring in the proposition.
+\item[\ttindexbold{MinProof} $prfs$]
+represents a {\em minimal proof} where $prfs$ is a list of theorems,
+axioms or oracles.
+\end{ttdescription}
+Note that there are no separate constructors
+for abstraction and application on the level of {\em types}, since
+instantiation of type variables is accomplished via the type assignments
+attached to {\tt PThm}, {\tt PAxm} and {\tt Oracle}.
+
+Each theorem's derivation is stored as the {\tt der} field of its internal
+record:
+\begin{ttbox}
+#2 (#der (rep_thm conjI));
+{\out PThm (("HOL.conjI", []),}
+{\out AbsP ("H", None, AbsP ("H", None, \dots)), \dots, None) %}
+{\out None % None : Proofterm.proof}
+\end{ttbox}
+This proof term identifies a labelled theorem, {\tt conjI} of theory
+\texttt{HOL}, whose underlying proof is
+{\tt AbsP ("H", None, AbsP ("H", None, $\dots$))}.
+The theorem is applied to two (implicit) term arguments, which correspond
+to the two variables occurring in its proposition.
+
+Isabelle's inference kernel can produce proof objects with different
+levels of detail. This is controlled via the global reference variable
+\ttindexbold{proofs}:
+\begin{ttdescription}
+\item[proofs := 0;] only record uses of oracles
+\item[proofs := 1;] record uses of oracles as well as dependencies
+ on other theorems and axioms
+\item[proofs := 2;] record inferences in full detail
+\end{ttdescription}
+Reconstruction and checking of proofs as described in \S\ref{sec:reconstruct_proofs}
+will not work for proofs constructed with {\tt proofs} set to
+{\tt 0} or {\tt 1}.
+Theorems involving oracles are printed with the suffix \verb|[!]| to
+indicate the reduced level of confidence in them.
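+
+For example, to record full proof terms for subsequent proofs, one
+might proceed as follows (a minimal sketch; the goal, the tactics, and
+the name \texttt{a_imp_a} are arbitrary):
+\begin{ttbox}
+proofs := 2;
+Goal "A --> A";
+by (rtac impI 1);
+by (atac 1);
+val a_imp_a = result ();
+\end{ttbox}
+The resulting theorem carries a full proof object, which can be
+inspected via its \texttt{der} field as shown above.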
+
+\medskip
+
+The dependencies of theorems can be viewed using the function
+\ttindexbold{thm_deps}\index{theorems!dependencies}:
+\begin{ttbox}
+thm_deps [\(thm@1\), \(\ldots\), \(thm@n\)];
+\end{ttbox}
+generates the dependency graph of the theorems $thm@1$, $\ldots$, $thm@n$ and
+displays it using Isabelle's graph browser. For this to work properly,
+the theorems in question have to be proved with {\tt proofs} set to a value
+greater than {\tt 0}. You can use
+\begin{ttbox}
+ThmDeps.enable : unit -> unit
+ThmDeps.disable : unit -> unit
+\end{ttbox}
+to set \texttt{proofs} appropriately.
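+
+A typical session might look like this (a sketch, assuming an
+Isabelle/HOL context; the goal and tactic are arbitrary):
+\begin{ttbox}
+ThmDeps.enable ();
+Goal "A & B --> B & A";
+by (blast_tac (claset ()) 1);
+val my_thm = result ();
+thm_deps [my_thm];
+\end{ttbox}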
+
+\subsection{Reconstructing and checking proof terms}\label{sec:reconstruct_proofs}
+\index{proof terms!reconstructing}
+\index{proof terms!checking}
+
+A closer look at the above datatype of proofs reveals that some
+constructor arguments are {\it optional}. The reason is that storing a
+full proof term for each theorem would require an enormous amount of
+memory. Fortunately, proof terms typically contain a great deal of
+redundant information that can be reconstructed from the context. Therefore,
+Isabelle's inference kernel creates only {\em partial} (or {\em implicit})
+\index{proof terms!partial} proof terms, in which
+all typing information in terms, all term and type labels of abstractions
+{\tt AbsP} and {\tt Abst}, and (if possible) some argument terms of
+\verb!%! are omitted. The following functions are available for
+reconstructing and checking proof terms:
+\begin{ttbox}
+Reconstruct.reconstruct_proof :
+ Sign.sg -> term -> Proofterm.proof -> Proofterm.proof
+Reconstruct.expand_proof :
+ Sign.sg -> string list -> Proofterm.proof -> Proofterm.proof
+ProofChecker.thm_of_proof : theory -> Proofterm.proof -> thm
+\end{ttbox}
+
+\begin{ttdescription}
+\item[Reconstruct.reconstruct_proof $sg$ $t$ $prf$]
+turns the partial proof $prf$ into a full proof of the
+proposition denoted by $t$, with respect to signature $sg$.
+Reconstruction will fail with an error message if $prf$
+is not a proof of $t$, is ill-formed, or does not contain
+sufficient information for reconstruction by
+{\em higher order pattern unification}
+\cite{nipkow-patterns, Berghofer-Nipkow:2000:TPHOL}.
+The latter can only happen for proofs
+built up ``by hand'', not for those produced automatically
+by Isabelle's inference kernel.
+\item[Reconstruct.expand_proof $sg$
+ \ttlbrack$name@1$, $\ldots$, $name@n${\ttrbrack} $prf$]
+expands and reconstructs the proofs of all theorems with names
+$name@1$, $\ldots$, $name@n$ in the (full) proof $prf$.
+\item[ProofChecker.thm_of_proof $thy$ $prf$] turns the (full) proof
+$prf$ into a theorem with respect to theory $thy$ by replaying
+it using only primitive rules from Isabelle's inference kernel.
+\end{ttdescription}
+
+\subsection{Parsing and printing proof terms}
+\index{proof terms!parsing}
+\index{proof terms!printing}
+
+Isabelle offers several functions for parsing and printing
+proof terms. The concrete syntax for proof terms is described
+in Fig.\ts\ref{fig:proof_gram}.
+Implicit term arguments in partial proofs are indicated
+by ``{\tt _}''.
+Type arguments for theorems and axioms may be specified using
+\verb!%! or ``$\cdot$'' with an argument of the form {\tt TYPE($type$)}
+(see \S\ref{sec:basic_syntax}).
+They must appear before any other term argument of a theorem
+or axiom. In contrast to term arguments, type arguments may
+be completely omitted.
+\begin{ttbox}
+ProofSyntax.read_proof : theory -> bool -> string -> Proofterm.proof
+ProofSyntax.pretty_proof : Sign.sg -> Proofterm.proof -> Pretty.T
+ProofSyntax.pretty_proof_of : bool -> thm -> Pretty.T
+ProofSyntax.print_proof_of : bool -> thm -> unit
+\end{ttbox}
+\begin{figure}
+\begin{center}
+\begin{tabular}{rcl}
+$proof$ & $=$ & {\tt Lam} $params${\tt .} $proof$ ~~$|$~~
+ $\Lambda params${\tt .} $proof$ \\
+ & $|$ & $proof$ \verb!%! $any$ ~~$|$~~
+ $proof$ $\cdot$ $any$ \\
+ & $|$ & $proof$ \verb!%%! $proof$ ~~$|$~~
+ $proof$ {\boldmath$\cdot$} $proof$ \\
+ & $|$ & $id$ ~~$|$~~ $longid$ \\\\
+$param$ & $=$ & $idt$ ~~$|$~~ $idt$ {\tt :} $prop$ ~~$|$~~
+ {\tt (} $param$ {\tt )} \\\\
+$params$ & $=$ & $param$ ~~$|$~~ $param$ $params$
+\end{tabular}
+\end{center}
+\caption{Proof term syntax}\label{fig:proof_gram}
+\end{figure}
+The function {\tt read_proof} reads in a proof term with
+respect to a given theory. The boolean flag indicates whether
+the proof term to be parsed contains explicit typing information
+to be taken into account.
+Usually, typing information is left implicit and
+is inferred during proof reconstruction. The pretty printing
+functions operating on theorems take a boolean flag as an
+argument which indicates whether the proof term should
+be reconstructed before printing.
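+
+For instance, the reconstructed proof term of an existing theorem
+might be printed as follows (a sketch only):
+\begin{ttbox}
+ProofSyntax.print_proof_of true conjI;
+\end{ttbox}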
+
+The following example (based on Isabelle/HOL) illustrates how
+to parse and check proof terms. We start by parsing a partial
+proof term
+\begin{ttbox}
+val prf = ProofSyntax.read_proof Main.thy false
+ "impI % _ % _ %% (Lam H : _. conjE % _ % _ % _ %% H %%
+ (Lam (H1 : _) H2 : _. conjI % _ % _ %% H2 %% H1))";
+{\out val prf = PThm (("HOL.impI", []), \dots, \dots, None) % None % None %%}
+{\out AbsP ("H", None, PThm (("HOL.conjE", []), \dots, \dots, None) %}
+{\out None % None % None %% PBound 0 %%}
+{\out AbsP ("H1", None, AbsP ("H2", None, \dots))) : Proofterm.proof}
+\end{ttbox}
+The statement to be established by this proof is
+\begin{ttbox}
+val t = term_of
+ (read_cterm (sign_of Main.thy) ("A & B --> B & A", propT));
+{\out val t = Const ("Trueprop", "bool => prop") $}
+{\out (Const ("op -->", "[bool, bool] => bool") $}
+{\out \dots $ \dots : Term.term}
+\end{ttbox}
+Using {\tt t} we can reconstruct the full proof
+\begin{ttbox}
+val prf' = Reconstruct.reconstruct_proof (sign_of Main.thy) t prf;
+{\out val prf' = PThm (("HOL.impI", []), \dots, \dots, Some []) %}
+{\out Some (Const ("op &", \dots) $ Free ("A", \dots) $ Free ("B", \dots)) %}
+{\out Some (Const ("op &", \dots) $ Free ("B", \dots) $ Free ("A", \dots)) %%}
+{\out AbsP ("H", Some (Const ("Trueprop", \dots) $ \dots), \dots)}
+{\out : Proofterm.proof}
+\end{ttbox}
+This proof can finally be turned into a theorem
+\begin{ttbox}
+val thm = ProofChecker.thm_of_proof Main.thy prf';
+{\out val thm = "A & B --> B & A" : Thm.thm}
+\end{ttbox}
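+
+If desired, the pre-proved lemmas occurring in {\tt prf'} can be
+expanded further into primitive inferences, e.g.\ (a sketch; output
+not shown):
+\begin{ttbox}
+val prf'' = Reconstruct.expand_proof (sign_of Main.thy)
+  ["HOL.conjI"] prf';
+\end{ttbox}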
+
+\index{proof terms|)}
+\index{theorems|)}
+
+
+%%% Local Variables:
+%%% mode: latex
+%%% TeX-master: "ref"
+%%% End:
--- a/doc-src/Ref/ref.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,67 +0,0 @@
-\documentclass[12pt,a4paper]{report}
-\usepackage{graphicx,../iman,../extra,../ttbox,../proof,../pdfsetup}
-
-%%\includeonly{}
-%%% to index ids: \[\\tt \([a-zA-Z0-9][a-zA-Z0-9_'.]*\) [\\ttindexbold{\1}
-%%% to delete old ones: \\indexbold{\*[^}]*}
-%% run sedindex ref to prepare index file
-%%% needs chapter on Provers/typedsimp.ML?
-\title{\includegraphics[scale=0.5]{isabelle} \\[4ex] Old Isabelle Reference Manual}
-
-\author{{\em Lawrence C. Paulson}\\
- Computer Laboratory \\ University of Cambridge \\
- \texttt{lcp@cl.cam.ac.uk}\\[3ex]
- With Contributions by Tobias Nipkow and Markus Wenzel}
-
-\makeindex
-
-\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
-
-\pagestyle{headings}
-\sloppy
-\binperiod %%%treat . like a binary operator
-
-\begin{document}
-\underscoreoff
-
-\index{definitions|see{rewriting, meta-level}}
-\index{rewriting!object-level|see{simplification}}
-\index{meta-rules|see{meta-rules}}
-
-\maketitle
-\emph{Note}: this document is part of the earlier Isabelle
-documentation and is mostly outdated. Fully obsolete parts of the
-original text have already been removed. The remaining material
-covers some aspects that did not make it into the newer manuals yet.
-
-\subsubsection*{Acknowledgements}
-Tobias Nipkow, of T. U. Munich, wrote most of
- Chapters~\protect\ref{Defining-Logics} and~\protect\ref{chap:simplification}.
- Markus Wenzel contributed to Chapter~\protect\ref{chap:syntax}.
- Jeremy Dawson, Sara Kalvala, Martin
- Simons and others suggested changes
- and corrections. The research has been funded by the EPSRC (grants
- GR/G53279, GR/H40570, GR/K57381, GR/K77051, GR/M75440) and by ESPRIT
- (projects 3245: Logical Frameworks, and 6453: Types), and by the DFG
- Schwerpunktprogramm \emph{Deduktion}.
-
-\pagenumbering{roman} \tableofcontents \clearfirst
-
-\include{tactic}
-\include{thm}
-\include{syntax}
-\include{substitution}
-\include{simplifier}
-\include{classical}
-
-%%seealso's must be last so that they appear last in the index entries
-\index{meta-rewriting|seealso{tactics, theorems}}
-
-\begingroup
- \bibliographystyle{plain} \small\raggedright\frenchspacing
- \bibliography{../manual}
-\endgroup
-\include{theory-syntax}
-
-\printindex
-\end{document}
--- a/doc-src/Ref/simp.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,512 +0,0 @@
-%%%THIS DOCUMENTS THE OBSOLETE SIMPLIFIER!!!!
-\chapter{Simplification} \label{simp-chap}
-\index{simplification|(}
-Object-level rewriting is not primitive in Isabelle. For efficiency,
-perhaps it ought to be. On the other hand, it is difficult to conceive of
-a general mechanism that could accommodate the diversity of rewriting found
-in different logics. Hence rewriting in Isabelle works via resolution,
-using unknowns as place-holders for simplified terms. This chapter
-describes a generic simplification package, the functor~\ttindex{SimpFun},
-which expects the basic laws of equational logic and returns a suite of
-simplification tactics. The code lives in
-\verb$Provers/simp.ML$.
-
-This rewriting package is not as general as one might hope (using it for {\tt
-HOL} is not quite as convenient as it could be; rewriting modulo equations is
-not supported~\ldots) but works well for many logics. It performs
-conditional and unconditional rewriting and handles multiple reduction
-relations and local assumptions. It also has a facility for automatic case
-splits by expanding conditionals like {\it if-then-else\/} during rewriting.
-
-For many of Isabelle's logics ({\tt FOL}, {\tt ZF}, {\tt LCF} and {\tt HOL})
-the simplifier has been set up already. Hence we start by describing the
-functions provided by the simplifier --- those functions exported by
-\ttindex{SimpFun} through its result signature \ttindex{SIMP} shown in
-Fig.\ts\ref{SIMP}.
-
-
-\section{Simplification sets}
-\index{simplification sets}
-The simplification tactics are controlled by {\bf simpsets}, which consist of
-three things:
-\begin{enumerate}
-\item {\bf Rewrite rules}, which are theorems like
-$\Var{m} + succ(\Var{n}) = succ(\Var{m} + \Var{n})$. {\bf Conditional}
-rewrites such as $m<n \Imp m/n = 0$ are permitted.
-\index{rewrite rules}
-
-\item {\bf Congruence rules}, which typically have the form
-\index{congruence rules}
-\[ \List{\Var{x@1} = \Var{y@1}; \ldots; \Var{x@n} = \Var{y@n}} \Imp
- f(\Var{x@1},\ldots,\Var{x@n}) = f(\Var{y@1},\ldots,\Var{y@n}).
-\]
-
-\item The {\bf auto-tactic}, which attempts to solve the simplified
-subgoal, say by recognizing it as a tautology.
-\end{enumerate}
-
-\subsection{Congruence rules}
-Congruence rules enable the rewriter to simplify subterms. Without a
-congruence rule for the function~$g$, no argument of~$g$ can be rewritten.
-Congruence rules can be generalized in the following ways:
-
-{\bf Additional assumptions} are allowed:
-\[ \List{\Var{P@1} \bimp \Var{Q@1};\; \Var{Q@1} \Imp \Var{P@2} \bimp \Var{Q@2}}
- \Imp (\Var{P@1} \imp \Var{P@2}) \bimp (\Var{Q@1} \imp \Var{Q@2})
-\]
-This rule assumes $Q@1$, and any rewrite rules it contains, while
-simplifying~$P@2$. Such `local' assumptions are effective for rewriting
-formulae such as $x=0\imp y+x=y$.
-
-{\bf Additional quantifiers} are allowed, typically for binding operators:
-\[ \List{\Forall z. \Var{P}(z) \bimp \Var{Q}(z)} \Imp
- \forall x.\Var{P}(x) \bimp \forall x.\Var{Q}(x)
-\]
-
-{\bf Different equalities} can be mixed. The following example
-enables the transition from formula rewriting to term rewriting:
-\[ \List{\Var{x@1}=\Var{y@1};\Var{x@2}=\Var{y@2}} \Imp
- (\Var{x@1}=\Var{x@2}) \bimp (\Var{y@1}=\Var{y@2})
-\]
-\begin{warn}
-It is not necessary to assert a separate congruence rule for each constant,
-provided your logic contains suitable substitution rules. The function {\tt
-mk_congs} derives congruence rules from substitution
-rules (see \S\ref{simp-tactics}).
-\end{warn}
-
-
-\begin{figure}
-\indexbold{*SIMP}
-\begin{ttbox}
-infix 4 addrews addcongs delrews delcongs setauto;
-signature SIMP =
-sig
- type simpset
- val empty_ss : simpset
- val addcongs : simpset * thm list -> simpset
- val addrews : simpset * thm list -> simpset
- val delcongs : simpset * thm list -> simpset
- val delrews : simpset * thm list -> simpset
- val print_ss : simpset -> unit
- val setauto : simpset * (int -> tactic) -> simpset
- val ASM_SIMP_CASE_TAC : simpset -> int -> tactic
- val ASM_SIMP_TAC : simpset -> int -> tactic
- val CASE_TAC : simpset -> int -> tactic
- val SIMP_CASE2_TAC : simpset -> int -> tactic
- val SIMP_THM : simpset -> thm -> thm
- val SIMP_TAC : simpset -> int -> tactic
- val SIMP_CASE_TAC : simpset -> int -> tactic
- val mk_congs : theory -> string list -> thm list
- val mk_typed_congs : theory -> (string*string) list -> thm list
- val tracing : bool ref
-end;
-\end{ttbox}
-\caption{The signature {\tt SIMP}} \label{SIMP}
-\end{figure}
-
-
-\subsection{The abstract type {\tt simpset}}\label{simp-simpsets}
-Simpsets are values of the abstract type \ttindexbold{simpset}. They are
-manipulated by the following functions:
-\index{simplification sets|bold}
-\begin{ttdescription}
-\item[\ttindexbold{empty_ss}]
-is the empty simpset. It has no congruence or rewrite rules and its
-auto-tactic always fails.
-
-\item[$ss$ \ttindexbold{addcongs} $thms$]
-is the simpset~$ss$ plus the congruence rules~$thms$.
-
-\item[$ss$ \ttindexbold{delcongs} $thms$]
-is the simpset~$ss$ minus the congruence rules~$thms$.
-
-\item[$ss$ \ttindexbold{addrews} $thms$]
-is the simpset~$ss$ plus the rewrite rules~$thms$.
-
-\item[$ss$ \ttindexbold{delrews} $thms$]
-is the simpset~$ss$ minus the rewrite rules~$thms$.
-
-\item[$ss$ \ttindexbold{setauto} $tacf$]
-is the simpset~$ss$ with $tacf$ for its auto-tactic.
-
-\item[\ttindexbold{print_ss} $ss$]
-prints all the congruence and rewrite rules in the simpset~$ss$.
-\end{ttdescription}
-Adding a rule to a simpset already containing it, or deleting one
-from a simpset not containing it, generates a warning message.
-
-In principle, any theorem can be used as a rewrite rule. Before adding a
-theorem to a simpset, {\tt addrews} preprocesses the theorem to extract the
-maximum amount of rewriting from it. Thus it need not have the form $s=t$.
-In {\tt FOL} for example, an atomic formula $P$ is transformed into the
-rewrite rule $P \bimp True$. This preprocessing is not fixed but logic
-dependent. The existing logics like {\tt FOL} are fairly clever in this
-respect. For a more precise description see {\tt mk_rew_rules} in
-\S\ref{SimpFun-input}.
-
-The auto-tactic is applied after simplification to solve a goal. This may
-be the overall goal or some subgoal that arose during conditional
-rewriting. Calling ${\tt auto_tac}~i$ must either solve exactly
-subgoal~$i$ or fail. If it succeeds without reducing the number of
-subgoals by one, havoc and strange exceptions may result.
-A typical auto-tactic is {\tt ares_tac [TrueI]}, which attempts proof by
-assumption and resolution with the theorem $True$. In explicitly typed
-logics, the auto-tactic can be used to solve simple type checking
-obligations. Some applications demand a sophisticated auto-tactic such as
-{\tt fast_tac}, but this could make simplification slow.
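-
-For example, an auto-tactic attempting proof by assumption and
-resolution with $True$ could be installed as follows (a sketch only;
-the starting simpset is arbitrary):
-\begin{ttbox}
-val my_ss = FOL_ss setauto ares_tac [TrueI];
-\end{ttbox}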
-
-\begin{warn}
-Rewriting never instantiates unknowns in subgoals. (It uses
-\ttindex{match_tac} rather than \ttindex{resolve_tac}.) However, the
-auto-tactic is permitted to instantiate unknowns.
-\end{warn}
-
-
-\section{The simplification tactics} \label{simp-tactics}
-\index{simplification!tactics|bold}
-\index{tactics!simplification|bold}
-The actual simplification work is performed by the following tactics. The
-rewriting strategy is strictly bottom up. Conditions in conditional rewrite
-rules are solved recursively before the rewrite rule is applied.
-
-There are two basic simplification tactics:
-\begin{ttdescription}
-\item[\ttindexbold{SIMP_TAC} $ss$ $i$]
-simplifies subgoal~$i$ using the rules in~$ss$. It may solve the
-subgoal completely if it has become trivial, using the auto-tactic
-(\S\ref{simp-simpsets}).
-
-\item[\ttindexbold{ASM_SIMP_TAC}]
-is like \verb$SIMP_TAC$, but also uses assumptions as additional
-rewrite rules.
-\end{ttdescription}
-Many logics have conditional operators like {\it if-then-else}. If the
-simplifier has been set up with such case splits (see~\ttindex{case_splits}
-in \S\ref{SimpFun-input}), there are tactics which automatically alternate
-between simplification and case splitting:
-\begin{ttdescription}
-\item[\ttindexbold{SIMP_CASE_TAC}]
-is like {\tt SIMP_TAC} but also performs automatic case splits.
-More precisely, after each simplification phase the tactic tries to apply a
-theorem in \ttindex{case_splits}. If this succeeds, the tactic calls
-itself recursively on the result.
-
-\item[\ttindexbold{ASM_SIMP_CASE_TAC}]
-is like {\tt SIMP_CASE_TAC}, but also uses assumptions for
-rewriting.
-
-\item[\ttindexbold{SIMP_CASE2_TAC}]
-is like {\tt SIMP_CASE_TAC}, but also tries to solve the
-pre-conditions of conditional simplification rules by repeated case splits.
-
-\item[\ttindexbold{CASE_TAC}]
-tries to break up a goal using a rule in
-\ttindex{case_splits}.
-
-\item[\ttindexbold{SIMP_THM}]
-simplifies a theorem using assumptions and case splitting.
-\end{ttdescription}
-Finally there are two useful functions for generating congruence
-rules for constants and free variables:
-\begin{ttdescription}
-\item[\ttindexbold{mk_congs} $thy$ $cs$]
-computes a list of congruence rules, one for each constant in $cs$.
-Remember that the name of an infix constant
-\verb$+$ is \verb$op +$.
-
-\item[\ttindexbold{mk_typed_congs}]
-computes congruence rules for explicitly typed free variables and
-constants. Its second argument is a list of name and type pairs. Names
-can be either free variables like {\tt P}, or constants like \verb$op =$.
-For example in {\tt FOL}, the pair
-\verb$("f","'a => 'a")$ generates the rule \verb$?x = ?y ==> f(?x) = f(?y)$.
-Such congruence rules are necessary for goals with free variables whose
-arguments need to be rewritten.
-\end{ttdescription}
-Both functions work correctly only if {\tt SimpFun} has been supplied with the
-necessary substitution rules. The details are discussed in
-\S\ref{SimpFun-input} under {\tt subst_thms}.
-\begin{warn}
-Using the simplifier effectively may take a bit of experimentation. In
-particular it may often happen that simplification stops short of what you
-expected or runs forever. To diagnose these problems, the simplifier can be
-traced. The reference variable \ttindexbold{tracing} controls the output of
-tracing information.
-\index{tracing!of simplification}
-\end{warn}
-
-
-\section{Example: using the simplifier}
-\index{simplification!example}
-Assume we are working within {\tt FOL} and that
-\begin{ttdescription}
-\item[Nat.thy] is a theory including the constants $0$, $Suc$ and $+$,
-\item[add_0] is the rewrite rule $0+n = n$,
-\item[add_Suc] is the rewrite rule $Suc(m)+n = Suc(m+n)$,
-\item[induct] is the induction rule
-$\List{P(0); \Forall x. P(x)\Imp P(Suc(x))} \Imp P(n)$.
-\item[FOL_ss] is a basic simpset for {\tt FOL}.
-\end{ttdescription}
-We generate congruence rules for $Suc$ and for the infix operator~$+$:
-\begin{ttbox}
-val nat_congs = mk_congs Nat.thy ["Suc", "op +"];
-prths nat_congs;
-{\out ?Xa = ?Ya ==> Suc(?Xa) = Suc(?Ya)}
-{\out [| ?Xa = ?Ya; ?Xb = ?Yb |] ==> ?Xa + ?Xb = ?Ya + ?Yb}
-\end{ttbox}
-We create a simpset for natural numbers by extending~{\tt FOL_ss}:
-\begin{ttbox}
-val add_ss = FOL_ss addcongs nat_congs
- addrews [add_0, add_Suc];
-\end{ttbox}
-Proofs by induction typically involve simplification:\footnote
-{These examples reside in the file {\tt FOL/ex/nat.ML}.}
-\begin{ttbox}
-goal Nat.thy "m+0 = m";
-{\out Level 0}
-{\out m + 0 = m}
-{\out 1. m + 0 = m}
-\ttbreak
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + 0 = m}
-{\out 1. 0 + 0 = 0}
-{\out 2. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-Simplification solves the first subgoal:
-\begin{ttbox}
-by (SIMP_TAC add_ss 1);
-{\out Level 2}
-{\out m + 0 = m}
-{\out 1. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)}
-\end{ttbox}
-The remaining subgoal requires \ttindex{ASM_SIMP_TAC} in order to use the
-induction hypothesis as a rewrite rule:
-\begin{ttbox}
-by (ASM_SIMP_TAC add_ss 1);
-{\out Level 3}
-{\out m + 0 = m}
-{\out No subgoals!}
-\end{ttbox}
-The next proof is similar.
-\begin{ttbox}
-goal Nat.thy "m+Suc(n) = Suc(m+n)";
-{\out Level 0}
-{\out m + Suc(n) = Suc(m + n)}
-{\out 1. m + Suc(n) = Suc(m + n)}
-\ttbreak
-by (res_inst_tac [("n","m")] induct 1);
-{\out Level 1}
-{\out m + Suc(n) = Suc(m + n)}
-{\out 1. 0 + Suc(n) = Suc(0 + n)}
-{\out 2. !!x. x + Suc(n) = Suc(x + n) ==> Suc(x) + Suc(n) = Suc(Suc(x) + n)}
-\end{ttbox}
-Using the tactical \ttindex{ALLGOALS}, a single command simplifies all the
-subgoals:
-\begin{ttbox}
-by (ALLGOALS (ASM_SIMP_TAC add_ss));
-{\out Level 2}
-{\out m + Suc(n) = Suc(m + n)}
-{\out No subgoals!}
-\end{ttbox}
-Some goals contain free function variables. The simplifier must have
-congruence rules for those function variables, or it will be unable to
-simplify their arguments:
-\begin{ttbox}
-val f_congs = mk_typed_congs Nat.thy [("f","nat => nat")];
-val f_ss = add_ss addcongs f_congs;
-prths f_congs;
-{\out ?Xa = ?Ya ==> f(?Xa) = f(?Ya)}
-\end{ttbox}
-Here is a conjecture to be proved for an arbitrary function~$f$ satisfying
-the law $f(Suc(n)) = Suc(f(n))$:
-\begin{ttbox}
-val [prem] = goal Nat.thy
- "(!!n. f(Suc(n)) = Suc(f(n))) ==> f(i+j) = i+f(j)";
-{\out Level 0}
-{\out f(i + j) = i + f(j)}
-{\out 1. f(i + j) = i + f(j)}
-\ttbreak
-by (res_inst_tac [("n","i")] induct 1);
-{\out Level 1}
-{\out f(i + j) = i + f(j)}
-{\out 1. f(0 + j) = 0 + f(j)}
-{\out 2. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-We simplify each subgoal in turn. The first one is trivial:
-\begin{ttbox}
-by (SIMP_TAC f_ss 1);
-{\out Level 2}
-{\out f(i + j) = i + f(j)}
-{\out 1. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)}
-\end{ttbox}
-The remaining subgoal requires rewriting by the premise, shown
-below, so we add it to {\tt f_ss}:
-\begin{ttbox}
-prth prem;
-{\out f(Suc(?n)) = Suc(f(?n)) [!!n. f(Suc(n)) = Suc(f(n))]}
-by (ASM_SIMP_TAC (f_ss addrews [prem]) 1);
-{\out Level 3}
-{\out f(i + j) = i + f(j)}
-{\out No subgoals!}
-\end{ttbox}
-
-
-\section{Setting up the simplifier} \label{SimpFun-input}
-\index{simplification!setting up|bold}
-To set up a simplifier for a new logic, the \ML\ functor
-\ttindex{SimpFun} needs to be supplied with theorems to justify
-rewriting. A rewrite relation must be reflexive and transitive; symmetry
-is not necessary. Hence the package is also applicable to non-symmetric
-relations such as occur in operational semantics. In the sequel, $\gg$
-denotes some {\bf reduction relation}: a binary relation to be used for
-rewriting. Several reduction relations can be used at once. In {\tt FOL},
-both $=$ (on terms) and $\bimp$ (on formulae) can be used for rewriting.
-
-The argument to {\tt SimpFun} is a structure with signature
-\ttindexbold{SIMP_DATA}:
-\begin{ttbox}
-signature SIMP_DATA =
-sig
- val case_splits : (thm * string) list
- val dest_red : term -> term * term * term
- val mk_rew_rules : thm -> thm list
- val norm_thms : (thm*thm) list
- val red1 : thm
- val red2 : thm
- val refl_thms : thm list
- val subst_thms : thm list
- val trans_thms : thm list
-end;
-\end{ttbox}
-The components of {\tt SIMP_DATA} need to be instantiated as follows. Many
-of these components are lists, and can be empty.
-\begin{ttdescription}
-\item[\ttindexbold{refl_thms}]
-supplies reflexivity theorems of the form $\Var{x} \gg
-\Var{x}$. They must not have additional premises as, for example,
-$\Var{x}\in\Var{A} \Imp \Var{x} = \Var{x}\in\Var{A}$ in type theory.
-
-\item[\ttindexbold{trans_thms}]
-supplies transitivity theorems of the form
-$\List{\Var{x}\gg\Var{y}; \Var{y}\gg\Var{z}} \Imp {\Var{x}\gg\Var{z}}$.
-
-\item[\ttindexbold{red1}]
-is a theorem of the form $\List{\Var{P}\gg\Var{Q};
-\Var{P}} \Imp \Var{Q}$, where $\gg$ is a relation between formulae, such as
-$\bimp$ in {\tt FOL}.
-
-\item[\ttindexbold{red2}]
-is a theorem of the form $\List{\Var{P}\gg\Var{Q};
-\Var{Q}} \Imp \Var{P}$, where $\gg$ is a relation between formulae, such as
-$\bimp$ in {\tt FOL}.
-
-\item[\ttindexbold{mk_rew_rules}]
-is a function that extracts rewrite rules from theorems. A rewrite rule is
-a theorem of the form $\List{\ldots}\Imp s \gg t$. In its simplest form,
-{\tt mk_rew_rules} maps a theorem $t$ to the singleton list $[t]$. More
-sophisticated versions may do things like
-\[
-\begin{array}{l@{}r@{\quad\mapsto\quad}l}
-\mbox{create formula rewrites:}& P & [P \bimp True] \\[.5ex]
-\mbox{remove negations:}& \neg P & [P \bimp False] \\[.5ex]
-\mbox{create conditional rewrites:}& P\imp s\gg t & [P\Imp s\gg t] \\[.5ex]
-\mbox{break up conjunctions:}&
- (s@1 \gg@1 t@1) \conj (s@2 \gg@2 t@2) & [s@1 \gg@1 t@1, s@2 \gg@2 t@2]
-\end{array}
-\]
-The more theorems are turned into rewrite rules, the better. The function
-is used in two places:
-\begin{itemize}
-\item
-$ss$~\ttindex{addrews}~$thms$ applies {\tt mk_rew_rules} to each element of
-$thms$ before adding it to $ss$.
-\item
-simplification with assumptions (as in \ttindex{ASM_SIMP_TAC}) uses
-{\tt mk_rew_rules} to turn assumptions into rewrite rules.
-\end{itemize}
-
-\item[\ttindexbold{case_splits}]
-supplies expansion rules for case splits. The simplifier is designed
-for rules roughly of the kind
-\[ \Var{P}(if(\Var{Q},\Var{x},\Var{y})) \bimp (\Var{Q} \imp \Var{P}(\Var{x}))
-\conj (\neg\Var{Q} \imp \Var{P}(\Var{y}))
-\]
-but is insensitive to the form of the right-hand side. Other examples
-include product types, where $split ::
-(\alpha\To\beta\To\gamma)\To\alpha*\beta\To\gamma$:
-\[ \Var{P}(split(\Var{f},\Var{p})) \bimp (\forall a~b. \Var{p} =
-{<}a,b{>} \imp \Var{P}(\Var{f}(a,b)))
-\]
-Each theorem in the list is paired with the name of the constant being
-eliminated, {\tt"if"} and {\tt"split"} in the examples above.
-
-\item[\ttindexbold{norm_thms}]
-supports an optimization. It should be a list of pairs of rules of the
-form $\Var{x} \gg norm(\Var{x})$ and $norm(\Var{x}) \gg \Var{x}$. These
-introduce and eliminate {\tt norm}, an arbitrary function that should be
-used nowhere else. This function serves to tag subterms that are in normal
-form. Such rules can speed up rewriting significantly!
-
-\item[\ttindexbold{subst_thms}]
-supplies substitution rules of the form
-\[ \List{\Var{x} \gg \Var{y}; \Var{P}(\Var{x})} \Imp \Var{P}(\Var{y}) \]
-They are used to derive congruence rules via \ttindex{mk_congs} and
-\ttindex{mk_typed_congs}. If $f :: [\tau@1,\cdots,\tau@n]\To\tau$ is a
-constant or free variable, the computation of a congruence rule
-\[\List{\Var{x@1} \gg@1 \Var{y@1}; \ldots; \Var{x@n} \gg@n \Var{y@n}}
-\Imp f(\Var{x@1},\ldots,\Var{x@n}) \gg f(\Var{y@1},\ldots,\Var{y@n}) \]
-requires a reflexivity theorem for some reduction ${\gg} ::
-\alpha\To\alpha\To\sigma$ such that $\tau$ is an instance of $\alpha$. If a
-suitable reflexivity law is missing, no congruence rule for $f$ can be
-generated. Otherwise an $n$-ary congruence rule of the form shown above is
-derived, subject to the availability of suitable substitution laws for each
-argument position.
-
-A substitution law is suitable for argument $i$ if it
-uses a reduction ${\gg@i} :: \alpha@i\To\alpha@i\To\sigma@i$ such that
-$\tau@i$ is an instance of $\alpha@i$. If a suitable substitution law for
-argument $i$ is missing, the $i^{th}$ premise of the above congruence rule
-cannot be generated and hence argument $i$ cannot be rewritten. In the
-worst case, if there are no suitable substitution laws at all, the derived
-congruence simply degenerates into a reflexivity law.
-
-\item[\ttindexbold{dest_red}]
-takes reductions apart. Given a term $t$ representing the judgement
-\mbox{$a \gg b$}, \verb$dest_red$~$t$ should return a triple $(c,ta,tb)$
-where $ta$ and $tb$ represent $a$ and $b$, and $c$ is a term of the form
-\verb$Const(_,_)$, the reduction constant $\gg$.
-
-Suppose the logic has a coercion function like $Trueprop::o\To prop$, as do
-{\tt FOL} and~{\tt HOL}\@. If $\gg$ is a binary operator (not necessarily
-infix), the following definition does the job:
-\begin{verbatim}
- fun dest_red( _ $ (c $ ta $ tb) ) = (c,ta,tb);
-\end{verbatim}
-The wildcard pattern {\tt_} matches the coercion function.
-\end{ttdescription}
-
-
-\section{A sample instantiation}
-Here is the instantiation of {\tt SIMP_DATA} for FOL. The code for {\tt
- mk_rew_rules} is not shown; see the file {\tt FOL/simpdata.ML}.
-\begin{ttbox}
-structure FOL_SimpData : SIMP_DATA =
- struct
- val refl_thms = [ \(\Var{x}=\Var{x}\), \(\Var{P}\bimp\Var{P}\) ]
- val trans_thms = [ \(\List{\Var{x}=\Var{y};\Var{y}=\Var{z}}\Imp\Var{x}=\Var{z}\),
- \(\List{\Var{P}\bimp\Var{Q};\Var{Q}\bimp\Var{R}}\Imp\Var{P}\bimp\Var{R}\) ]
- val red1 = \(\List{\Var{P}\bimp\Var{Q}; \Var{P}} \Imp \Var{Q}\)
- val red2 = \(\List{\Var{P}\bimp\Var{Q}; \Var{Q}} \Imp \Var{P}\)
- val mk_rew_rules = ...
- val case_splits = [ \(\Var{P}(if(\Var{Q},\Var{x},\Var{y})) \bimp\)
- \((\Var{Q} \imp \Var{P}(\Var{x})) \conj (\neg\Var{Q} \imp \Var{P}(\Var{y}))\) ]
- val norm_thms = [ (\(\Var{x}=norm(\Var{x})\),\(norm(\Var{x})=\Var{x}\)),
- (\(\Var{P}\bimp{}NORM(\Var{P}\)), \(NORM(\Var{P})\bimp\Var{P}\)) ]
- val subst_thms = [ \(\List{\Var{x}=\Var{y}; \Var{P}(\Var{x})}\Imp\Var{P}(\Var{y})\) ]
- val dest_red = fn (_ $ (opn $ lhs $ rhs)) => (opn,lhs,rhs)
- end;
-\end{ttbox}
-
-\index{simplification|)}
--- a/doc-src/Ref/simplifier-eg.txt Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,120 +0,0 @@
-
-Pretty.setmargin 70;
-
-
-context Arith.thy;
-goal Arith.thy "0 + (x + 0) = x + 0 + 0";
-by (Simp_tac 1);
-
-
-
-
-> goal Nat.thy "m+0 = m";
-Level 0
-m + 0 = m
- 1. m + 0 = m
-> by (res_inst_tac [("n","m")] induct 1);
-Level 1
-m + 0 = m
- 1. 0 + 0 = 0
- 2. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)
-> by (simp_tac add_ss 1);
-Level 2
-m + 0 = m
- 1. !!x. x + 0 = x ==> Suc(x) + 0 = Suc(x)
-> by (asm_simp_tac add_ss 1);
-Level 3
-m + 0 = m
-No subgoals!
-
-
-> goal Nat.thy "m+Suc(n) = Suc(m+n)";
-Level 0
-m + Suc(n) = Suc(m + n)
- 1. m + Suc(n) = Suc(m + n)
-val it = [] : thm list
-> by (res_inst_tac [("n","m")] induct 1);
-Level 1
-m + Suc(n) = Suc(m + n)
- 1. 0 + Suc(n) = Suc(0 + n)
- 2. !!x. x + Suc(n) = Suc(x + n) ==> Suc(x) + Suc(n) = Suc(Suc(x) + n)
-val it = () : unit
-> by (simp_tac add_ss 1);
-Level 2
-m + Suc(n) = Suc(m + n)
- 1. !!x. x + Suc(n) = Suc(x + n) ==> Suc(x) + Suc(n) = Suc(Suc(x) + n)
-val it = () : unit
-> trace_simp := true;
-val it = () : unit
-> by (asm_simp_tac add_ss 1);
-Rewriting:
-Suc(x) + Suc(n) == Suc(x + Suc(n))
-Rewriting:
-x + Suc(n) == Suc(x + n)
-Rewriting:
-Suc(x) + n == Suc(x + n)
-Rewriting:
-Suc(Suc(x + n)) = Suc(Suc(x + n)) == True
-Level 3
-m + Suc(n) = Suc(m + n)
-No subgoals!
-val it = () : unit
-
-
-
-> val prems = goal Nat.thy "(!!n. f(Suc(n)) = Suc(f(n))) ==> f(i+j) = i+f(j)";
-Level 0
-f(i + j) = i + f(j)
- 1. f(i + j) = i + f(j)
-> prths prems;
-f(Suc(?n)) = Suc(f(?n)) [!!n. f(Suc(n)) = Suc(f(n))]
-
-> by (res_inst_tac [("n","i")] induct 1);
-Level 1
-f(i + j) = i + f(j)
- 1. f(0 + j) = 0 + f(j)
- 2. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)
-> by (simp_tac f_ss 1);
-Level 2
-f(i + j) = i + f(j)
- 1. !!x. f(x + j) = x + f(j) ==> f(Suc(x) + j) = Suc(x) + f(j)
-> by (asm_simp_tac (f_ss addrews prems) 1);
-Level 3
-f(i + j) = i + f(j)
-No subgoals!
-
-
-
-> goal NatSum.thy "Suc(Suc(0))*sum(%i.i,Suc(n)) = n*Suc(n)";
-Level 0
-Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
- 1. Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
-
-> by (simp_tac natsum_ss 1);
-Level 1
-Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
- 1. n + (n + (sum(%i. i, n) + sum(%i. i, n))) = n + n * n
-
-> by (nat_ind_tac "n" 1);
-Level 2
-Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
- 1. 0 + (0 + (sum(%i. i, 0) + sum(%i. i, 0))) = 0 + 0 * 0
- 2. !!n1. n1 + (n1 + (sum(%i. i, n1) + sum(%i. i, n1))) =
- n1 + n1 * n1 ==>
- Suc(n1) +
- (Suc(n1) + (sum(%i. i, Suc(n1)) + sum(%i. i, Suc(n1)))) =
- Suc(n1) + Suc(n1) * Suc(n1)
-
-> by (simp_tac natsum_ss 1);
-Level 3
-Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
- 1. !!n1. n1 + (n1 + (sum(%i. i, n1) + sum(%i. i, n1))) =
- n1 + n1 * n1 ==>
- Suc(n1) +
- (Suc(n1) + (sum(%i. i, Suc(n1)) + sum(%i. i, Suc(n1)))) =
- Suc(n1) + Suc(n1) * Suc(n1)
-
-> by (asm_simp_tac natsum_ss 1);
-Level 4
-Suc(Suc(0)) * sum(%i. i, Suc(n)) = n * Suc(n)
-No subgoals!
--- a/doc-src/Ref/simplifier.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1032 +0,0 @@
-
-\chapter{Simplification}
-\label{chap:simplification}
-\index{simplification|(}
-
-This chapter describes Isabelle's generic simplification package. It performs
-conditional and unconditional rewriting and uses contextual information
-(`local assumptions'). It provides several general hooks, which can provide
-automatic case splits during rewriting, for example. The simplifier is
-already set up for many of Isabelle's logics: FOL, ZF, HOL, HOLCF.
-
-The first section is a quick introduction to the simplifier that
-should be sufficient to get started. The later sections explain more
-advanced features.
-
-
-\section{Simplification for dummies}
-\label{sec:simp-for-dummies}
-
-Basic use of the simplifier is particularly easy because each theory
-is equipped with sensible default information controlling the rewrite
-process --- namely the implicit {\em current
- simpset}\index{simpset!current}. A suite of simple commands is
-provided that refer to the implicit simpset of the current theory
-context.
-
-\begin{warn}
- Make sure that you are working within the correct theory context.
- Executing proofs interactively, or loading them from ML files
-  without associated theories, may require setting the current theory
- manually via the \ttindex{context} command.
-\end{warn}
-
-\subsection{Simplification tactics} \label{sec:simp-for-dummies-tacs}
-\begin{ttbox}
-Simp_tac : int -> tactic
-Asm_simp_tac : int -> tactic
-Full_simp_tac : int -> tactic
-Asm_full_simp_tac : int -> tactic
-trace_simp : bool ref \hfill{\bf initially false}
-debug_simp : bool ref \hfill{\bf initially false}
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{Simp_tac} $i$] simplifies subgoal~$i$ using the
- current simpset. It may solve the subgoal completely if it has
- become trivial, using the simpset's solver tactic.
-
-\item[\ttindexbold{Asm_simp_tac}]\index{assumptions!in simplification}
- is like \verb$Simp_tac$, but extracts additional rewrite rules from
- the local assumptions.
-
-\item[\ttindexbold{Full_simp_tac}] is like \verb$Simp_tac$, but also
- simplifies the assumptions (without using the assumptions to
- simplify each other or the actual goal).
-
-\item[\ttindexbold{Asm_full_simp_tac}] is like \verb$Asm_simp_tac$,
- but also simplifies the assumptions. In particular, assumptions can
- simplify each other.
-\footnote{\texttt{Asm_full_simp_tac} used to process the assumptions from
-  left to right. Purely for backwards compatibility, there is now
-  \texttt{Asm_lr_simp_tac}, which behaves like the old \texttt{Asm_full_simp_tac}.}
-\item[set \ttindexbold{trace_simp};] makes the simplifier output internal
- operations. This includes rewrite steps, but also bookkeeping like
- modifications of the simpset.
-\item[set \ttindexbold{debug_simp};] makes the simplifier output some extra
- information about internal operations. This includes any attempted
- invocation of simplification procedures.
-\end{ttdescription}
-
-\medskip
-
-As an example, consider the theory of arithmetic in HOL. The (rather trivial)
-goal $0 + (x + 0) = x + 0 + 0$ can be solved by a single call of
-\texttt{Simp_tac} as follows:
-\begin{ttbox}
-context Arith.thy;
-Goal "0 + (x + 0) = x + 0 + 0";
-{\out 1. 0 + (x + 0) = x + 0 + 0}
-by (Simp_tac 1);
-{\out Level 1}
-{\out 0 + (x + 0) = x + 0 + 0}
-{\out No subgoals!}
-\end{ttbox}
-
-The simplifier uses the current simpset of \texttt{Arith.thy}, which
-contains suitable theorems like $\Var{n}+0 = \Var{n}$ and $0+\Var{n} =
-\Var{n}$.
-
-\medskip In many cases, assumptions of a subgoal are also needed in
-the simplification process. For example, \texttt{x = 0 ==> x + x = 0}
-is solved by \texttt{Asm_simp_tac} as follows:
-\begin{ttbox}
-{\out 1. x = 0 ==> x + x = 0}
-by (Asm_simp_tac 1);
-\end{ttbox}
-
-\medskip \texttt{Asm_full_simp_tac} is the most powerful of this quartet
-of tactics but may also loop where some of the others terminate. For
-example,
-\begin{ttbox}
-{\out 1. ALL x. f x = g (f (g x)) ==> f 0 = f 0 + 0}
-\end{ttbox}
-is solved by \texttt{Simp_tac}, but \texttt{Asm_simp_tac} and {\tt
- Asm_full_simp_tac} loop because the rewrite rule $f\,\Var{x} =
-g\,(f\,(g\,\Var{x}))$ extracted from the assumption does not
-terminate. Isabelle notices certain simple forms of nontermination,
-but not this one. Because assumptions may simplify each other, there can be
-very subtle cases of nontermination. For example, invoking
-{\tt Asm_full_simp_tac} on
-\begin{ttbox}
-{\out 1. [| P (f x); y = x; f x = f y |] ==> Q}
-\end{ttbox}
-gives rise to the infinite reduction sequence
-\[
-P\,(f\,x) \stackrel{f\,x = f\,y}{\longmapsto} P\,(f\,y) \stackrel{y = x}{\longmapsto}
-P\,(f\,x) \stackrel{f\,x = f\,y}{\longmapsto} \cdots
-\]
-whereas applying the same tactic to
-\begin{ttbox}
-{\out 1. [| y = x; f x = f y; P (f x) |] ==> Q}
-\end{ttbox}
-terminates.
-
-\medskip
-
-Using the simplifier effectively may take a bit of experimentation.
-Set the \verb$trace_simp$\index{tracing!of simplification} flag to get
-a better idea of what is going on. The resulting output can be
-enormous, especially since invocations of the simplifier are often
-nested (e.g.\ when solving conditions of rewrite rules).
-
-
-\subsection{Modifying the current simpset}
-\begin{ttbox}
-Addsimps : thm list -> unit
-Delsimps : thm list -> unit
-Addsimprocs : simproc list -> unit
-Delsimprocs : simproc list -> unit
-Addcongs : thm list -> unit
-Delcongs : thm list -> unit
-Addsplits : thm list -> unit
-Delsplits : thm list -> unit
-\end{ttbox}
-
-Depending on the theory context, the \texttt{Add} and \texttt{Del}
-functions manipulate basic components of the associated current
-simpset. Internally, all rewrite rules have to be expressed as
-(conditional) meta-equalities. This form is derived automatically
-from object-level equations that are supplied by the user. Another
-source of rewrite rules is \emph{simplification procedures}, that is,
-\ML\ functions that produce suitable theorems on demand, depending on
-the current redex. Congruences are a more advanced feature; see
-{\S}\ref{sec:simp-congs}.
-
-\begin{ttdescription}
-
-\item[\ttindexbold{Addsimps} $thms$;] adds rewrite rules derived from
- $thms$ to the current simpset.
-
-\item[\ttindexbold{Delsimps} $thms$;] deletes rewrite rules derived
- from $thms$ from the current simpset.
-
-\item[\ttindexbold{Addsimprocs} $procs$;] adds simplification
- procedures $procs$ to the current simpset.
-
-\item[\ttindexbold{Delsimprocs} $procs$;] deletes simplification
- procedures $procs$ from the current simpset.
-
-\item[\ttindexbold{Addcongs} $thms$;] adds congruence rules to the
- current simpset.
-
-\item[\ttindexbold{Delcongs} $thms$;] deletes congruence rules from the
- current simpset.
-
-\item[\ttindexbold{Addsplits} $thms$;] adds splitting rules to the
- current simpset.
-
-\item[\ttindexbold{Delsplits} $thms$;] deletes splitting rules from the
- current simpset.
-
-\end{ttdescription}
-
-When a new theory is built, its implicit simpset is initialized by the union
-of the respective simpsets of its parent theories. In addition, certain
-theory definition constructs (e.g.\ \ttindex{datatype} and \ttindex{primrec}
-in HOL) implicitly augment the current simpset. Ordinary definitions are not
-added automatically!
-
-It is up to the user to manipulate the current simpset further by
-explicitly adding or deleting theorems and simplification procedures.
-
-\medskip
-
-Good simpsets are hard to design. Rules that obviously simplify,
-like $\Var{n}+0 = \Var{n}$, should be added to the current simpset right after
-they have been proved. More specific ones (such as distributive laws, which
-duplicate subterms) should be added only for specific proofs and deleted
-afterwards. Conversely, sometimes a rule needs
-to be removed for a certain proof and restored afterwards. The need for
-frequent additions or deletions may indicate a badly designed
-simpset.
-
-\begin{warn}
- The union of the parent simpsets (as described above) is not always
- a good starting point for the new theory. If some ancestors have
- deleted simplification rules because they are no longer wanted,
- while others have left those rules in, then the union will contain
- the unwanted rules. After this union is formed, changes to
- a parent simpset have no effect on the child simpset.
-\end{warn}
-
-
-\section{Simplification sets}\index{simplification sets}
-
-The simplifier is controlled by information contained in {\bf
- simpsets}. These consist of several components, including rewrite
-rules, simplification procedures, congruence rules, and the subgoaler,
-solver and looper tactics. The simplifier should be set up with
-sensible defaults so that most simplifier calls specify only rewrite
-rules or simplification procedures. Experienced users can exploit the
-other components to streamline proofs in more sophisticated manners.
-
-\subsection{Inspecting simpsets}
-\begin{ttbox}
-print_ss : simpset -> unit
-rep_ss : simpset -> \{mss : meta_simpset,
- subgoal_tac: simpset -> int -> tactic,
- loop_tacs : (string * (int -> tactic))list,
- finish_tac : solver list,
- unsafe_finish_tac : solver list\}
-\end{ttbox}
-\begin{ttdescription}
-
-\item[\ttindexbold{print_ss} $ss$;] displays the printable contents of
- simpset $ss$. This includes the rewrite rules and congruences in
- their internal form expressed as meta-equalities. The names of the
- simplification procedures and the patterns they are invoked on are
- also shown. The other parts, functions and tactics, are
- non-printable.
-
-\item[\ttindexbold{rep_ss} $ss$;] decomposes $ss$ as a record of its internal
-  components, namely the meta simpset, the subgoaler, the loop tactics, and
-  the safe and unsafe solvers.
-
-\end{ttdescription}
-
-
-\subsection{Building simpsets}
-\begin{ttbox}
-empty_ss : simpset
-merge_ss : simpset * simpset -> simpset
-\end{ttbox}
-\begin{ttdescription}
-
-\item[\ttindexbold{empty_ss}] is the empty simpset. This is not very useful
- under normal circumstances because it doesn't contain suitable tactics
- (subgoaler etc.). When setting up the simplifier for a particular
- object-logic, one will typically define a more appropriate ``almost empty''
- simpset. For example, in HOL this is called \ttindexbold{HOL_basic_ss}.
-
-\item[\ttindexbold{merge_ss} ($ss@1$, $ss@2$)] merges simpsets $ss@1$
- and $ss@2$ by building the union of their respective rewrite rules,
- simplification procedures and congruences. The other components
- (tactics etc.) cannot be merged, though; they are taken from either
-  simpset\footnote{Actually from $ss@1$, but it would be unwise to count
- on that.}.
-
-\end{ttdescription}
-
-
-\subsection{Rewrite rules}
-\begin{ttbox}
-addsimps : simpset * thm list -> simpset \hfill{\bf infix 4}
-delsimps : simpset * thm list -> simpset \hfill{\bf infix 4}
-\end{ttbox}
-
-\index{rewrite rules|(} Rewrite rules are theorems expressing some
-form of equality, for example:
-\begin{eqnarray*}
- Suc(\Var{m}) + \Var{n} &=& \Var{m} + Suc(\Var{n}) \\
- \Var{P}\conj\Var{P} &\bimp& \Var{P} \\
- \Var{A} \un \Var{B} &\equiv& \{x.x\in \Var{A} \disj x\in \Var{B}\}
-\end{eqnarray*}
-Conditional rewrites such as $\Var{m}<\Var{n} \Imp \Var{m}/\Var{n} =
-0$ are also permitted; the conditions can be arbitrary formulas.
-
-Internally, all rewrite rules are translated into meta-equalities,
-theorems with conclusion $lhs \equiv rhs$. Each simpset contains a
-function for extracting equalities from arbitrary theorems. For
-example, $\neg(\Var{x}\in \{\})$ could be turned into $\Var{x}\in \{\}
-\equiv False$. This function can be installed using
-\ttindex{setmksimps} but only the definer of a logic should need to do
-this; see {\S}\ref{sec:setmksimps}. The function processes theorems
-added by \texttt{addsimps} as well as local assumptions.
-
-\begin{ttdescription}
-
-\item[$ss$ \ttindexbold{addsimps} $thms$] adds rewrite rules derived
- from $thms$ to the simpset $ss$.
-
-\item[$ss$ \ttindexbold{delsimps} $thms$] deletes rewrite rules
- derived from $thms$ from the simpset $ss$.
-
-\end{ttdescription}
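-
-For example, a distributive law might be used for one particular proof
-only (a sketch; \texttt{distrib} is a placeholder for an actual rule):
-\begin{ttbox}
-by (simp_tac (simpset() addsimps [distrib]) 1);
-\end{ttbox}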
-
-\begin{warn}
- The simplifier will accept all standard rewrite rules: those where
- all unknowns are of base type. Hence ${\Var{i}+(\Var{j}+\Var{k})} =
- {(\Var{i}+\Var{j})+\Var{k}}$ is OK.
-
- It will also deal gracefully with all rules whose left-hand sides
- are so-called {\em higher-order patterns}~\cite{nipkow-patterns}.
- \indexbold{higher-order pattern}\indexbold{pattern, higher-order}
- These are terms in $\beta$-normal form (this will always be the case
- unless you have done something strange) where each occurrence of an
- unknown is of the form $\Var{F}(x@1,\dots,x@n)$, where the $x@i$ are
- distinct bound variables. Hence $(\forall x.\Var{P}(x) \land
- \Var{Q}(x)) \bimp (\forall x.\Var{P}(x)) \land (\forall
- x.\Var{Q}(x))$ is also OK, in both directions.
-
- In some rare cases the rewriter will even deal with quite general
- rules: for example ${\Var{f}(\Var{x})\in range(\Var{f})} = True$
- rewrites $g(a) \in range(g)$ to $True$, but will fail to match
- $g(h(b)) \in range(\lambda x.g(h(x)))$. However, you can replace
- the offending subterms (in our case $\Var{f}(\Var{x})$, which is not
- a pattern) by adding new variables and conditions: $\Var{y} =
- \Var{f}(\Var{x}) \Imp \Var{y}\in range(\Var{f}) = True$ is
- acceptable as a conditional rewrite rule since conditions can be
- arbitrary terms.
-
- There is basically no restriction on the form of the right-hand
- sides. They may not contain extraneous term or type variables,
- though.
-\end{warn}
-\index{rewrite rules|)}
-
-
-\subsection{*The subgoaler}\label{sec:simp-subgoaler}
-\begin{ttbox}
-setsubgoaler :
- simpset * (simpset -> int -> tactic) -> simpset \hfill{\bf infix 4}
-prems_of_ss : simpset -> thm list
-\end{ttbox}
-
-The subgoaler is the tactic used to solve subgoals arising out of
-conditional rewrite rules or congruence rules. The default should be
-simplification itself. Occasionally this strategy needs to be
-changed. For example, if the premise of a conditional rule is an
-instance of its conclusion, as in $Suc(\Var{m}) < \Var{n} \Imp \Var{m}
-< \Var{n}$, the default strategy could loop.
-
-\begin{ttdescription}
-
-\item[$ss$ \ttindexbold{setsubgoaler} $tacf$] sets the subgoaler of
- $ss$ to $tacf$. The function $tacf$ will be applied to the current
- simplifier context expressed as a simpset.
-
-\item[\ttindexbold{prems_of_ss} $ss$] retrieves the current set of
- premises from simplifier context $ss$. This may be non-empty only
- if the simplifier has been told to utilize local assumptions in the
- first place, e.g.\ if invoked via \texttt{asm_simp_tac}.
-
-\end{ttdescription}
-
-As an example, consider the following subgoaler:
-\begin{ttbox}
-fun subgoaler ss =
- assume_tac ORELSE'
- resolve_tac (prems_of_ss ss) ORELSE'
- asm_simp_tac ss;
-\end{ttbox}
-This tactic first tries to solve the subgoal by assumption or by
-resolving with one of the premises, calling simplification only
-if that fails.
-
-
-\subsection{*The solver}\label{sec:simp-solver}
-\begin{ttbox}
-mk_solver : string -> (thm list -> int -> tactic) -> solver
-setSolver : simpset * solver -> simpset \hfill{\bf infix 4}
-addSolver : simpset * solver -> simpset \hfill{\bf infix 4}
-setSSolver : simpset * solver -> simpset \hfill{\bf infix 4}
-addSSolver : simpset * solver -> simpset \hfill{\bf infix 4}
-\end{ttbox}
-
-A solver is a tactic that attempts to solve a subgoal after
-simplification. Typically it just proves trivial subgoals such as
-\texttt{True} and $t=t$. It could use sophisticated means such as {\tt
- blast_tac}, though that could make simplification expensive.
-To keep things more abstract, solvers are packaged up in type
-\texttt{solver}. The only way to create a solver is via \texttt{mk_solver}.
-
-Rewriting does not instantiate unknowns. For example, rewriting
-cannot prove $a\in \Var{A}$ since this requires
-instantiating~$\Var{A}$. The solver, however, is an arbitrary tactic
-and may instantiate unknowns as it pleases. This is the only way the
-simplifier can handle a conditional rewrite rule whose condition
-contains extra variables. When a simplification tactic is to be
-combined with other provers, especially with the classical reasoner,
-it is important whether it can be considered safe or not. For this
-reason a simpset contains two solvers, a safe and an unsafe one.
-
-The standard simplification strategy solely uses the unsafe solver,
-which is appropriate in most cases. For special applications where
-the simplification process is not allowed to instantiate unknowns
-within the goal, simplification starts with the safe solver, but may
-still apply the ordinary unsafe one in nested simplifications for
-conditional rules or congruences. Note that in this way the overall
-tactic is not totally safe: it may instantiate unknowns that appear also
-in other subgoals.
-
-\begin{ttdescription}
-\item[\ttindexbold{mk_solver} $s$ $tacf$] converts $tacf$ into a new solver;
- the string $s$ is only attached as a comment and has no other significance.
-
-\item[$ss$ \ttindexbold{setSSolver} $tacf$] installs $tacf$ as the
- \emph{safe} solver of $ss$.
-
-\item[$ss$ \ttindexbold{addSSolver} $tacf$] adds $tacf$ as an
- additional \emph{safe} solver; it will be tried after the solvers
- which had already been present in $ss$.
-
-\item[$ss$ \ttindexbold{setSolver} $tacf$] installs $tacf$ as the
- unsafe solver of $ss$.
-
-\item[$ss$ \ttindexbold{addSolver} $tacf$] adds $tacf$ as an
- additional unsafe solver; it will be tried after the solvers which
- had already been present in $ss$.
-
-\end{ttdescription}
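-
-For example, a simple solver that tries the local premises, assumption
-steps and resolution with $True$ and reflexivity might be built as
-follows (a sketch only; the name string is arbitrary):
-\begin{ttbox}
-val my_solver = mk_solver "ares"
-  (fn prems => ares_tac (TrueI :: refl :: prems));
-val my_ss = HOL_basic_ss setSolver my_solver;
-\end{ttbox}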
-
-\medskip
-
-\index{assumptions!in simplification} The solver tactic is invoked
-with a list of theorems, namely assumptions that hold in the local
-context. This may be non-empty only if the simplifier has been told
-to utilize local assumptions in the first place, e.g.\ if invoked via
-\texttt{asm_simp_tac}. The solver is also presented the full goal
-including its assumptions in any case. Thus it can use these (e.g.\
-by calling \texttt{assume_tac}), even if the list of premises is not
-passed.
-
-\medskip
-
-As explained in {\S}\ref{sec:simp-subgoaler}, the subgoaler is also used
-to solve the premises of congruence rules. These are usually of the
-form $s = \Var{x}$, where $s$ needs to be simplified and $\Var{x}$
-needs to be instantiated with the result. Typically, the subgoaler
-will invoke the simplifier at some point, which will eventually call
-the solver. For this reason, solver tactics must be prepared to solve
-goals of the form $t = \Var{x}$, usually by reflexivity. In
-particular, reflexivity should be tried before any of the fancy
-tactics like \texttt{blast_tac}.
-
-It may even happen that due to simplification the subgoal is no longer
-an equality. For example $False \bimp \Var{Q}$ could be rewritten to
-$\neg\Var{Q}$. To cover this case, the solver could try resolving
-with the theorem $\neg False$.
-
-\medskip
-
-\begin{warn}
- If a premise of a congruence rule cannot be proved, then the
- congruence is ignored. This should only happen if the rule is
- \emph{conditional} --- that is, contains premises not of the form $t
- = \Var{x}$; otherwise it indicates that some congruence rule, or
- possibly the subgoaler or solver, is faulty.
-\end{warn}
-
-
-\subsection{*The looper}\label{sec:simp-looper}
-\begin{ttbox}
-setloop : simpset * (int -> tactic) -> simpset \hfill{\bf infix 4}
-addloop : simpset * (string * (int -> tactic)) -> simpset \hfill{\bf infix 4}
-delloop : simpset * string -> simpset \hfill{\bf infix 4}
-addsplits : simpset * thm list -> simpset \hfill{\bf infix 4}
-delsplits : simpset * thm list -> simpset \hfill{\bf infix 4}
-\end{ttbox}
-
-The looper is a list of tactics that are applied after simplification, in case
-the solver failed to solve the simplified goal. If the looper
-succeeds, the simplification process is started all over again. Each
-of the subgoals generated by the looper is attacked in turn, in
-reverse order.
-
-A typical looper is case splitting\index{case splitting}: the expansion of a conditional.
-Another possibility is to apply an elimination rule on the
-assumptions. More adventurous loopers could start an induction.
-
-\begin{ttdescription}
-
-\item[$ss$ \ttindexbold{setloop} $tacf$] installs $tacf$ as the only looper
- tactic of $ss$.
-
-\item[$ss$ \ttindexbold{addloop} $(name,tacf)$] adds $tacf$ as an additional
- looper tactic with name $name$; it will be tried after the looper tactics
- that had already been present in $ss$.
-
-\item[$ss$ \ttindexbold{delloop} $name$] deletes the looper tactic $name$
- from $ss$.
-
-\item[$ss$ \ttindexbold{addsplits} $thms$] adds
- split tactics for $thms$ as additional looper tactics of $ss$.
-
-\item[$ss$ \ttindexbold{delsplits} $thms$] deletes the
- split tactics for $thms$ from the looper tactics of $ss$.
-
-\end{ttdescription}
-
-The splitter replaces applications of a given function; the right-hand side
-of the replacement can be anything. For example, here is a splitting rule
-for conditional expressions:
-\[ \Var{P}(if(\Var{Q},\Var{x},\Var{y})) \bimp (\Var{Q} \imp \Var{P}(\Var{x}))
-\conj (\neg\Var{Q} \imp \Var{P}(\Var{y}))
-\]
-Another example is the elimination operator for Cartesian products (which
-happens to be called~$split$):
-\[ \Var{P}(split(\Var{f},\Var{p})) \bimp (\forall a~b. \Var{p} =
-\langle a,b\rangle \imp \Var{P}(\Var{f}(a,b)))
-\]
-
-For technical reasons, there is a distinction between case splitting in the
-conclusion and in the premises of a subgoal. The former is done by
-\texttt{split_tac} with rules like \texttt{split_if} or \texttt{option.split},
-which do not split the subgoal, while the latter is done by
-\texttt{split_asm_tac} with rules like \texttt{split_if_asm} or
-\texttt{option.split_asm}, which split the subgoal.
-The operator \texttt{addsplits} automatically takes care of which tactic to
-call, analyzing the form of the rules given as argument.
-\begin{warn}
-Due to \texttt{split_asm_tac}, the simplifier may split subgoals!
-\end{warn}
-
-Case splits should be allowed only when necessary; they are expensive
-and hard to control. Here is an example of use, where \texttt{split_if}
-is the first rule above:
-\begin{ttbox}
-by (simp_tac (simpset()
- addloop ("split if", split_tac [split_if])) 1);
-\end{ttbox}
-Users would usually prefer the following shortcut using \texttt{addsplits}:
-\begin{ttbox}
-by (simp_tac (simpset() addsplits [split_if]) 1);
-\end{ttbox}
-Case-splitting on conditional expressions is usually beneficial, so it is
-enabled by default in the object-logics \texttt{HOL} and \texttt{FOL}.
-
-
-\section{The simplification tactics}\label{simp-tactics}
-\index{simplification!tactics}\index{tactics!simplification}
-\begin{ttbox}
-generic_simp_tac : bool -> bool * bool * bool ->
- simpset -> int -> tactic
-simp_tac : simpset -> int -> tactic
-asm_simp_tac : simpset -> int -> tactic
-full_simp_tac : simpset -> int -> tactic
-asm_full_simp_tac : simpset -> int -> tactic
-safe_asm_full_simp_tac : simpset -> int -> tactic
-\end{ttbox}
-
-\texttt{generic_simp_tac} is the basic tactic underlying all actual
-simplification work. The others are just instantiations of it. The rewriting
-strategy is always strictly bottom up, except for congruence rules,
-which are applied while descending into a term. Conditions in conditional
-rewrite rules are solved recursively before the rewrite rule is applied.
-
-\begin{ttdescription}
-
-\item[\ttindexbold{generic_simp_tac} $safe$ ($simp\_asm$, $use\_asm$, $mutual$)]
- gives direct access to the various simplification modes:
- \begin{itemize}
- \item if $safe$ is {\tt true}, the safe solver is used as explained in
- {\S}\ref{sec:simp-solver},
- \item $simp\_asm$ determines whether the local assumptions are simplified,
- \item $use\_asm$ determines whether the assumptions are used as local rewrite
- rules, and
- \item $mutual$ determines whether assumptions can simplify each other rather
- than being processed from left to right.
- \end{itemize}
- This generic interface is intended
- for building special tools, e.g.\ for combining the simplifier with the
- classical reasoner. It is rarely used directly.
-
-\item[\ttindexbold{simp_tac}, \ttindexbold{asm_simp_tac},
- \ttindexbold{full_simp_tac}, \ttindexbold{asm_full_simp_tac}] are
- the basic simplification tactics that work exactly like their
- namesakes in {\S}\ref{sec:simp-for-dummies}, except that they are
- explicitly supplied with a simpset.
-
-\end{ttdescription}
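-
-For illustration, a tactic that uses the safe solver and neither simplifies
-nor uses the assumptions could be expressed via the generic interface as
-follows; this is only a sketch of how the flags compose, not necessarily a
-definition used in the sources:
-\begin{ttbox}
-(*hypothetical name; flags as documented above*)
-fun plain_simp_tac ss i =
-    generic_simp_tac true (false, false, false) ss i;
-\end{ttbox}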
-
-\medskip
-
-Local modifications of simpsets within a proof are often much cleaner
-when made using the above tactics with explicit simpsets, rather than
-their capitalized counterparts. For example
-\begin{ttbox}
-Addsimps \(thms\);
-by (Simp_tac \(i\));
-Delsimps \(thms\);
-\end{ttbox}
-can be expressed more appropriately as
-\begin{ttbox}
-by (simp_tac (simpset() addsimps \(thms\)) \(i\));
-\end{ttbox}
-
-\medskip
-
-Also note that functions depending implicitly on the current theory
-context (like capital \texttt{Simp_tac} and the other commands of
-{\S}\ref{sec:simp-for-dummies}) should be considered harmful outside of
-actual proof scripts. In particular, ML programs like theory
-definition packages or special tactics should refer to simpsets only
-explicitly, via the above tactics used in conjunction with
-\texttt{simpset_of} or the \texttt{SIMPSET} tacticals.
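-
-For example, a special-purpose tactic in an \ML{} package might be written
-as follows; the function name is ours, but it refers to the simpset only
-explicitly:
-\begin{ttbox}
-(*hypothetical helper: simplify subgoal i using the simpset
-  of theory thy, extended with extra rewrite rules*)
-fun simp_with thy thms i =
-    simp_tac (simpset_of thy addsimps thms) i;
-\end{ttbox}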
-
-
-\section{Forward rules and conversions}
-\index{simplification!forward rules}\index{simplification!conversions}
-\begin{ttbox}\index{*simplify}\index{*asm_simplify}\index{*full_simplify}\index{*asm_full_simplify}\index{*Simplifier.rewrite}\index{*Simplifier.asm_rewrite}\index{*Simplifier.full_rewrite}\index{*Simplifier.asm_full_rewrite}
-simplify : simpset -> thm -> thm
-asm_simplify : simpset -> thm -> thm
-full_simplify : simpset -> thm -> thm
-asm_full_simplify : simpset -> thm -> thm\medskip
-Simplifier.rewrite : simpset -> cterm -> thm
-Simplifier.asm_rewrite : simpset -> cterm -> thm
-Simplifier.full_rewrite : simpset -> cterm -> thm
-Simplifier.asm_full_rewrite : simpset -> cterm -> thm
-\end{ttbox}
-
-The first four of these functions provide \emph{forward} rules for
-simplification. Their effect is analogous to that of the corresponding
-tactics described in {\S}\ref{simp-tactics}, but they affect the whole
-theorem instead of just a certain subgoal. Also note that the
-looper/solver process as described in {\S}\ref{sec:simp-looper} and
-{\S}\ref{sec:simp-solver} is omitted in forward simplification.
-
-The latter four are \emph{conversions}, establishing proven equations
-of the form $t \equiv u$ where the l.h.s.\ $t$ has been given as
-argument.
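-
-For example, a theorem can be normalized in a forward manner like this:
-\begin{ttbox}
-val th' = asm_full_simplify (simpset() addsimps \(thms\)) \(th\);
-\end{ttbox}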
-
-\begin{warn}
- Forward simplification rules and conversions should be used rarely
- in ordinary proof scripts. The main intention is to provide an
- internal interface to the simplifier for special utilities.
-\end{warn}
-
-
-\section{Permutative rewrite rules}
-\index{rewrite rules!permutative|(}
-
-A rewrite rule is {\bf permutative} if the left-hand side and right-hand
-side are the same up to renaming of variables. The most common permutative
-rule is commutativity: $x+y = y+x$. Other examples include $(x-y)-z =
-(x-z)-y$ in arithmetic and $insert(x,insert(y,A)) = insert(y,insert(x,A))$
-for sets. Such rules are common enough to merit special attention.
-
-Because ordinary rewriting loops given such rules, the simplifier
-employs a special strategy, called {\bf ordered
- rewriting}\index{rewriting!ordered}. There is a standard
-lexicographic ordering on terms. This should be perfectly OK in most
-cases, but can be changed for special applications.
-
-\begin{ttbox}
-settermless : simpset * (term * term -> bool) -> simpset \hfill{\bf infix 4}
-\end{ttbox}
-\begin{ttdescription}
-
-\item[$ss$ \ttindexbold{settermless} $rel$] installs the relation $rel$ as
-  the term order in simpset $ss$.
-
-\end{ttdescription}
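-
-For example, the default lexicographic order could be reinstalled
-explicitly; we assume here that it is available as \texttt{Term.termless}:
-\begin{ttbox}
-val ss' = ss settermless Term.termless;
-\end{ttbox}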
-
-\medskip
-
-A permutative rewrite rule is applied only if it decreases the given
-term with respect to this ordering. For example, commutativity
-rewrites~$b+a$ to $a+b$, but then stops because $a+b$ is strictly less
-than $b+a$. The Boyer-Moore theorem prover~\cite{bm88book} also
-employs ordered rewriting.
-
-Permutative rewrite rules are added to simpsets just like other
-rewrite rules; the simplifier recognizes their special status
-automatically. They are most effective in the case of
-associative-commutative operators. (Associativity by itself is not
-permutative.) When dealing with an AC-operator~$f$, keep the
-following points in mind:
-\begin{itemize}\index{associative-commutative operators}
-
-\item The associative law must always be oriented from left to right,
- namely $f(f(x,y),z) = f(x,f(y,z))$. The opposite orientation, if
- used with commutativity, leads to looping in conjunction with the
- standard term order.
-
-\item To complete your set of rewrite rules, you must add not just
- associativity~(A) and commutativity~(C) but also a derived rule, {\bf
- left-com\-mut\-ativ\-ity} (LC): $f(x,f(y,z)) = f(y,f(x,z))$.
-\end{itemize}
-Ordered rewriting with the combination of A, C, and~LC sorts a term
-lexicographically:
-\[\def\maps#1{\stackrel{#1}{\longmapsto}}
- (b+c)+a \maps{A} b+(c+a) \maps{C} b+(a+c) \maps{LC} a+(b+c) \]
-Martin and Nipkow~\cite{martin-nipkow} discuss the theory and give many
-examples; other algebraic structures are amenable to ordered rewriting,
-such as boolean rings.
-
-\subsection{Example: sums of natural numbers}
-
-This example is again set in HOL (see \texttt{HOL/ex/NatSum}). Theory
-\thydx{Arith} contains arithmetic on the natural numbers. Its associated simpset
-contains many arithmetic laws including distributivity of~$\times$ over~$+$,
-while \texttt{add_ac} is a list consisting of the A, C and LC laws for~$+$ on
-type \texttt{nat}. Let us prove the theorem
-\[ \sum@{i=1}^n i = n\times(n+1)/2. \]
-%
-A functional~\texttt{sum} represents the summation operator under the
-interpretation $\texttt{sum} \, f \, (n + 1) = \sum@{i=0}^n f\,i$. We
-extend \texttt{Arith} as follows:
-\begin{ttbox}
-NatSum = Arith +
-consts sum :: [nat=>nat, nat] => nat
-primrec
- "sum f 0 = 0"
- "sum f (Suc n) = f(n) + sum f n"
-end
-\end{ttbox}
-The \texttt{primrec} declaration automatically adds rewrite rules for
-\texttt{sum} to the default simpset. We now remove the
-\texttt{nat_cancel} simplification procedures (in order not to spoil
-the example) and insert the AC-rules for~$+$:
-\begin{ttbox}
-Delsimprocs nat_cancel;
-Addsimps add_ac;
-\end{ttbox}
-Our desired theorem now reads $\texttt{sum} \, (\lambda i.i) \, (n+1) =
-n\times(n+1)/2$. The Isabelle goal has both sides multiplied by~$2$:
-\begin{ttbox}
-Goal "2 * sum (\%i.i) (Suc n) = n * Suc n";
-{\out Level 0}
-{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
-{\out 1. 2 * sum (\%i. i) (Suc n) = n * Suc n}
-\end{ttbox}
-Induction should not be applied until the goal is in the simplest
-form:
-\begin{ttbox}
-by (Simp_tac 1);
-{\out Level 1}
-{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
-{\out 1. n + (sum (\%i. i) n + sum (\%i. i) n) = n * n}
-\end{ttbox}
-Ordered rewriting has sorted the terms in the left-hand side. The
-subgoal is now ready for induction:
-\begin{ttbox}
-by (induct_tac "n" 1);
-{\out Level 2}
-{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
-{\out 1. 0 + (sum (\%i. i) 0 + sum (\%i. i) 0) = 0 * 0}
-\ttbreak
-{\out 2. !!n. n + (sum (\%i. i) n + sum (\%i. i) n) = n * n}
-{\out ==> Suc n + (sum (\%i. i) (Suc n) + sum (\%i.\,i) (Suc n)) =}
-{\out Suc n * Suc n}
-\end{ttbox}
-Simplification proves both subgoals immediately:\index{*ALLGOALS}
-\begin{ttbox}
-by (ALLGOALS Asm_simp_tac);
-{\out Level 3}
-{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
-{\out No subgoals!}
-\end{ttbox}
-Simplification cannot prove the induction step if we omit \texttt{add_ac} from
-the simpset. Observe that like terms have not been collected:
-\begin{ttbox}
-{\out Level 3}
-{\out 2 * sum (\%i. i) (Suc n) = n * Suc n}
-{\out 1. !!n. n + sum (\%i. i) n + (n + sum (\%i. i) n) = n + n * n}
-{\out ==> n + (n + sum (\%i. i) n) + (n + (n + sum (\%i.\,i) n)) =}
-{\out n + (n + (n + n * n))}
-\end{ttbox}
-Ordered rewriting proves this by sorting the left-hand side. Proving
-arithmetic theorems without ordered rewriting requires explicit use of
-commutativity. This is tedious; try it and see!
-
-Ordered rewriting is equally successful in proving
-$\sum@{i=1}^n i^3 = n^2\times(n+1)^2/4$.
-
-
-\subsection{Re-orienting equalities}
-Ordered rewriting with the derived rule \texttt{symmetry} can reverse
-equations:
-\begin{ttbox}
-val symmetry = prove_goal HOL.thy "(x=y) = (y=x)"
- (fn _ => [Blast_tac 1]);
-\end{ttbox}
-This is frequently useful. Assumptions of the form $s=t$, where $t$ occurs
-in the conclusion but not~$s$, can often be brought into the right form.
-For example, ordered rewriting with \texttt{symmetry} can prove the goal
-\[ f(a)=b \conj f(a)=c \imp b=c. \]
-Here \texttt{symmetry} reverses both $f(a)=b$ and $f(a)=c$
-because $f(a)$ is lexicographically greater than $b$ and~$c$. These
-re-oriented equations, as rewrite rules, replace $b$ and~$c$ in the
-conclusion by~$f(a)$.
-
-Another example is the goal $\neg(t=u) \imp \neg(u=t)$.
-The differing orientations make this appear difficult to prove. Ordered
-rewriting with \texttt{symmetry} makes the equalities agree. (Without
-knowing more about~$t$ and~$u$ we cannot say whether they both go to $t=u$
-or~$u=t$.) Then the simplifier can prove the goal outright.
-
-\index{rewrite rules!permutative|)}
-
-
-\section{*Setting up the Simplifier}\label{sec:setting-up-simp}
-\index{simplification!setting up}
-
-Setting up the simplifier for new logics is complicated in the general case.
-This section describes how the simplifier is installed for intuitionistic
-first-order logic; the code is largely taken from {\tt FOL/simpdata.ML} of the
-Isabelle sources.
-
-The case splitting tactic, which resides in a separate file, is not part of
-Pure Isabelle. It needs to be loaded explicitly by the object-logic as
-follows (below \texttt{\~\relax\~\relax} refers to \texttt{\$ISABELLE_HOME}):
-\begin{ttbox}
-use "\~\relax\~\relax/src/Provers/splitter.ML";
-\end{ttbox}
-
-Simplification requires converting object-equalities to meta-level rewrite
-rules. This demands rules stating that equal terms and equivalent formulae
-are also equal at the meta-level. The rule declaration part of the file
-\texttt{FOL/IFOL.thy} contains the two lines
-\begin{ttbox}\index{*eq_reflection theorem}\index{*iff_reflection theorem}
-eq_reflection "(x=y) ==> (x==y)"
-iff_reflection "(P<->Q) ==> (P==Q)"
-\end{ttbox}
-Of course, you should only assert such rules if they are true for your
-particular logic. In Constructive Type Theory, equality is a ternary
-relation of the form $a=b\in A$; the type~$A$ determines the meaning
-of the equality essentially as a partial equivalence relation. The
-present simplifier cannot be used. Rewriting in \texttt{CTT} uses
-another simplifier, which resides in the file {\tt
- Provers/typedsimp.ML} and is not documented. Even this does not
-work for later variants of Constructive Type Theory that use
-intensional equality~\cite{nordstrom90}.
-
-
-\subsection{A collection of standard rewrite rules}
-
-We first prove lots of standard rewrite rules about the logical
-connectives. These include cancellation and associative laws. We
-define a function that echoes the desired law and then supplies it to the
-prover for intuitionistic FOL:
-\begin{ttbox}
-fun int_prove_fun s =
- (writeln s;
- prove_goal IFOL.thy s
- (fn prems => [ (cut_facts_tac prems 1),
- (IntPr.fast_tac 1) ]));
-\end{ttbox}
-The following rewrite rules about conjunction are a selection of those
-proved in \texttt{FOL/simpdata.ML}. Later, these will be supplied to the
-standard simpset.
-\begin{ttbox}
-val conj_simps = map int_prove_fun
- ["P & True <-> P", "True & P <-> P",
- "P & False <-> False", "False & P <-> False",
- "P & P <-> P",
- "P & ~P <-> False", "~P & P <-> False",
- "(P & Q) & R <-> P & (Q & R)"];
-\end{ttbox}
-The file also proves some distributive laws. As they can cause exponential
-blowup, they will not be included in the standard simpset. Instead they
-are merely bound to an \ML{} identifier, for user reference.
-\begin{ttbox}
-val distrib_simps = map int_prove_fun
- ["P & (Q | R) <-> P&Q | P&R",
- "(Q | R) & P <-> Q&P | R&P",
- "(P | Q --> R) <-> (P --> R) & (Q --> R)"];
-\end{ttbox}
-
-
-\subsection{Functions for preprocessing the rewrite rules}
-\label{sec:setmksimps}
-\begin{ttbox}\indexbold{*setmksimps}
-setmksimps : simpset * (thm -> thm list) -> simpset \hfill{\bf infix 4}
-\end{ttbox}
-The next step is to define the function for preprocessing rewrite rules.
-This will be installed by calling \texttt{setmksimps} below. Preprocessing
-occurs whenever rewrite rules are added, whether by user command or
-automatically. Preprocessing involves extracting atomic rewrites at the
-object-level, then reflecting them to the meta-level.
-
-To start, the function \texttt{gen_all} strips any meta-level
-quantifiers from the front of the given theorem.
-
-The function \texttt{atomize} analyses a theorem in order to extract
-atomic rewrite rules. The head of all the patterns, matched by the
-wildcard~\texttt{_}, is the coercion function \texttt{Trueprop}.
-\begin{ttbox}
-fun atomize th = case concl_of th of
- _ $ (Const("op &",_) $ _ $ _) => atomize(th RS conjunct1) \at
- atomize(th RS conjunct2)
- | _ $ (Const("op -->",_) $ _ $ _) => atomize(th RS mp)
- | _ $ (Const("All",_) $ _) => atomize(th RS spec)
- | _ $ (Const("True",_)) => []
- | _ $ (Const("False",_)) => []
- | _ => [th];
-\end{ttbox}
-There are several cases, depending upon the form of the conclusion:
-\begin{itemize}
-\item Conjunction: extract rewrites from both conjuncts.
-\item Implication: convert $P\imp Q$ to the meta-implication $P\Imp Q$ and
- extract rewrites from~$Q$; these will be conditional rewrites with the
- condition~$P$.
-\item Universal quantification: remove the quantifier, replacing the bound
- variable by a schematic variable, and extract rewrites from the body.
-\item \texttt{True} and \texttt{False} contain no useful rewrites.
-\item Anything else: return the theorem in a singleton list.
-\end{itemize}
-The resulting theorems are not literally atomic --- they could be
-disjunctive, for example --- but are broken down as much as possible.
-See the file \texttt{ZF/simpdata.ML} for a sophisticated translation of
-set-theoretic formulae into rewrite rules.
-
-For standard situations like the above,
-there is a generic auxiliary function \ttindexbold{mk_atomize} that takes a
-list of pairs $(name, thms)$, where $name$ is an operator name and
-$thms$ is a list of theorems to resolve with in case the pattern matches,
-and returns a suitable \texttt{atomize} function.
-
-
-The simplified rewrites must now be converted into meta-equalities. The
-rule \texttt{eq_reflection} converts equality rewrites, while {\tt
- iff_reflection} converts if-and-only-if rewrites. The latter possibility
-can arise in two other ways: the negative theorem~$\neg P$ is converted to
-$P\equiv\texttt{False}$, and any other theorem~$P$ is converted to
-$P\equiv\texttt{True}$. The rules \texttt{iff_reflection_F} and {\tt
- iff_reflection_T} accomplish this conversion.
-\begin{ttbox}
-val P_iff_F = int_prove_fun "~P ==> (P <-> False)";
-val iff_reflection_F = P_iff_F RS iff_reflection;
-\ttbreak
-val P_iff_T = int_prove_fun "P ==> (P <-> True)";
-val iff_reflection_T = P_iff_T RS iff_reflection;
-\end{ttbox}
-The function \texttt{mk_eq} converts a theorem to a meta-equality
-using the case analysis described above.
-\begin{ttbox}
-fun mk_eq th = case concl_of th of
- _ $ (Const("op =",_)$_$_) => th RS eq_reflection
- | _ $ (Const("op <->",_)$_$_) => th RS iff_reflection
- | _ $ (Const("Not",_)$_) => th RS iff_reflection_F
- | _ => th RS iff_reflection_T;
-\end{ttbox}
-The
-three functions \texttt{gen_all}, \texttt{atomize} and \texttt{mk_eq}
-will be composed together and supplied below to \texttt{setmksimps}.
-
-
-\subsection{Making the initial simpset}
-
-It is time to assemble these items. The list \texttt{IFOL_simps} contains the
-default rewrite rules for intuitionistic first-order logic. The first of
-these is the reflexive law expressed as the equivalence
-$(a=a)\bimp\texttt{True}$; the rewrite rule $a=a$ is clearly useless.
-\begin{ttbox}
-val IFOL_simps =
- [refl RS P_iff_T] \at conj_simps \at disj_simps \at not_simps \at
- imp_simps \at iff_simps \at quant_simps;
-\end{ttbox}
-The list \texttt{triv_rls} contains trivial theorems for the solver. Any
-subgoal that is simplified to one of these will be removed.
-\begin{ttbox}
-val notFalseI = int_prove_fun "~False";
-val triv_rls = [TrueI,refl,iff_refl,notFalseI];
-\end{ttbox}
-We also define the function \ttindex{mk_meta_cong} to convert the conclusion
-of congruence rules into meta-equalities.
-\begin{ttbox}
-fun mk_meta_cong rl = standard (mk_meta_eq (mk_meta_prems rl));
-\end{ttbox}
-%
-The basic simpset for intuitionistic FOL is \ttindexbold{FOL_basic_ss}. It
-preprocesses rewrites using
-{\tt gen_all}, \texttt{atomize} and \texttt{mk_eq}.
-It solves simplified subgoals using \texttt{triv_rls} and assumptions, and by
-detecting contradictions. It uses \ttindex{asm_simp_tac} to tackle subgoals
-of conditional rewrites.
-
-Other simpsets built from \texttt{FOL_basic_ss} inherit these items; in
-particular, \ttindexbold{IFOL_ss} introduces {\tt
-  IFOL_simps} as rewrite rules. \ttindexbold{FOL_ss} will later
-extend \texttt{IFOL_ss} with classical rewrite rules such as $\neg\neg
-P\bimp P$.
-\index{*setmksimps}\index{*setSSolver}\index{*setSolver}\index{*setsubgoaler}
-\index{*addsimps}\index{*addcongs}
-\begin{ttbox}
-fun unsafe_solver prems = FIRST'[resolve_tac (triv_rls {\at} prems),
- atac, etac FalseE];
-
-fun safe_solver prems = FIRST'[match_tac (triv_rls {\at} prems),
- eq_assume_tac, ematch_tac [FalseE]];
-
-val FOL_basic_ss =
- empty_ss setsubgoaler asm_simp_tac
- addsimprocs [defALL_regroup, defEX_regroup]
- setSSolver safe_solver
- setSolver unsafe_solver
- setmksimps (map mk_eq o atomize o gen_all)
- setmkcong mk_meta_cong;
-
-val IFOL_ss =
- FOL_basic_ss addsimps (IFOL_simps {\at}
- int_ex_simps {\at} int_all_simps)
- addcongs [imp_cong];
-\end{ttbox}
-This simpset takes \texttt{imp_cong} as a congruence rule in order to use
-contextual information to simplify the conclusions of implications:
-\[ \List{\Var{P}\bimp\Var{P'};\; \Var{P'} \Imp \Var{Q}\bimp\Var{Q'}} \Imp
- (\Var{P}\imp\Var{Q}) \bimp (\Var{P'}\imp\Var{Q'})
-\]
-By adding the congruence rule \texttt{conj_cong}, we could obtain a similar
-effect for conjunctions.
-
-
-\index{simplification|)}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End:
--- a/doc-src/Ref/substitution.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,217 +0,0 @@
-
-\chapter{Substitution Tactics} \label{substitution}
-\index{tactics!substitution|(}\index{equality|(}
-
-Replacing equals by equals is a basic form of reasoning. Isabelle supports
-several kinds of equality reasoning. {\bf Substitution} means replacing
-free occurrences of~$t$ by~$u$ in a subgoal. This is easily done, given an
-equality $t=u$, provided the logic possesses the appropriate rule. The
-tactic \texttt{hyp_subst_tac} performs substitution even in the assumptions.
-But it works via object-level implication, and therefore must be specially
-set up for each suitable object-logic.
-
-Substitution should not be confused with object-level {\bf rewriting}.
-Given equalities of the form $t=u$, rewriting replaces instances of~$t$ by
-corresponding instances of~$u$, and continues until it reaches a normal
-form. Substitution handles `one-off' replacements by particular
-equalities while rewriting handles general equations.
-Chapter~\ref{chap:simplification} discusses Isabelle's rewriting tactics.
-
-
-\section{Substitution rules}
-\index{substitution!rules}\index{*subst theorem}
-Many logics include a substitution rule of the form
-$$
-\List{\Var{a}=\Var{b}; \Var{P}(\Var{a})} \Imp
-\Var{P}(\Var{b}) \eqno(subst)
-$$
-In backward proof, this may seem difficult to use: the conclusion
-$\Var{P}(\Var{b})$ admits far too many unifiers. But, if the theorem {\tt
-eqth} asserts $t=u$, then \hbox{\tt eqth RS subst} is the derived rule
-\[ \Var{P}(t) \Imp \Var{P}(u). \]
-Provided $u$ is not an unknown, resolution with this rule is
-well-behaved.\footnote{Unifying $\Var{P}(u)$ with a formula~$Q$
-expresses~$Q$ in terms of its dependence upon~$u$. There are still $2^k$
-unifiers, if $Q$ has $k$ occurrences of~$u$, but Isabelle ensures that
-the first unifier includes all the occurrences.} To replace $u$ by~$t$ in
-subgoal~$i$, use
-\begin{ttbox}
-resolve_tac [eqth RS subst] \(i\){\it.}
-\end{ttbox}
-To replace $t$ by~$u$ in
-subgoal~$i$, use
-\begin{ttbox}
-resolve_tac [eqth RS ssubst] \(i\){\it,}
-\end{ttbox}
-where \tdxbold{ssubst} is the `swapped' substitution rule
-$$
-\List{\Var{a}=\Var{b}; \Var{P}(\Var{b})} \Imp
-\Var{P}(\Var{a}). \eqno(ssubst)
-$$
-If \tdx{sym} denotes the symmetry rule
-\(\Var{a}=\Var{b}\Imp\Var{b}=\Var{a}\), then \texttt{ssubst} is just
-\hbox{\tt sym RS subst}. Many logics with equality include the rules {\tt
-subst} and \texttt{ssubst}, as well as \texttt{refl}, \texttt{sym} and \texttt{trans}
-(for the usual equality laws). Examples include \texttt{FOL} and \texttt{HOL},
-but not \texttt{CTT} (Constructive Type Theory).
-
-Elim-resolution is well-behaved with assumptions of the form $t=u$.
-To replace $u$ by~$t$ or $t$ by~$u$ in subgoal~$i$, use
-\begin{ttbox}
-eresolve_tac [subst] \(i\) {\rm or} eresolve_tac [ssubst] \(i\){\it.}
-\end{ttbox}
-
-Logics HOL, FOL and ZF define the tactic \ttindexbold{stac} by
-\begin{ttbox}
-fun stac eqth = CHANGED o rtac (eqth RS ssubst);
-\end{ttbox}
-Now \texttt{stac~eqth} is like \texttt{resolve_tac [eqth RS ssubst]} but with the
-valuable property of failing if the substitution has no effect.
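-
-For example, assuming a commutativity theorem \texttt{add_commute} of the
-form $x+y=y+x$, a single substitution step in subgoal~1 is performed by
-\begin{ttbox}
-by (stac add_commute 1);
-\end{ttbox}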
-
-
-\section{Substitution in the hypotheses}
-\index{assumptions!substitution in}
-Substitution rules, like other rules of natural deduction, do not affect
-the assumptions. This can be inconvenient. Consider proving the subgoal
-\[ \List{c=a; c=b} \Imp a=b. \]
-Calling \texttt{eresolve_tac\ts[ssubst]\ts\(i\)} simply discards the
-assumption~$c=a$, since $c$ does not occur in~$a=b$. Of course, we can
-work out a solution. First apply \texttt{eresolve_tac\ts[subst]\ts\(i\)},
-replacing~$a$ by~$c$:
-\[ c=b \Imp c=b \]
-Equality reasoning can be difficult, but this trivial proof requires
-nothing more sophisticated than substitution in the assumptions.
-Object-logics that include the rule~$(subst)$ provide tactics for this
-purpose:
-\begin{ttbox}
-hyp_subst_tac : int -> tactic
-bound_hyp_subst_tac : int -> tactic
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{hyp_subst_tac} {\it i}]
- selects an equality assumption of the form $t=u$ or $u=t$, where $t$ is a
- free variable or parameter. Deleting this assumption, it replaces $t$
- by~$u$ throughout subgoal~$i$, including the other assumptions.
-
-\item[\ttindexbold{bound_hyp_subst_tac} {\it i}]
- is similar but only substitutes for parameters (bound variables).
- Uses for this are discussed below.
-\end{ttdescription}
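-
-For instance, on the subgoal $\List{c=a;\; c=b} \Imp a=b$ discussed above,
-the following calls (a sketch in an FOL-like logic) substitute for~$c$ and
-then close the goal by assumption:
-\begin{ttbox}
-by (hyp_subst_tac 1);
-by (assume_tac 1);
-\end{ttbox}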
-The term being replaced must be a free variable or parameter. Substitution
-for constants is usually unhelpful, since they may appear in other
-theorems. For instance, the best way to use the assumption $0=1$ is to
-contradict a theorem that states $0\not=1$, rather than to replace 0 by~1
-in the subgoal!
-
-Substitution for unknowns, such as $\Var{x}=0$, is a bad idea: we might prove
-the subgoal more easily by instantiating~$\Var{x}$ to~1.
-Substitution for free variables is unhelpful if they appear in the
-premises of a rule being derived: the substitution affects object-level
-assumptions, not meta-level assumptions. For instance, replacing~$a$
-by~$b$ could make the premise~$P(a)$ worthless. To avoid this problem, use
-\texttt{bound_hyp_subst_tac}; alternatively, call \ttindex{cut_facts_tac} to
-insert the atomic premises as object-level assumptions.
-
-
-\section{Setting up the package}
-Many Isabelle object-logics, such as \texttt{FOL}, \texttt{HOL} and their
-descendants, come with \texttt{hyp_subst_tac} already defined. A few others,
-such as \texttt{CTT}, do not support this tactic because they lack the
-rule~$(subst)$. When defining a new logic that includes a substitution
-rule and implication, you must set up \texttt{hyp_subst_tac} yourself. It
-is packaged as the \ML{} functor \ttindex{HypsubstFun}, which takes the
-argument signature~\texttt{HYPSUBST_DATA}:
-\begin{ttbox}
-signature HYPSUBST_DATA =
- sig
- structure Simplifier : SIMPLIFIER
- val dest_Trueprop : term -> term
- val dest_eq : term -> (term*term)*typ
- val dest_imp : term -> term*term
- val eq_reflection : thm (* a=b ==> a==b *)
- val rev_eq_reflection: thm (* a==b ==> a=b *)
- val imp_intr : thm (*(P ==> Q) ==> P-->Q *)
- val rev_mp : thm (* [| P; P-->Q |] ==> Q *)
- val subst : thm (* [| a=b; P(a) |] ==> P(b) *)
- val sym : thm (* a=b ==> b=a *)
- val thin_refl : thm (* [|x=x; P|] ==> P *)
- end;
-\end{ttbox}
-Thus, the functor requires the following items:
-\begin{ttdescription}
-\item[Simplifier] should be an instance of the simplifier (see
- Chapter~\ref{chap:simplification}).
-
-\item[\ttindexbold{dest_Trueprop}] should coerce a meta-level formula to the
- corresponding object-level one. Typically, it should return $P$ when
- applied to the term $\texttt{Trueprop}\,P$ (see example below).
-
-\item[\ttindexbold{dest_eq}] should return the triple~$((t,u),T)$, where $T$ is
- the type of~$t$ and~$u$, when applied to the \ML{} term that
- represents~$t=u$. For other terms, it should raise an exception.
-
-\item[\ttindexbold{dest_imp}] should return the pair~$(P,Q)$ when applied to
- the \ML{} term that represents the implication $P\imp Q$. For other terms,
- it should raise an exception.
-
-\item[\tdxbold{eq_reflection}] is the theorem discussed
- in~\S\ref{sec:setting-up-simp}.
-
-\item[\tdxbold{rev_eq_reflection}] is the reverse of \texttt{eq_reflection}.
-
-\item[\tdxbold{imp_intr}] should be the implies introduction
-rule $(\Var{P}\Imp\Var{Q})\Imp \Var{P}\imp\Var{Q}$.
-
-\item[\tdxbold{rev_mp}] should be the `reversed' implies elimination
-rule $\List{\Var{P}; \;\Var{P}\imp\Var{Q}} \Imp \Var{Q}$.
-
-\item[\tdxbold{subst}] should be the substitution rule
-$\List{\Var{a}=\Var{b};\; \Var{P}(\Var{a})} \Imp \Var{P}(\Var{b})$.
-
-\item[\tdxbold{sym}] should be the symmetry rule
-$\Var{a}=\Var{b}\Imp\Var{b}=\Var{a}$.
-
-\item[\tdxbold{thin_refl}] should be the rule
-$\List{\Var{a}=\Var{a};\; \Var{P}} \Imp \Var{P}$, which is used to erase
-trivial equalities.
-\end{ttdescription}
-%
-The functor resides in file \texttt{Provers/hypsubst.ML} in the Isabelle
-distribution directory. It is not sensitive to the precise formalization
-of the object-logic. It is not concerned with the names of the equality
-and implication symbols, or the types of formulae and terms.
-
-Coding the functions \texttt{dest_Trueprop}, \texttt{dest_eq} and
-\texttt{dest_imp} requires knowledge of Isabelle's representation of terms.
-For \texttt{FOL}, they are declared by
-\begin{ttbox}
-fun dest_Trueprop (Const ("Trueprop", _) $ P) = P
- | dest_Trueprop t = raise TERM ("dest_Trueprop", [t]);
-
-fun dest_eq (Const("op =",T) $ t $ u) = ((t, u), domain_type T)
-
-fun dest_imp (Const("op -->",_) $ A $ B) = (A, B)
- | dest_imp t = raise TERM ("dest_imp", [t]);
-\end{ttbox}
-Recall that \texttt{Trueprop} is the coercion from type~$o$ to type~$prop$,
-while \hbox{\tt op =} is the internal name of the infix operator~\texttt{=}.
-Function \ttindexbold{domain_type}, given the function type $S\To T$, returns
-the type~$S$. Pattern-matching expresses the function concisely, using
-wildcards~({\tt_}) for the types.
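-
-The functor application itself follows the same pattern in each logic. The
-following sketch is modelled on \texttt{FOL}; the theorems
-\texttt{rev_eq_reflection} and \texttt{thin_refl} are assumed to have been
-proved locally beforehand:
-\begin{ttbox}
-structure Hypsubst = HypsubstFun
-(struct
-  structure Simplifier = Simplifier
-  val dest_Trueprop = dest_Trueprop
-  val dest_eq = dest_eq
-  val dest_imp = dest_imp
-  val eq_reflection = eq_reflection
-  val rev_eq_reflection = rev_eq_reflection
-  val imp_intr = impI
-  val rev_mp = rev_mp
-  val subst = subst
-  val sym = sym
-  val thin_refl = thin_refl
-end);
-open Hypsubst;
-\end{ttbox}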
-
-The tactic \texttt{hyp_subst_tac} works as follows. First, it identifies a
-suitable equality assumption, possibly re-orienting it using~\texttt{sym}.
-Then it moves other assumptions into the conclusion of the goal, by repeatedly
-calling \texttt{etac~rev_mp}. Then, it uses \texttt{asm_full_simp_tac} or
-\texttt{ssubst} to substitute throughout the subgoal. (If the equality
-involves unknowns then it must use \texttt{ssubst}.) Then, it deletes the
-equality. Finally, it moves the assumptions back to their original positions
-by calling \hbox{\tt resolve_tac\ts[imp_intr]}.
-
-\index{equality|)}\index{tactics!substitution|)}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End:
--- a/doc-src/Ref/syntax.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,240 +0,0 @@
-
-\chapter{Syntax Transformations} \label{chap:syntax}
-\newcommand\ttapp{\mathrel{\hbox{\tt\$}}}
-\newcommand\mtt[1]{\mbox{\tt #1}}
-\newcommand\ttfct[1]{\mathop{\mtt{#1}}\nolimits}
-\newcommand\Constant{\ttfct{Constant}}
-\newcommand\Variable{\ttfct{Variable}}
-\newcommand\Appl[1]{\ttfct{Appl}\,[#1]}
-\index{syntax!transformations|(}
-
-
-\section{Transforming parse trees to ASTs}\label{sec:astofpt}
-\index{ASTs!made from parse trees}
-\newcommand\astofpt[1]{\lbrakk#1\rbrakk}
-
-The parse tree is the raw output of the parser. Translation functions,
-called {\bf parse AST translations}\indexbold{translations!parse AST},
-transform the parse tree into an abstract syntax tree.
-
-The parse tree is constructed by nesting the right-hand sides of the
-productions used to recognize the input. Such parse trees are simply lists
-of tokens and constituent parse trees, the latter representing the
-nonterminals of the productions. Let us refer to the actual productions in
-the form displayed by {\tt print_syntax} (see \S\ref{sec:inspct-thy} for an
-example).
-
-Ignoring parse \AST{} translations, parse trees are transformed to \AST{}s
-by stripping out delimiters and copy productions. More precisely, the
-mapping $\astofpt{-}$ is derived from the productions as follows:
-\begin{itemize}
-\item Name tokens: $\astofpt{t} = \Variable s$, where $t$ is an \ndx{id},
- \ndx{var}, \ndx{tid}, \ndx{tvar}, \ndx{num}, \ndx{xnum} or \ndx{xstr} token,
- and $s$ its associated string. Note that for {\tt xstr} this does not
- include the quotes.
-
-\item Copy productions:\index{productions!copy}
- $\astofpt{\ldots P \ldots} = \astofpt{P}$. Here $\ldots$ stands for
- strings of delimiters, which are discarded. $P$ stands for the single
- constituent that is not a delimiter; it is either a nonterminal symbol or
- a name token.
-
- \item 0-ary productions: $\astofpt{\ldots \mtt{=>} c} = \Constant c$.
- Here there are no constituents other than delimiters, which are
- discarded.
-
- \item $n$-ary productions, where $n \ge 1$: delimiters are discarded and
- the remaining constituents $P@1$, \ldots, $P@n$ are built into an
- application whose head constant is~$c$:
- \[ \astofpt{\ldots P@1 \ldots P@n \ldots \mtt{=>} c} =
- \Appl{\Constant c, \astofpt{P@1}, \ldots, \astofpt{P@n}}
- \]
-\end{itemize}
-Figure~\ref{fig:parse_ast} presents some simple examples, where {\tt ==},
-{\tt _appl}, {\tt _args}, and so forth name productions of the Pure syntax.
-These examples illustrate the need for further translations to make \AST{}s
-closer to the typed $\lambda$-calculus. The Pure syntax provides
-predefined parse \AST{} translations\index{translations!parse AST} for
-ordinary applications, type applications, nested abstractions, meta
-implications and function types. Figure~\ref{fig:parse_ast_tr} shows their
-effect on some representative input strings.
-
-
-\begin{figure}
-\begin{center}
-\tt\begin{tabular}{ll}
-\rm input string & \rm \AST \\\hline
-"f" & f \\
-"'a" & 'a \\
-"t == u" & ("==" t u) \\
-"f(x)" & ("_appl" f x) \\
-"f(x, y)" & ("_appl" f ("_args" x y)) \\
-"f(x, y, z)" & ("_appl" f ("_args" x ("_args" y z))) \\
-"\%x y.\ t" & ("_lambda" ("_idts" x y) t) \\
-\end{tabular}
-\end{center}
-\caption{Parsing examples using the Pure syntax}\label{fig:parse_ast}
-\end{figure}
-
-\begin{figure}
-\begin{center}
-\tt\begin{tabular}{ll}
-\rm input string & \rm \AST{} \\\hline
-"f(x, y, z)" & (f x y z) \\
-"'a ty" & (ty 'a) \\
-"('a, 'b) ty" & (ty 'a 'b) \\
-"\%x y z.\ t" & ("_abs" x ("_abs" y ("_abs" z t))) \\
-"\%x ::\ 'a.\ t" & ("_abs" ("_constrain" x 'a) t) \\
-"[| P; Q; R |] => S" & ("==>" P ("==>" Q ("==>" R S))) \\
-"['a, 'b, 'c] => 'd" & ("fun" 'a ("fun" 'b ("fun" 'c 'd)))
-\end{tabular}
-\end{center}
-\caption{Built-in parse \AST{} translations}\label{fig:parse_ast_tr}
-\end{figure}
-
-The names of constant heads in the \AST{} control the translation process.
-The list of constants invoking parse \AST{} translations appears in the
-output of {\tt print_syntax} under {\tt parse_ast_translation}.
-
-
-\section{Transforming ASTs to terms}\label{sec:termofast}
-\index{terms!made from ASTs}
-\newcommand\termofast[1]{\lbrakk#1\rbrakk}
-
-The \AST{}, after application of macros (see \S\ref{sec:macros}), is
-transformed into a term. This term is probably ill-typed since type
-inference has not occurred yet. The term may contain type constraints
-consisting of applications with head {\tt "_constrain"}; the second
-argument is a type encoded as a term. Type inference later introduces
-correct types or rejects the input.
-
-Another set of translation functions, namely parse
-translations\index{translations!parse}, may affect this process. If we
-ignore parse translations for the time being, then \AST{}s are transformed
-to terms by mapping \AST{} constants to constants, \AST{} variables to
-schematic or free variables and \AST{} applications to applications.
-
-More precisely, the mapping $\termofast{-}$ is defined by
-\begin{itemize}
-\item Constants: $\termofast{\Constant x} = \ttfct{Const} (x,
- \mtt{dummyT})$.
-
-\item Schematic variables: $\termofast{\Variable \mtt{"?}xi\mtt"} =
- \ttfct{Var} ((x, i), \mtt{dummyT})$, where $x$ is the base name and $i$
- the index extracted from~$xi$.
-
-\item Free variables: $\termofast{\Variable x} = \ttfct{Free} (x,
- \mtt{dummyT})$.
-
-\item Function applications with $n$ arguments:
- \[ \termofast{\Appl{f, x@1, \ldots, x@n}} =
- \termofast{f} \ttapp
- \termofast{x@1} \ttapp \ldots \ttapp \termofast{x@n}
- \]
-\end{itemize}
-Here \ttindex{Const}, \ttindex{Var}, \ttindex{Free} and
-\verb|$|\index{$@{\tt\$}} are constructors of the datatype \mltydx{term},
-while \ttindex{dummyT} stands for some dummy type that is ignored during
-type inference.
-
-So far the outcome is still a first-order term. Abstractions and bound
-variables (constructors \ttindex{Abs} and \ttindex{Bound}) are introduced
-by parse translations. Such translations are attached to {\tt "_abs"},
-{\tt "!!"} and user-defined binders.
-
-
-\section{Printing of terms}
-\newcommand\astofterm[1]{\lbrakk#1\rbrakk}\index{ASTs!made from terms}
-
-The output phase is essentially the inverse of the input phase. Terms are
-translated via abstract syntax trees into strings. Finally the strings are
-pretty printed.
-
-Print translations (\S\ref{sec:tr_funs}) may affect the transformation of
-terms into \AST{}s. Ignoring those, the transformation maps
-term constants, variables and applications to the corresponding constructs
-on \AST{}s. Abstractions are mapped to applications of the special
-constant {\tt _abs}.
-
-More precisely, the mapping $\astofterm{-}$ is defined as follows:
-\begin{itemize}
- \item $\astofterm{\ttfct{Const} (x, \tau)} = \Constant x$.
-
- \item $\astofterm{\ttfct{Free} (x, \tau)} = constrain (\Variable x,
- \tau)$.
-
- \item $\astofterm{\ttfct{Var} ((x, i), \tau)} = constrain (\Variable
- \mtt{"?}xi\mtt", \tau)$, where $\mtt?xi$ is the string representation of
- the {\tt indexname} $(x, i)$.
-
- \item For the abstraction $\lambda x::\tau.t$, let $x'$ be a variant
- of~$x$ renamed to differ from all names occurring in~$t$, and let $t'$
- be obtained from~$t$ by replacing all bound occurrences of~$x$ by
- the free variable $x'$. This replaces corresponding occurrences of the
- constructor \ttindex{Bound} by the term $\ttfct{Free} (x',
- \mtt{dummyT})$:
- \[ \astofterm{\ttfct{Abs} (x, \tau, t)} =
- \Appl{\Constant \mtt{"_abs"},
- constrain(\Variable x', \tau), \astofterm{t'}}
- \]
-
- \item $\astofterm{\ttfct{Bound} i} = \Variable \mtt{"B.}i\mtt"$.
- The occurrence of constructor \ttindex{Bound} should never happen
- when printing well-typed terms; it indicates a de Bruijn index with no
- matching abstraction.
-
- \item Where $f$ is not an application,
- \[ \astofterm{f \ttapp x@1 \ttapp \ldots \ttapp x@n} =
- \Appl{\astofterm{f}, \astofterm{x@1}, \ldots,\astofterm{x@n}}
- \]
-\end{itemize}
-%
-Type constraints\index{type constraints} are inserted to allow the printing
-of types. This is governed by the boolean variable \ttindex{show_types}:
-\begin{itemize}
- \item $constrain(x, \tau) = x$ \ if $\tau = \mtt{dummyT}$ \index{*dummyT} or
- \ttindex{show_types} is set to {\tt false}.
-
- \item $constrain(x, \tau) = \Appl{\Constant \mtt{"_constrain"}, x,
- \astofterm{\tau}}$ \ otherwise.
-
- Here, $\astofterm{\tau}$ is the \AST{} encoding of $\tau$: type
- constructors go to {\tt Constant}s; type identifiers go to {\tt
- Variable}s; type applications go to {\tt Appl}s with the type
- constructor as the first element. If \ttindex{show_sorts} is set to
- {\tt true}, some type variables are decorated with an \AST{} encoding
- of their sort.
-\end{itemize}
-%
-The \AST{}, after application of macros (see \S\ref{sec:macros}), is
-transformed into the final output string. The built-in {\bf print AST
- translations}\indexbold{translations!print AST} reverse the
-parse \AST{} translations of Fig.\ts\ref{fig:parse_ast_tr}.
-
-For the actual printing process, the names attached to productions
-of the form $\ldots A^{(p@1)}@1 \ldots A^{(p@n)}@n \ldots \mtt{=>} c$ play
-a vital role. Each \AST{} with constant head $c$, namely $\mtt"c\mtt"$ or
-$(\mtt"c\mtt"~ x@1 \ldots x@n)$, is printed according to the production
-for~$c$. Each argument~$x@i$ is converted to a string, and put in
-parentheses if its priority~$(p@i)$ requires this. The resulting strings
-and their syntactic sugar (denoted by \dots{} above) are joined to make a
-single string.
-
-If an application $(\mtt"c\mtt"~ x@1 \ldots x@m)$ has more arguments
-than the corresponding production, it is first split into
-$((\mtt"c\mtt"~ x@1 \ldots x@n) ~ x@{n+1} \ldots x@m)$. Applications
-with too few arguments or with non-constant head or without a
-corresponding production are printed as $f(x@1, \ldots, x@l)$ or
-$(\alpha@1, \ldots, \alpha@l) ty$. Multiple productions associated
-with some name $c$ are tried in order of appearance. An occurrence of
-$\Variable x$ is simply printed as~$x$.
-
-Blanks are {\em not\/} inserted automatically. If blanks are required to
-separate tokens, specify them in the mixfix declaration, possibly preceded
-by a slash~({\tt/}) to allow a line break.
-\index{ASTs|)}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End:
--- a/doc-src/Ref/tactic.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,173 +0,0 @@
-
-\chapter{Tactics} \label{tactics}
-\index{tactics|(}
-
-\section{Other basic tactics}
-
-\subsection{Inserting premises and facts}\label{cut_facts_tac}
-\index{tactics!for inserting facts}\index{assumptions!inserting}
-\begin{ttbox}
-cut_facts_tac : thm list -> int -> tactic
-\end{ttbox}
-These tactics add assumptions to a subgoal.
-\begin{ttdescription}
-\item[\ttindexbold{cut_facts_tac} {\it thms} {\it i}]
- adds the {\it thms} as new assumptions to subgoal~$i$. Once they have
- been inserted as assumptions, they become subject to tactics such as {\tt
- eresolve_tac} and {\tt rewrite_goals_tac}. Only rules with no premises
- are inserted: Isabelle cannot use assumptions that contain $\Imp$
- or~$\Forall$. Sometimes the theorems are premises of a rule being
- derived, returned by~{\tt goal}; instead of calling this tactic, you
- could state the goal with an outermost meta-quantifier.
-
-\end{ttdescription}
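-
-For example, when deriving a rule with {\tt goal}, the returned premises
-can be inserted into subgoal~1 like this (a sketch in an FOL-like logic):
-\begin{ttbox}
-val prems = goal \(thy\) "[| P; Q |] ==> P & Q";
-by (cut_facts_tac prems 1);
-\end{ttbox}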
-
-
-\subsection{Composition: resolution without lifting}
-\index{tactics!for composition}
-\begin{ttbox}
-compose_tac: (bool * thm * int) -> int -> tactic
-\end{ttbox}
-{\bf Composing} two rules means resolving them without prior lifting or
-renaming of unknowns. This low-level operation, which underlies the
-resolution tactics, may occasionally be useful for special effects.
-A typical application is \ttindex{res_inst_tac}, which lifts and instantiates a
-rule, then passes the result to {\tt compose_tac}.
-\begin{ttdescription}
-\item[\ttindexbold{compose_tac} ($flag$, $rule$, $m$) $i$]
-refines subgoal~$i$ using $rule$, without lifting. The $rule$ is taken to
-have the form $\List{\psi@1; \ldots; \psi@m} \Imp \psi$, where $\psi$ need
-not be atomic; thus $m$ determines the number of new subgoals. If
-$flag$ is {\tt true} then it performs elim-resolution --- it solves the
-first premise of~$rule$ by assumption and deletes that assumption.
-\end{ttdescription}
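-
-For example, assuming \texttt{conjI} is the rule $\List{P;\;Q}\Imp P\conj
-Q$, the following call refines subgoal~1 without lifting; since the rule
-has two premises, it yields two new subgoals:
-\begin{ttbox}
-by (compose_tac (false, conjI, 2) 1);
-\end{ttbox}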
-
-
-\section{*Managing lots of rules}
-These operations are not intended for interactive use. They are concerned
-with the processing of large numbers of rules in automatic proof
-strategies. Higher-order resolution involving a long list of rules is
-slow. Filtering techniques can shorten the list of rules given to
-resolution, and can also detect whether a subgoal is too flexible,
-with too many rules applicable.
-
-\subsection{Combined resolution and elim-resolution} \label{biresolve_tac}
-\index{tactics!resolution}
-\begin{ttbox}
-biresolve_tac : (bool*thm)list -> int -> tactic
-bimatch_tac : (bool*thm)list -> int -> tactic
-subgoals_of_brl : bool*thm -> int
-lessb : (bool*thm) * (bool*thm) -> bool
-\end{ttbox}
-{\bf Bi-resolution} takes a list of $\it (flag,rule)$ pairs. For each
-pair, it applies resolution if the flag is~{\tt false} and
-elim-resolution if the flag is~{\tt true}. A single tactic call handles a
-mixture of introduction and elimination rules.
-
-\begin{ttdescription}
-\item[\ttindexbold{biresolve_tac} {\it brls} {\it i}]
-refines the proof state by resolution or elim-resolution on each rule, as
-indicated by its flag. It affects subgoal~$i$ of the proof state.
-
-\item[\ttindexbold{bimatch_tac}]
-is like {\tt biresolve_tac}, but performs matching: unknowns in the
-proof state are never updated (see~{\S}\ref{match_tac}).
-
-\item[\ttindexbold{subgoals_of_brl}({\it flag},{\it rule})]
-returns the number of new subgoals that bi-res\-o\-lu\-tion would yield for the
-pair (if applied to a suitable subgoal). This is $n$ if the flag is
-{\tt false} and $n-1$ if the flag is {\tt true}, where $n$ is the number
-of premises of the rule. Elim-resolution yields one fewer subgoal than
-ordinary resolution because it solves the major premise by assumption.
-
-\item[\ttindexbold{lessb} ({\it brl1},{\it brl2})]
-returns the result of
-\begin{ttbox}
-subgoals_of_brl{\it brl1} < subgoals_of_brl{\it brl2}
-\end{ttbox}
-\end{ttdescription}
-Note that \hbox{\tt sort lessb {\it brls}} sorts a list of $\it
-(flag,rule)$ pairs by the number of new subgoals they will yield. Thus,
-those that yield the fewest subgoals should be tried first.
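-
-For example, a mixed list of introduction and elimination rules (here from
-an FOL-like logic) can be sorted and applied in a single call:
-\begin{ttbox}
-val brls = [(false, conjI), (true, conjE), (true, disjE)];
-by (biresolve_tac (sort lessb brls) 1);
-\end{ttbox}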
-
-
-\subsection{Discrimination nets for fast resolution}\label{filt_resolve_tac}
-\index{discrimination nets|bold}
-\index{tactics!resolution}
-\begin{ttbox}
-net_resolve_tac : thm list -> int -> tactic
-net_match_tac : thm list -> int -> tactic
-net_biresolve_tac: (bool*thm) list -> int -> tactic
-net_bimatch_tac : (bool*thm) list -> int -> tactic
-filt_resolve_tac : thm list -> int -> int -> tactic
-could_unify : term*term->bool
-filter_thms : (term*term->bool) -> int*term*thm list -> thm{\ts}list
-\end{ttbox}
-The module {\tt Net} implements a discrimination net data structure for
-fast selection of rules \cite[Chapter 14]{charniak80}. A term is
-classified by the symbol list obtained by flattening it in preorder.
-The flattening takes account of function applications, constants, and free
-and bound variables; it identifies all unknowns and also regards
-\index{lambda abs@$\lambda$-abstractions}
-$\lambda$-abstractions as unknowns, since they could $\eta$-contract to
-anything.
-
-A discrimination net serves as a polymorphic dictionary indexed by terms.
-The module provides various functions for inserting and removing items from
-nets. It provides functions for returning all items whose term could match
-or unify with a target term. The matching and unification tests are
-overly lax (due to the identifications mentioned above) but they serve as
-useful filters.
-
-A net can store introduction rules indexed by their conclusion, and
-elimination rules indexed by their major premise. Isabelle provides
-several functions for `compiling' long lists of rules into fast
-resolution tactics. When supplied with a list of theorems, these functions
-build a discrimination net; the net is used when the tactic is applied to a
-goal. To avoid repeatedly constructing the nets, use currying: bind the
-resulting tactics to \ML{} identifiers.
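-
-For example, the following binds a net-based tactic once, so the net is
-built only when the value is declared and is then reused on every call
-(the rule list is merely illustrative):
-\begin{ttbox}
-val intr_tac = net_resolve_tac [conjI, impI, allI];
-by (intr_tac 1);
-\end{ttbox}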
-
-\begin{ttdescription}
-\item[\ttindexbold{net_resolve_tac} {\it thms}]
-builds a discrimination net to obtain the effect of a similar call to {\tt
-resolve_tac}.
-
-\item[\ttindexbold{net_match_tac} {\it thms}]
-builds a discrimination net to obtain the effect of a similar call to {\tt
-match_tac}.
-
-\item[\ttindexbold{net_biresolve_tac} {\it brls}]
-builds a discrimination net to obtain the effect of a similar call to {\tt
-biresolve_tac}.
-
-\item[\ttindexbold{net_bimatch_tac} {\it brls}]
-builds a discrimination net to obtain the effect of a similar call to {\tt
-bimatch_tac}.
-
-\item[\ttindexbold{filt_resolve_tac} {\it thms} {\it maxr} {\it i}]
-uses discrimination nets to extract the {\it thms} that are applicable to
-subgoal~$i$. If more than {\it maxr\/} theorems are applicable then the
-tactic fails. Otherwise it calls {\tt resolve_tac}.
-
-This tactic helps avoid runaway instantiation of unknowns, for example in
-type inference.
-
-\item[\ttindexbold{could_unify} ({\it t},{\it u})]
-returns {\tt false} if~$t$ and~$u$ are `obviously' non-unifiable, and
-otherwise returns~{\tt true}. It assumes all variables are distinct; for
-example, it reports that {\tt ?a=?a} may unify with {\tt 0=1}.
-
-\item[\ttindexbold{filter_thms} $could\; (limit,prem,thms)$]
-returns the list of potentially resolvable rules (in {\it thms\/}) for the
-subgoal {\it prem}, using the predicate {\it could\/} to compare the
-conclusion of the subgoal with the conclusion of each rule. The resulting list
-is no longer than {\it limit}.
-\end{ttdescription}
-
-\index{tactics|)}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End:
--- a/doc-src/Ref/thm.tex Mon Aug 27 17:11:55 2012 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,636 +0,0 @@
-
-\chapter{Theorems and Forward Proof}
-\index{theorems|(}
-
-Theorems, which represent the axioms, theorems and rules of
-object-logics, have type \mltydx{thm}. This chapter describes
-operations that join theorems in forward proof. Most theorem
-operations are intended for advanced applications, such as programming
-new proof procedures.
-
-
-\subsection{Instantiating unknowns in a theorem} \label{sec:instantiate}
-\index{instantiation}
-\begin{alltt}\footnotesize
-read_instantiate : (string*string) list -> thm -> thm
-read_instantiate_sg : Sign.sg -> (string*string) list -> thm -> thm
-cterm_instantiate : (cterm*cterm) list -> thm -> thm
-instantiate' : ctyp option list -> cterm option list -> thm -> thm
-\end{alltt}
-These meta-rules instantiate type and term unknowns in a theorem. They are
-occasionally useful. They can prevent difficulties with higher-order
-unification, and define specialized versions of rules.
-\begin{ttdescription}
-\item[\ttindexbold{read_instantiate} {\it insts} {\it thm}]
-processes the instantiations {\it insts} and instantiates the rule~{\it
-thm}. The processing of instantiations is described
-in \S\ref{res_inst_tac}, under {\tt res_inst_tac}.
-
-Use {\tt res_inst_tac}, not {\tt read_instantiate}, to instantiate a rule
-and refine a particular subgoal. The tactic allows instantiation by the
-subgoal's parameters, and reads the instantiations using the signature
-associated with the proof state.
-
-Use {\tt read_instantiate_sg} below if {\it insts\/} appears to be treated
-incorrectly.
-
-\item[\ttindexbold{read_instantiate_sg} {\it sg} {\it insts} {\it thm}]
- is like \texttt{read_instantiate {\it insts}~{\it thm}}, but it reads
- the instantiations under signature~{\it sg}. This is necessary to
- instantiate a rule from a general theory, such as first-order logic,
- using the notation of some specialized theory. Use the function {\tt
- sign_of} to get a theory's signature.
-
-\item[\ttindexbold{cterm_instantiate} {\it ctpairs} {\it thm}]
-is similar to {\tt read_instantiate}, but the instantiations are provided
-as pairs of certified terms, not as strings to be read.
-
-\item[\ttindexbold{instantiate'} {\it ctyps} {\it cterms} {\it thm}]
- instantiates {\it thm} according to the positional arguments {\it
- ctyps} and {\it cterms}. Counting from left to right, schematic
- variables $?x$ are either replaced by $t$ for any argument
- \texttt{Some\(\;t\)}, or left unchanged in case of \texttt{None} or
- if the end of the argument list is encountered. Types are
- instantiated before terms.
-
-\end{ttdescription}
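-
-For example, a specialized instance of an introduction rule can be created
-once and for all; here we assume a theory where \texttt{exI} has the
-schematic variable {\tt ?x} and {\tt 0} is a term of the right type:
-\begin{ttbox}
-val exI_zero = read_instantiate [("x", "0")] exI;  (*?x := 0*)
-\end{ttbox}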
-
-
-\subsection{Miscellaneous forward rules}\label{MiscellaneousForwardRules}
-\index{theorems!standardizing}
-\begin{ttbox}
-standard : thm -> thm
-zero_var_indexes : thm -> thm
-make_elim : thm -> thm
-rule_by_tactic : tactic -> thm -> thm
-rotate_prems : int -> thm -> thm
-permute_prems : int -> int -> thm -> thm
-rearrange_prems : int list -> thm -> thm
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{standard} $thm$] puts $thm$ into the standard form
- of object-rules. It discharges all meta-assumptions, replaces free
- variables by schematic variables, renames schematic variables to
- have subscript zero, also strips outer (meta) quantifiers and
- removes dangling sort hypotheses.
-
-\item[\ttindexbold{zero_var_indexes} $thm$]
-makes all schematic variables have subscript zero, renaming them to avoid
-clashes.
-
-\item[\ttindexbold{make_elim} $thm$]
-\index{rules!converting destruction to elimination}
-converts $thm$, which should be a destruction rule of the form
-$\List{P@1;\ldots;P@m}\Imp
-Q$, to the elimination rule $\List{P@1; \ldots; P@m; Q\Imp R}\Imp R$. This
-is the basis for destruct-resolution: {\tt dresolve_tac}, etc.
-
-\item[\ttindexbold{rule_by_tactic} {\it tac} {\it thm}]
- applies {\it tac\/} to the {\it thm}, freezing its variables first, then
- yields the proof state returned by the tactic. In typical usage, the
- {\it thm\/} represents an instance of a rule with several premises, some
- with contradictory assumptions (because of the instantiation). The
- tactic proves those subgoals and does whatever else it can, and returns
- whatever is left.
-
-\item[\ttindexbold{rotate_prems} $k$ $thm$] rotates the premises of $thm$ to
- the left by~$k$ positions (to the right if $k<0$). It simply calls
- \texttt{permute_prems}, below, with $j=0$. Used with
- \texttt{eresolve_tac}\index{*eresolve_tac!on other than first premise}, it
- gives the effect of applying the tactic to some other premise of $thm$ than
- the first.
-
-\item[\ttindexbold{permute_prems} $j$ $k$ $thm$] rotates the premises of $thm$
- leaving the first $j$ premises unchanged. It
- requires $0\leq j\leq n$, where $n$ is the number of premises. If $k$ is
- positive then it rotates the remaining $n-j$ premises to the left; if $k$ is
- negative then it rotates the premises to the right.
-
-\item[\ttindexbold{rearrange_prems} $ps$ $thm$] permutes the premises of $thm$
- where the value at the $i$-th position (counting from $0$) in the list $ps$
- gives the position within the original thm to be transferred to position $i$.
- Any remaining trailing positions are left unchanged.
-\end{ttdescription}
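-
-For example, \texttt{make_elim} converts the symmetry rule
-$\Var{a}=\Var{b}\Imp\Var{b}=\Var{a}$ into a form suitable for
-destruct-resolution:
-\begin{ttbox}
-val sym_elim = make_elim sym;
-(*  [| ?a = ?b;  ?b = ?a ==> ?R |] ==> ?R  *)
-\end{ttbox}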
-
-
-\subsection{Taking a theorem apart}
-\index{theorems!taking apart}
-\index{flex-flex constraints}
-\begin{ttbox}
-cprop_of : thm -> cterm
-concl_of : thm -> term
-prems_of : thm -> term list
-cprems_of : thm -> cterm list
-nprems_of : thm -> int
-tpairs_of : thm -> (term*term) list
-theory_of_thm : thm -> theory
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{cprop_of} $thm$] returns the statement of $thm$ as
- a certified term.
-
-\item[\ttindexbold{concl_of} $thm$] returns the conclusion of $thm$ as
- a term.
-
-\item[\ttindexbold{prems_of} $thm$] returns the premises of $thm$ as a
- list of terms.
-
-\item[\ttindexbold{cprems_of} $thm$] returns the premises of $thm$ as
- a list of certified terms.
-
-\item[\ttindexbold{nprems_of} $thm$]
-returns the number of premises in $thm$, and is equivalent to {\tt
- length~(prems_of~$thm$)}.
-
-\item[\ttindexbold{tpairs_of} $thm$] returns the flex-flex constraints
- of $thm$.
-
-\item[\ttindexbold{theory_of_thm} $thm$] returns the theory associated
- with $thm$. Note that this does a lookup in Isabelle's global
- database of loaded theories.
-
-\end{ttdescription}
-
-
-\subsection{*Sort hypotheses} \label{sec:sort-hyps}
-\index{sort hypotheses}
-\begin{ttbox}
-strip_shyps : thm -> thm
-strip_shyps_warning : thm -> thm
-\end{ttbox}
-
-Isabelle's type variables are decorated with sorts, constraining them to
-certain ranges of types. This has little impact when sorts only serve for
-syntactic classification of types --- for example, FOL distinguishes between
-terms and other types. But when type classes are introduced through axioms,
-this may result in some sorts becoming {\em empty\/}: where one cannot exhibit
-a type belonging to it because certain sets of axioms are unsatisfiable.
-
-If a theorem contains a type variable that is constrained by an empty
-sort, then that theorem has no instances. It is basically an instance
-of {\em ex falso quodlibet}. But what if it is used to prove another
-theorem that no longer involves that sort? The latter theorem holds
-only under an additional non-emptiness assumption.
-
-Therefore, Isabelle's theorems carry around sort hypotheses. The {\tt
-shyps} field is a list of sorts occurring in type variables in the current
-{\tt prop} and {\tt hyps} fields. It may also include sorts used in the
-theorem's proof that no longer appear in the {\tt prop} or {\tt hyps}
-fields --- so-called {\em dangling\/} sort constraints. These are the
-critical ones, asserting non-emptiness of the corresponding sorts.
-
-Isabelle automatically removes extraneous sorts from the {\tt shyps} field at
-the end of a proof, provided that non-emptiness can be established by looking
-at the theorem's signature: from the {\tt classes} and {\tt arities}
-information. This operation is performed by \texttt{strip_shyps} and
-\texttt{strip_shyps_warning}.
-
-\begin{ttdescription}
-
-\item[\ttindexbold{strip_shyps} $thm$] removes any extraneous sort hypotheses
- that can be witnessed from the type signature.
-
-\item[\ttindexbold{strip_shyps_warning}] is like \texttt{strip_shyps}, but
-  issues a warning message for any pending sort hypotheses that do not have a
-  (syntactic) witness.
-
-\end{ttdescription}
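-
-A typical use is to post-process a previously derived theorem \texttt{th}
-(a hypothetical binding) at the end of a forward proof:
-\begin{ttbox}
-val th' = strip_shyps_warning th;
-\end{ttbox}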
-
-
-\subsection{Tracing flags for unification}
-\index{tracing!of unification}
-\begin{ttbox}
-Unify.trace_simp : bool ref \hfill\textbf{initially false}
-Unify.trace_types : bool ref \hfill\textbf{initially false}
-Unify.trace_bound : int ref \hfill\textbf{initially 10}
-Unify.search_bound : int ref \hfill\textbf{initially 20}
-\end{ttbox}
-Tracing the search may be useful when higher-order unification behaves
-unexpectedly, although circumventing the problem with {\tt res_inst_tac} is
-usually easier.
-\begin{ttdescription}
-\item[set Unify.trace_simp;]
-causes tracing of the simplification phase.
-
-\item[set Unify.trace_types;]
-generates warnings about incompleteness when unification does not consider
-all possible instantiations of type unknowns.
-
-\item[Unify.trace_bound := $n$;]
-causes unification to print tracing information once it reaches depth~$n$.
-Use $n=0$ for full tracing. At the default value of~10, tracing
-information is almost never printed.
-
-\item[Unify.search_bound := $n$;] prevents unification from
-  searching past depth~$n$. Because of this bound, higher-order
- unification cannot return an infinite sequence, though it can return
- an exponentially long one. The search rarely approaches the default value
- of~20. If the search is cut off, unification prints a warning
- \texttt{Unification bound exceeded}.
-\end{ttdescription}
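-
-For instance, the following (hypothetical) session enables type tracing,
-lowers the tracing bound, and raises the search bound before re-running a
-problematic tactic:
-\begin{ttbox}
-set Unify.trace_types;
-Unify.trace_bound := 5;
-Unify.search_bound := 40;
-\end{ttbox}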
-
-
-\section{*Primitive meta-level inference rules}
-\index{meta-rules|(}
-
-\subsection{Logical equivalence rules}
-\index{meta-equality}
-\begin{ttbox}
-equal_intr : thm -> thm -> thm
-equal_elim : thm -> thm -> thm
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{equal_intr} $thm@1$ $thm@2$]
-applies $({\equiv}I)$ to $thm@1$ and~$thm@2$. It maps the premises~$\psi$
-and~$\phi$ to the conclusion~$\phi\equiv\psi$; the assumptions are those of
-the first premise with~$\phi$ removed, plus those of
-the second premise with~$\psi$ removed.
-
-\item[\ttindexbold{equal_elim} $thm@1$ $thm@2$]
-applies $({\equiv}E)$ to $thm@1$ and~$thm@2$. It maps the premises
-$\phi\equiv\psi$ and $\phi$ to the conclusion~$\psi$.
-\end{ttdescription}
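-
-For example, given theorems \texttt{eq} and \texttt{th} (hypothetical
-bindings) of the forms $\phi\equiv\psi$ and $\phi$, the following yields a
-theorem of the form $\psi$:
-\begin{ttbox}
-equal_elim eq th;
-\end{ttbox}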
-
-
-\subsection{Equality rules}
-\index{meta-equality}
-\begin{ttbox}
-reflexive : cterm -> thm
-symmetric : thm -> thm
-transitive : thm -> thm -> thm
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{reflexive} $ct$]
-makes the theorem \(ct\equiv ct\).
-
-\item[\ttindexbold{symmetric} $thm$]
-maps the premise $a\equiv b$ to the conclusion $b\equiv a$.
-
-\item[\ttindexbold{transitive} $thm@1$ $thm@2$]
-maps the premises $a\equiv b$ and $b\equiv c$ to the conclusion~${a\equiv c}$.
-\end{ttdescription}
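-
-These rules compose in the obvious way. Given theorems \texttt{ab} and
-\texttt{bc} (hypothetical bindings) of the forms $a\equiv b$ and $b\equiv c$,
-the following derives $c\equiv a$:
-\begin{ttbox}
-symmetric (transitive ab bc);
-\end{ttbox}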
-
-
-\subsection{The $\lambda$-conversion rules}
-\index{lambda calc@$\lambda$-calculus}
-\begin{ttbox}
-beta_conversion : cterm -> thm
-extensional : thm -> thm
-abstract_rule : string -> cterm -> thm -> thm
-combination : thm -> thm -> thm
-\end{ttbox}
-There is no rule for $\alpha$-conversion because Isabelle regards
-$\alpha$-convertible theorems as equal.
-\begin{ttdescription}
-\item[\ttindexbold{beta_conversion} $ct$]
-makes the theorem $((\lambda x.a)(b)) \equiv a[b/x]$, where $ct$ is the
-term $(\lambda x.a)(b)$.
-
-\item[\ttindexbold{extensional} $thm$]
-maps the premise $f(x) \equiv g(x)$ to the conclusion $f\equiv g$.
-Parameter~$x$ is taken from the premise. It may be an unknown or a free
-variable (provided it does not occur in the assumptions); it must not occur
-in $f$ or~$g$.
-
-\item[\ttindexbold{abstract_rule} $v$ $x$ $thm$]
-maps the premise $a\equiv b$ to the conclusion $(\lambda x.a) \equiv
-(\lambda x.b)$, abstracting over all occurrences (if any!) of~$x$.
-Parameter~$x$ is supplied as a cterm. It may be an unknown or a free
-variable (provided it does not occur in the assumptions). In the
-conclusion, the bound variable is named~$v$.
-
-\item[\ttindexbold{combination} $thm@1$ $thm@2$]
-maps the premises $f\equiv g$ and $a\equiv b$ to the conclusion~$f(a)\equiv
-g(b)$.
-\end{ttdescription}
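-
-For example, in Isabelle/HOL one might certify a $\beta$-redex with
-\texttt{read_cterm} and convert it as follows (a sketch; we assume
-\texttt{HOLogic.boolT} denotes the type of booleans, and the output shown is
-only illustrative):
-\begin{ttbox}
-val ct = read_cterm (sign_of Main.thy) ("(%x. x) P", HOLogic.boolT);
-beta_conversion ct;
-{\out val it = "(%x. x) P == P" : thm}
-\end{ttbox}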
-
-
-\section{Derived rules for goal-directed proof}
-Most of these rules have the sole purpose of implementing particular
-tactics. There are few occasions for applying them directly to a theorem.
-
-\subsection{Resolution}
-\index{resolution}
-\begin{ttbox}
-biresolution : bool -> (bool*thm)list -> int -> thm
- -> thm Seq.seq
-\end{ttbox}
-\begin{ttdescription}
-\item[\ttindexbold{biresolution} $match$ $rules$ $i$ $state$]
-performs bi-resolution on subgoal~$i$ of $state$, using the list of $\it
-(flag,rule)$ pairs. For each pair, it applies resolution if the flag
-is~{\tt false} and elim-resolution if the flag is~{\tt true}. If $match$
-is~{\tt true}, the $state$ is not instantiated.
-\end{ttdescription}
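-
-For instance, the following call (a sketch, for some Isabelle/HOL proof
-state \texttt{state}) tries ordinary resolution with \texttt{conjI} and
-elim-resolution with \texttt{conjE} on the first subgoal:
-\begin{ttbox}
-biresolution false [(false, conjI), (true, conjE)] 1 state;
-\end{ttbox}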
-
-
-\subsection{Composition: resolution without lifting}
-\index{resolution!without lifting}
-\begin{ttbox}
-compose : thm * int * thm -> thm list
-COMP : thm * thm -> thm
-bicompose : bool -> bool * thm * int -> int -> thm
- -> thm Seq.seq
-\end{ttbox}
-In forward proof, a typical use of composition is to regard an assertion of
-the form $\phi\Imp\psi$ as atomic. Schematic variables are not renamed, so
-beware of clashes!
-\begin{ttdescription}
-\item[\ttindexbold{compose} ($thm@1$, $i$, $thm@2$)]
-uses $thm@1$, regarded as an atomic formula, to solve premise~$i$
-of~$thm@2$. Let $thm@1$ and $thm@2$ be $\psi$ and $\List{\phi@1; \ldots;
-\phi@n} \Imp \phi$. For each $s$ that unifies~$\psi$ and $\phi@i$, the
-result list contains the theorem
-\[ (\List{\phi@1; \ldots; \phi@{i-1}; \phi@{i+1}; \ldots; \phi@n} \Imp \phi)s.
-\]
-
-\item[$thm@1$ \ttindexbold{COMP} $thm@2$]
-calls \hbox{\tt compose ($thm@1$, 1, $thm@2$)} and returns the result, if
-unique; otherwise, it raises exception~\xdx{THM}\@. It is
-analogous to {\tt RS}\@.
-
-For example, suppose that $thm@1$ is $a=b\Imp b=a$, a symmetry rule, and
-that $thm@2$ is $\List{P\Imp Q; \neg Q} \Imp\neg P$, which is the
-principle of contrapositives. Then the result would be the
-derived rule $\neg(b=a)\Imp\neg(a=b)$.
-
-\item[\ttindexbold{bicompose} $match$ ($flag$, $rule$, $m$) $i$ $state$]
-refines subgoal~$i$ of $state$ using $rule$, without lifting. The $rule$
-is taken to have the form $\List{\psi@1; \ldots; \psi@m} \Imp \psi$, where
-$\psi$ need not be atomic; thus $m$ determines the number of new
-subgoals. If $flag$ is {\tt true} then it performs elim-resolution --- it
-solves the first premise of~$rule$ by assumption and deletes that
-assumption. If $match$ is~{\tt true}, the $state$ is not instantiated.
-\end{ttdescription}
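-
-The contrapositive example above can be reproduced directly, assuming
-hypothetical bindings \texttt{sym_rule} and \texttt{contra_rule} for the two
-theorems (output only illustrative):
-\begin{ttbox}
-sym_rule COMP contra_rule;
-{\out val it = "~ (b = a) ==> ~ (a = b)" : thm}
-\end{ttbox}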
-
-
-\subsection{Other meta-rules}
-\begin{ttbox}
-rename_params_rule : string list * int -> thm -> thm
-\end{ttbox}
-\begin{ttdescription}
-
-\item[\ttindexbold{rename_params_rule} ({\it names}, {\it i}) $thm$]
-uses the $names$ to rename the parameters of premise~$i$ of $thm$. The
-names must be distinct. If there are fewer names than parameters, then the
-rule renames the innermost parameters and may modify the remaining ones to
-ensure that all the parameters are distinct.
-\index{parameters!renaming}
-
-\end{ttdescription}
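-
-For example, the following renames the two innermost parameters of the first
-premise of a rule \texttt{r} (a hypothetical binding) to \texttt{x}
-and \texttt{y}:
-\begin{ttbox}
-rename_params_rule (["x", "y"], 1) r;
-\end{ttbox}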
-\index{meta-rules|)}
-
-
-\section{Proof terms}\label{sec:proofObjects}
-\index{proof terms|(} Isabelle can record the full meta-level proof of each
-theorem. The proof term contains all logical inferences in detail.
-Resolution and rewriting steps are broken down to primitive rules of the
-meta-logic. The proof term can be inspected by a separate proof-checker,
-for example.
-
-According to the well-known {\em Curry-Howard isomorphism}, a proof can
-be viewed as a $\lambda$-term. Following this idea, proofs
-in Isabelle are internally represented by a datatype similar to the one for
-terms described in \S\ref{sec:terms}.
-\begin{ttbox}
-infix 8 % %%;
-
-datatype proof =
- PBound of int
- | Abst of string * typ option * proof
- | AbsP of string * term option * proof
- | op % of proof * term option
- | op %% of proof * proof
- | Hyp of term
- | PThm of (string * (string * string list) list) *
- proof * term * typ list option
- | PAxm of string * term * typ list option
- | Oracle of string * term * typ list option
- | MinProof of proof list;
-\end{ttbox}
-
-\begin{ttdescription}
-\item[\ttindexbold{Abst} ($a$, $\tau$, $prf$)] is the abstraction over
-a {\it term variable} of type $\tau$ in the body $prf$. Logically, this
-corresponds to $\bigwedge$ introduction. The name $a$ is used only for
-parsing and printing.
-\item[\ttindexbold{AbsP} ($a$, $\varphi$, $prf$)] is the abstraction
-over a {\it proof variable} standing for a proof of proposition $\varphi$
-in the body $prf$. This corresponds to $\Longrightarrow$ introduction.
-\item[$prf$ \% $t$] \index{\%@{\tt\%}|bold}
-is the application of proof $prf$ to term $t$
-which corresponds to $\bigwedge$ elimination.
-\item[$prf@1$ \%\% $prf@2$] \index{\%\%@{\tt\%\%}|bold}
-is the application of proof $prf@1$ to
-proof $prf@2$ which corresponds to $\Longrightarrow$ elimination.
-\item[\ttindexbold{PBound} $i$] is a {\em proof variable} with de Bruijn
-\cite{debruijn72} index $i$.
-\item[\ttindexbold{Hyp} $\varphi$] corresponds to the use of a meta level
-hypothesis $\varphi$.
-\item[\ttindexbold{PThm} (($name$, $tags$), $prf$, $\varphi$, $\overline{\tau}$)]
-stands for a pre-proved theorem, where $name$ is the name of the theorem,
-$prf$ is its actual proof, $\varphi$ is the proven proposition,
-and $\overline{\tau}$ is
-a type assignment for the type variables occurring in the proposition.
-\item[\ttindexbold{PAxm} ($name$, $\varphi$, $\overline{\tau}$)]
-corresponds to the use of an axiom with name $name$ and proposition
-$\varphi$, where $\overline{\tau}$ is a type assignment for the type
-variables occurring in the proposition.
-\item[\ttindexbold{Oracle} ($name$, $\varphi$, $\overline{\tau}$)]
-denotes the invocation of an oracle with name $name$ which produced
-a proposition $\varphi$, where $\overline{\tau}$ is a type assignment
-for the type variables occurring in the proposition.
-\item[\ttindexbold{MinProof} $prfs$]
-represents a {\em minimal proof} where $prfs$ is a list of theorems,
-axioms or oracles.
-\end{ttdescription}
-Note that there are no separate constructors
-for abstraction and application on the level of {\em types}, since
-instantiation of type variables is accomplished via the type assignments
-attached to {\tt PThm}, {\tt PAxm} and {\tt Oracle}.
-
-Each theorem's derivation is stored as the {\tt der} field of its internal
-record:
-\begin{ttbox}
-#2 (#der (rep_thm conjI));
-{\out PThm (("HOL.conjI", []),}
-{\out AbsP ("H", None, AbsP ("H", None, \dots)), \dots, None) %}
-{\out None % None : Proofterm.proof}
-\end{ttbox}
-This proof term identifies a labelled theorem, {\tt conjI} of theory
-\texttt{HOL}, whose underlying proof is
-{\tt AbsP ("H", None, AbsP ("H", None, $\dots$))}.
-The theorem is applied to two (implicit) term arguments, which correspond
-to the two variables occurring in its proposition.
-
-Isabelle's inference kernel can produce proof objects with different
-levels of detail. This is controlled via the global reference variable
-\ttindexbold{proofs}:
-\begin{ttdescription}
-\item[proofs := 0;] only record uses of oracles
-\item[proofs := 1;] record uses of oracles as well as dependencies
- on other theorems and axioms
-\item[proofs := 2;] record inferences in full detail
-\end{ttdescription}
-Reconstruction and checking of proofs as described in \S\ref{sec:reconstruct_proofs}
-will not work for proofs constructed with {\tt proofs} set to
-{\tt 0} or {\tt 1}.
-Theorems involving oracles will be printed with a
-\verb|[!]| suffix to indicate the reduced level of confidence.
-
-\medskip
-
-The dependencies of theorems can be viewed using the function
-\ttindexbold{thm_deps}\index{theorems!dependencies}:
-\begin{ttbox}
-thm_deps [\(thm@1\), \(\ldots\), \(thm@n\)];
-\end{ttbox}
-generates the dependency graph of the theorems $thm@1$, $\ldots$, $thm@n$ and
-displays it using Isabelle's graph browser. For this to work properly,
-the theorems in question have to be proved with {\tt proofs} set to a value
-greater than {\tt 0}. You can use
-\begin{ttbox}
-ThmDeps.enable : unit -> unit
-ThmDeps.disable : unit -> unit
-\end{ttbox}
-to set \texttt{proofs} appropriately.
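-
-For example, one might enable dependency tracking, prove the theorems of
-interest, and then display their dependency graph (a sketch):
-\begin{ttbox}
-ThmDeps.enable ();
-\(\ldots\)
-thm_deps [\(thm@1\), \(thm@2\)];
-\end{ttbox}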
-
-\subsection{Reconstructing and checking proof terms}\label{sec:reconstruct_proofs}
-\index{proof terms!reconstructing}
-\index{proof terms!checking}
-
-A closer look at the above proof datatype reveals that some constructor
-arguments are {\it optional}. The reason is that keeping a full proof term
-for each theorem would result in enormous memory requirements. Fortunately,
-proof terms typically contain a great deal of redundant information that can
-be reconstructed from the context. Therefore,
-Isabelle's inference kernel creates only {\em partial} (or {\em implicit})
-\index{proof terms!partial} proof terms, in which
-all typing information in terms, all term and type labels of abstractions
-{\tt AbsP} and {\tt Abst}, and (if possible) some argument terms of
-\verb!%! are omitted. The following functions are available for
-reconstructing and checking proof terms:
-\begin{ttbox}
-Reconstruct.reconstruct_proof :
- Sign.sg -> term -> Proofterm.proof -> Proofterm.proof
-Reconstruct.expand_proof :
- Sign.sg -> string list -> Proofterm.proof -> Proofterm.proof
-ProofChecker.thm_of_proof : theory -> Proofterm.proof -> thm
-\end{ttbox}
-
-\begin{ttdescription}
-\item[Reconstruct.reconstruct_proof $sg$ $t$ $prf$]
-turns the partial proof $prf$ into a full proof of the
-proposition denoted by $t$, with respect to signature $sg$.
-Reconstruction will fail with an error message if $prf$
-is not a proof of $t$, is ill-formed, or does not contain
-sufficient information for reconstruction by
-{\em higher order pattern unification}
-\cite{nipkow-patterns, Berghofer-Nipkow:2000:TPHOL}.
-The latter can happen only for proofs
-built ``by hand'', not for those produced automatically
-by Isabelle's inference kernel.
-\item[Reconstruct.expand_proof $sg$
- \ttlbrack$name@1$, $\ldots$, $name@n${\ttrbrack} $prf$]
-expands and reconstructs the proofs of all theorems with names
-$name@1$, $\ldots$, $name@n$ in the (full) proof $prf$.
-\item[ProofChecker.thm_of_proof $thy$ $prf$] turns the (full) proof
-$prf$ into a theorem with respect to theory $thy$ by replaying
-it using only primitive rules from Isabelle's inference kernel.
-\end{ttdescription}
-
-\subsection{Parsing and printing proof terms}
-\index{proof terms!parsing}
-\index{proof terms!printing}
-
-Isabelle offers several functions for parsing and printing
-proof terms. The concrete syntax for proof terms is described
-in Fig.\ts\ref{fig:proof_gram}.
-Implicit term arguments in partial proofs are indicated
-by ``{\tt _}''.
-Type arguments for theorems and axioms may be specified using
-\verb!%! or ``$\cdot$'' with an argument of the form {\tt TYPE($type$)}
-(see \S\ref{sec:basic_syntax}).
-They must appear before any other term argument of a theorem
-or axiom. In contrast to term arguments, type arguments may
-be completely omitted.
-\begin{ttbox}
-ProofSyntax.read_proof : theory -> bool -> string -> Proofterm.proof
-ProofSyntax.pretty_proof : Sign.sg -> Proofterm.proof -> Pretty.T
-ProofSyntax.pretty_proof_of : bool -> thm -> Pretty.T
-ProofSyntax.print_proof_of : bool -> thm -> unit
-\end{ttbox}
-\begin{figure}
-\begin{center}
-\begin{tabular}{rcl}
-$proof$ & $=$ & {\tt Lam} $params${\tt .} $proof$ ~~$|$~~
- $\Lambda params${\tt .} $proof$ \\
- & $|$ & $proof$ \verb!%! $any$ ~~$|$~~
- $proof$ $\cdot$ $any$ \\
- & $|$ & $proof$ \verb!%%! $proof$ ~~$|$~~
- $proof$ {\boldmath$\cdot$} $proof$ \\
- & $|$ & $id$ ~~$|$~~ $longid$ \\\\
-$param$ & $=$ & $idt$ ~~$|$~~ $idt$ {\tt :} $prop$ ~~$|$~~
- {\tt (} $param$ {\tt )} \\\\
-$params$ & $=$ & $param$ ~~$|$~~ $param$ $params$
-\end{tabular}
-\end{center}
-\caption{Proof term syntax}\label{fig:proof_gram}
-\end{figure}
-The function {\tt read_proof} reads in a proof term with
-respect to a given theory. The boolean flag indicates whether
-the proof term to be parsed contains explicit typing information
-to be taken into account.
-Usually, typing information is left implicit and
-is inferred during proof reconstruction. The pretty printing
-functions operating on theorems take a boolean flag as an
-argument which indicates whether the proof term should
-be reconstructed before printing.
-
-The following example (based on Isabelle/HOL) illustrates how
-to parse and check proof terms. We start by parsing a partial
-proof term
-\begin{ttbox}
-val prf = ProofSyntax.read_proof Main.thy false
- "impI % _ % _ %% (Lam H : _. conjE % _ % _ % _ %% H %%
- (Lam (H1 : _) H2 : _. conjI % _ % _ %% H2 %% H1))";
-{\out val prf = PThm (("HOL.impI", []), \dots, \dots, None) % None % None %%}
-{\out AbsP ("H", None, PThm (("HOL.conjE", []), \dots, \dots, None) %}
-{\out None % None % None %% PBound 0 %%}
-{\out AbsP ("H1", None, AbsP ("H2", None, \dots))) : Proofterm.proof}
-\end{ttbox}
-The statement to be established by this proof is
-\begin{ttbox}
-val t = term_of
- (read_cterm (sign_of Main.thy) ("A & B --> B & A", propT));
-{\out val t = Const ("Trueprop", "bool => prop") $}
-{\out (Const ("op -->", "[bool, bool] => bool") $}
-{\out \dots $ \dots : Term.term}
-\end{ttbox}
-Using {\tt t} we can reconstruct the full proof
-\begin{ttbox}
-val prf' = Reconstruct.reconstruct_proof (sign_of Main.thy) t prf;
-{\out val prf' = PThm (("HOL.impI", []), \dots, \dots, Some []) %}
-{\out Some (Const ("op &", \dots) $ Free ("A", \dots) $ Free ("B", \dots)) %}
-{\out Some (Const ("op &", \dots) $ Free ("B", \dots) $ Free ("A", \dots)) %%}
-{\out AbsP ("H", Some (Const ("Trueprop", \dots) $ \dots), \dots)}
-{\out : Proofterm.proof}
-\end{ttbox}
-This proof can finally be turned into a theorem
-\begin{ttbox}
-val thm = ProofChecker.thm_of_proof Main.thy prf';
-{\out val thm = "A & B --> B & A" : Thm.thm}
-\end{ttbox}
-
-\index{proof terms|)}
-\index{theorems|)}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "ref"
-%%% End: