renamed doc-src to src/Doc;
author wenzelm
Tue Aug 28 18:57:32 2012 +0200 (2012-08-28)
changeset 48985 5386df44a037
parent 48984 f51d4a302962
child 48986 037d32448e29
renamed doc-src to src/Doc;
renamed TutorialI to Tutorial;
Admin/Release/makedist
ROOTS
doc-src/Classes/Classes.thy
doc-src/Classes/Setup.thy
doc-src/Classes/document/build
doc-src/Classes/document/root.tex
doc-src/Classes/document/style.sty
doc-src/Codegen/Adaptation.thy
doc-src/Codegen/Evaluation.thy
doc-src/Codegen/Foundations.thy
doc-src/Codegen/Further.thy
doc-src/Codegen/Inductive_Predicate.thy
doc-src/Codegen/Introduction.thy
doc-src/Codegen/Refinement.thy
doc-src/Codegen/Setup.thy
doc-src/Codegen/document/adapt.tex
doc-src/Codegen/document/architecture.tex
doc-src/Codegen/document/build
doc-src/Codegen/document/root.tex
doc-src/Codegen/document/style.sty
doc-src/Functions/Functions.thy
doc-src/Functions/document/build
doc-src/Functions/document/conclusion.tex
doc-src/Functions/document/intro.tex
doc-src/Functions/document/mathpartir.sty
doc-src/Functions/document/root.tex
doc-src/Functions/document/style.sty
doc-src/HOL/document/HOL.tex
doc-src/HOL/document/build
doc-src/HOL/document/root.tex
doc-src/Intro/document/advanced.tex
doc-src/Intro/document/build
doc-src/Intro/document/foundations.tex
doc-src/Intro/document/getting.tex
doc-src/Intro/document/root.tex
doc-src/IsarImplementation/Base.thy
doc-src/IsarImplementation/Eq.thy
doc-src/IsarImplementation/Integration.thy
doc-src/IsarImplementation/Isar.thy
doc-src/IsarImplementation/Local_Theory.thy
doc-src/IsarImplementation/Logic.thy
doc-src/IsarImplementation/ML.thy
doc-src/IsarImplementation/Prelim.thy
doc-src/IsarImplementation/Proof.thy
doc-src/IsarImplementation/Syntax.thy
doc-src/IsarImplementation/Tactic.thy
doc-src/IsarImplementation/document/build
doc-src/IsarImplementation/document/root.tex
doc-src/IsarImplementation/document/style.sty
doc-src/IsarRef/Base.thy
doc-src/IsarRef/Document_Preparation.thy
doc-src/IsarRef/First_Order_Logic.thy
doc-src/IsarRef/Framework.thy
doc-src/IsarRef/Generic.thy
doc-src/IsarRef/HOL_Specific.thy
doc-src/IsarRef/Inner_Syntax.thy
doc-src/IsarRef/ML_Tactic.thy
doc-src/IsarRef/Misc.thy
doc-src/IsarRef/Outer_Syntax.thy
doc-src/IsarRef/Preface.thy
doc-src/IsarRef/Proof.thy
doc-src/IsarRef/Quick_Reference.thy
doc-src/IsarRef/Spec.thy
doc-src/IsarRef/Symbols.thy
doc-src/IsarRef/Synopsis.thy
doc-src/IsarRef/document/build
doc-src/IsarRef/document/isar-vm.eps
doc-src/IsarRef/document/isar-vm.pdf
doc-src/IsarRef/document/isar-vm.svg
doc-src/IsarRef/document/root.tex
doc-src/IsarRef/document/showsymbols
doc-src/IsarRef/document/style.sty
doc-src/LaTeXsugar/Sugar.thy
doc-src/LaTeXsugar/document/build
doc-src/LaTeXsugar/document/mathpartir.sty
doc-src/LaTeXsugar/document/root.bib
doc-src/LaTeXsugar/document/root.tex
doc-src/Locales/Examples.thy
doc-src/Locales/Examples1.thy
doc-src/Locales/Examples2.thy
doc-src/Locales/Examples3.thy
doc-src/Locales/document/build
doc-src/Locales/document/root.bib
doc-src/Locales/document/root.tex
doc-src/Logics/abstract.txt
doc-src/Logics/document/CTT.tex
doc-src/Logics/document/LK.tex
doc-src/Logics/document/Sequents.tex
doc-src/Logics/document/build
doc-src/Logics/document/preface.tex
doc-src/Logics/document/root.tex
doc-src/Logics/document/syntax.tex
doc-src/Main/Main_Doc.thy
doc-src/Main/document/build
doc-src/Main/document/root.tex
doc-src/Nitpick/document/build
doc-src/Nitpick/document/root.tex
doc-src/ProgProve/Basics.thy
doc-src/ProgProve/Bool_nat_list.thy
doc-src/ProgProve/Isar.thy
doc-src/ProgProve/LaTeXsugar.thy
doc-src/ProgProve/Logic.thy
doc-src/ProgProve/MyList.thy
doc-src/ProgProve/Types_and_funs.thy
doc-src/ProgProve/document/bang.eps
doc-src/ProgProve/document/bang.pdf
doc-src/ProgProve/document/build
doc-src/ProgProve/document/intro-isabelle.tex
doc-src/ProgProve/document/mathpartir.sty
doc-src/ProgProve/document/prelude.tex
doc-src/ProgProve/document/root.bib
doc-src/ProgProve/document/root.tex
doc-src/ProgProve/document/svmono.cls
doc-src/ROOT
doc-src/Ref/abstract.txt
doc-src/Ref/document/build
doc-src/Ref/document/classical.tex
doc-src/Ref/document/root.tex
doc-src/Ref/document/simplifier.tex
doc-src/Ref/document/substitution.tex
doc-src/Ref/document/syntax.tex
doc-src/Ref/document/tactic.tex
doc-src/Ref/document/thm.tex
doc-src/Ref/undocumented.tex
doc-src/Sledgehammer/document/build
doc-src/Sledgehammer/document/root.tex
doc-src/System/Base.thy
doc-src/System/Basics.thy
doc-src/System/Interfaces.thy
doc-src/System/Misc.thy
doc-src/System/Presentation.thy
doc-src/System/Scala.thy
doc-src/System/Sessions.thy
doc-src/System/document/browser_screenshot.eps
doc-src/System/document/browser_screenshot.png
doc-src/System/document/build
doc-src/System/document/root.tex
doc-src/TutorialI/Advanced/Partial.thy
doc-src/TutorialI/Advanced/WFrec.thy
doc-src/TutorialI/Advanced/simp2.thy
doc-src/TutorialI/CTL/Base.thy
doc-src/TutorialI/CTL/CTL.thy
doc-src/TutorialI/CTL/CTLind.thy
doc-src/TutorialI/CTL/PDL.thy
doc-src/TutorialI/CodeGen/CodeGen.thy
doc-src/TutorialI/Datatype/ABexpr.thy
doc-src/TutorialI/Datatype/Fundata.thy
doc-src/TutorialI/Datatype/Nested.thy
doc-src/TutorialI/Datatype/unfoldnested.thy
doc-src/TutorialI/Documents/Documents.thy
doc-src/TutorialI/Fun/fun0.thy
doc-src/TutorialI/Ifexpr/Ifexpr.thy
doc-src/TutorialI/Inductive/AB.thy
doc-src/TutorialI/Inductive/Advanced.thy
doc-src/TutorialI/Inductive/Even.thy
doc-src/TutorialI/Inductive/Mutual.thy
doc-src/TutorialI/Inductive/Star.thy
doc-src/TutorialI/Misc/AdvancedInd.thy
doc-src/TutorialI/Misc/Itrev.thy
doc-src/TutorialI/Misc/Option2.thy
doc-src/TutorialI/Misc/Plus.thy
doc-src/TutorialI/Misc/Tree.thy
doc-src/TutorialI/Misc/Tree2.thy
doc-src/TutorialI/Misc/appendix.thy
doc-src/TutorialI/Misc/case_exprs.thy
doc-src/TutorialI/Misc/fakenat.thy
doc-src/TutorialI/Misc/natsum.thy
doc-src/TutorialI/Misc/pairs2.thy
doc-src/TutorialI/Misc/prime_def.thy
doc-src/TutorialI/Misc/simp.thy
doc-src/TutorialI/Misc/types.thy
doc-src/TutorialI/Protocol/Event.thy
doc-src/TutorialI/Protocol/Message.thy
doc-src/TutorialI/Protocol/NS_Public.thy
doc-src/TutorialI/Protocol/Public.thy
doc-src/TutorialI/Recdef/Induction.thy
doc-src/TutorialI/Recdef/Nested0.thy
doc-src/TutorialI/Recdef/Nested1.thy
doc-src/TutorialI/Recdef/Nested2.thy
doc-src/TutorialI/Recdef/examples.thy
doc-src/TutorialI/Recdef/simplification.thy
doc-src/TutorialI/Recdef/termination.thy
doc-src/TutorialI/Rules/Basic.thy
doc-src/TutorialI/Rules/Blast.thy
doc-src/TutorialI/Rules/Force.thy
doc-src/TutorialI/Rules/Forward.thy
doc-src/TutorialI/Rules/Primes.thy
doc-src/TutorialI/Rules/Tacticals.thy
doc-src/TutorialI/Rules/find2.thy
doc-src/TutorialI/Sets/Examples.thy
doc-src/TutorialI/Sets/Functions.thy
doc-src/TutorialI/Sets/Recur.thy
doc-src/TutorialI/Sets/Relations.thy
doc-src/TutorialI/ToyList/ToyList.thy
doc-src/TutorialI/ToyList/ToyList1
doc-src/TutorialI/ToyList/ToyList2
doc-src/TutorialI/Trie/Trie.thy
doc-src/TutorialI/Types/Axioms.thy
doc-src/TutorialI/Types/Numbers.thy
doc-src/TutorialI/Types/Overloading.thy
doc-src/TutorialI/Types/Pairs.thy
doc-src/TutorialI/Types/Records.thy
doc-src/TutorialI/Types/Setup.thy
doc-src/TutorialI/Types/Typedefs.thy
doc-src/TutorialI/document/Isa-logics.eps
doc-src/TutorialI/document/Isa-logics.pdf
doc-src/TutorialI/document/advanced0.tex
doc-src/TutorialI/document/appendix0.tex
doc-src/TutorialI/document/basics.tex
doc-src/TutorialI/document/build
doc-src/TutorialI/document/cl2emono-modified.sty
doc-src/TutorialI/document/ctl0.tex
doc-src/TutorialI/document/documents0.tex
doc-src/TutorialI/document/fp.tex
doc-src/TutorialI/document/inductive0.tex
doc-src/TutorialI/document/isa-index
doc-src/TutorialI/document/numerics.tex
doc-src/TutorialI/document/pghead.eps
doc-src/TutorialI/document/pghead.pdf
doc-src/TutorialI/document/preface.tex
doc-src/TutorialI/document/protocol.tex
doc-src/TutorialI/document/root.tex
doc-src/TutorialI/document/rules.tex
doc-src/TutorialI/document/sets.tex
doc-src/TutorialI/document/tutorial.sty
doc-src/TutorialI/document/typedef.pdf
doc-src/TutorialI/document/typedef.ps
doc-src/TutorialI/document/types0.tex
doc-src/TutorialI/todo.tobias
doc-src/ZF/FOL_examples.thy
doc-src/ZF/IFOL_examples.thy
doc-src/ZF/If.thy
doc-src/ZF/ZF_Isar.thy
doc-src/ZF/ZF_examples.thy
doc-src/ZF/document/FOL.tex
doc-src/ZF/document/ZF.tex
doc-src/ZF/document/build
doc-src/ZF/document/logics.sty
doc-src/ZF/document/root.tex
doc-src/antiquote_setup.ML
doc-src/extra.sty
doc-src/fixbookmarks
doc-src/iman.sty
doc-src/isar.sty
doc-src/manual.bib
doc-src/mathsing.sty
doc-src/more_antiquote.ML
doc-src/pdfsetup.sty
doc-src/preface.tex
doc-src/prepare_document
doc-src/proof.sty
doc-src/sedindex
doc-src/ttbox.sty
doc-src/underscore.sty
src/Doc/Classes/Classes.thy
src/Doc/Classes/Setup.thy
src/Doc/Classes/document/build
src/Doc/Classes/document/root.tex
src/Doc/Classes/document/style.sty
src/Doc/Codegen/Adaptation.thy
src/Doc/Codegen/Evaluation.thy
src/Doc/Codegen/Foundations.thy
src/Doc/Codegen/Further.thy
src/Doc/Codegen/Inductive_Predicate.thy
src/Doc/Codegen/Introduction.thy
src/Doc/Codegen/Refinement.thy
src/Doc/Codegen/Setup.thy
src/Doc/Codegen/document/adapt.tex
src/Doc/Codegen/document/architecture.tex
src/Doc/Codegen/document/build
src/Doc/Codegen/document/root.tex
src/Doc/Codegen/document/style.sty
src/Doc/Functions/Functions.thy
src/Doc/Functions/document/build
src/Doc/Functions/document/conclusion.tex
src/Doc/Functions/document/intro.tex
src/Doc/Functions/document/mathpartir.sty
src/Doc/Functions/document/root.tex
src/Doc/Functions/document/style.sty
src/Doc/HOL/document/HOL.tex
src/Doc/HOL/document/build
src/Doc/HOL/document/root.tex
src/Doc/Intro/document/advanced.tex
src/Doc/Intro/document/build
src/Doc/Intro/document/foundations.tex
src/Doc/Intro/document/getting.tex
src/Doc/Intro/document/root.tex
src/Doc/IsarImplementation/Base.thy
src/Doc/IsarImplementation/Eq.thy
src/Doc/IsarImplementation/Integration.thy
src/Doc/IsarImplementation/Isar.thy
src/Doc/IsarImplementation/Local_Theory.thy
src/Doc/IsarImplementation/Logic.thy
src/Doc/IsarImplementation/ML.thy
src/Doc/IsarImplementation/Prelim.thy
src/Doc/IsarImplementation/Proof.thy
src/Doc/IsarImplementation/Syntax.thy
src/Doc/IsarImplementation/Tactic.thy
src/Doc/IsarImplementation/document/build
src/Doc/IsarImplementation/document/root.tex
src/Doc/IsarImplementation/document/style.sty
src/Doc/IsarRef/Base.thy
src/Doc/IsarRef/Document_Preparation.thy
src/Doc/IsarRef/First_Order_Logic.thy
src/Doc/IsarRef/Framework.thy
src/Doc/IsarRef/Generic.thy
src/Doc/IsarRef/HOL_Specific.thy
src/Doc/IsarRef/Inner_Syntax.thy
src/Doc/IsarRef/ML_Tactic.thy
src/Doc/IsarRef/Misc.thy
src/Doc/IsarRef/Outer_Syntax.thy
src/Doc/IsarRef/Preface.thy
src/Doc/IsarRef/Proof.thy
src/Doc/IsarRef/Quick_Reference.thy
src/Doc/IsarRef/Spec.thy
src/Doc/IsarRef/Symbols.thy
src/Doc/IsarRef/Synopsis.thy
src/Doc/IsarRef/document/build
src/Doc/IsarRef/document/isar-vm.eps
src/Doc/IsarRef/document/isar-vm.pdf
src/Doc/IsarRef/document/isar-vm.svg
src/Doc/IsarRef/document/root.tex
src/Doc/IsarRef/document/showsymbols
src/Doc/IsarRef/document/style.sty
src/Doc/LaTeXsugar/Sugar.thy
src/Doc/LaTeXsugar/document/build
src/Doc/LaTeXsugar/document/mathpartir.sty
src/Doc/LaTeXsugar/document/root.bib
src/Doc/LaTeXsugar/document/root.tex
src/Doc/Locales/Examples.thy
src/Doc/Locales/Examples1.thy
src/Doc/Locales/Examples2.thy
src/Doc/Locales/Examples3.thy
src/Doc/Locales/document/build
src/Doc/Locales/document/root.bib
src/Doc/Locales/document/root.tex
src/Doc/Logics/abstract.txt
src/Doc/Logics/document/CTT.tex
src/Doc/Logics/document/LK.tex
src/Doc/Logics/document/Sequents.tex
src/Doc/Logics/document/build
src/Doc/Logics/document/preface.tex
src/Doc/Logics/document/root.tex
src/Doc/Logics/document/syntax.tex
src/Doc/Main/Main_Doc.thy
src/Doc/Main/document/build
src/Doc/Main/document/root.tex
src/Doc/Nitpick/document/build
src/Doc/Nitpick/document/root.tex
src/Doc/ProgProve/Basics.thy
src/Doc/ProgProve/Bool_nat_list.thy
src/Doc/ProgProve/Isar.thy
src/Doc/ProgProve/LaTeXsugar.thy
src/Doc/ProgProve/Logic.thy
src/Doc/ProgProve/MyList.thy
src/Doc/ProgProve/Types_and_funs.thy
src/Doc/ProgProve/document/bang.eps
src/Doc/ProgProve/document/bang.pdf
src/Doc/ProgProve/document/build
src/Doc/ProgProve/document/intro-isabelle.tex
src/Doc/ProgProve/document/mathpartir.sty
src/Doc/ProgProve/document/prelude.tex
src/Doc/ProgProve/document/root.bib
src/Doc/ProgProve/document/root.tex
src/Doc/ProgProve/document/svmono.cls
src/Doc/ROOT
src/Doc/Ref/abstract.txt
src/Doc/Ref/document/build
src/Doc/Ref/document/classical.tex
src/Doc/Ref/document/root.tex
src/Doc/Ref/document/simplifier.tex
src/Doc/Ref/document/substitution.tex
src/Doc/Ref/document/syntax.tex
src/Doc/Ref/document/tactic.tex
src/Doc/Ref/document/thm.tex
src/Doc/Ref/undocumented.tex
src/Doc/Sledgehammer/document/build
src/Doc/Sledgehammer/document/root.tex
src/Doc/System/Base.thy
src/Doc/System/Basics.thy
src/Doc/System/Interfaces.thy
src/Doc/System/Misc.thy
src/Doc/System/Presentation.thy
src/Doc/System/Scala.thy
src/Doc/System/Sessions.thy
src/Doc/System/document/browser_screenshot.eps
src/Doc/System/document/browser_screenshot.png
src/Doc/System/document/build
src/Doc/System/document/root.tex
src/Doc/Tutorial/Advanced/Partial.thy
src/Doc/Tutorial/Advanced/WFrec.thy
src/Doc/Tutorial/Advanced/simp2.thy
src/Doc/Tutorial/CTL/Base.thy
src/Doc/Tutorial/CTL/CTL.thy
src/Doc/Tutorial/CTL/CTLind.thy
src/Doc/Tutorial/CTL/PDL.thy
src/Doc/Tutorial/CodeGen/CodeGen.thy
src/Doc/Tutorial/Datatype/ABexpr.thy
src/Doc/Tutorial/Datatype/Fundata.thy
src/Doc/Tutorial/Datatype/Nested.thy
src/Doc/Tutorial/Datatype/unfoldnested.thy
src/Doc/Tutorial/Documents/Documents.thy
src/Doc/Tutorial/Fun/fun0.thy
src/Doc/Tutorial/Ifexpr/Ifexpr.thy
src/Doc/Tutorial/Inductive/AB.thy
src/Doc/Tutorial/Inductive/Advanced.thy
src/Doc/Tutorial/Inductive/Even.thy
src/Doc/Tutorial/Inductive/Mutual.thy
src/Doc/Tutorial/Inductive/Star.thy
src/Doc/Tutorial/Misc/AdvancedInd.thy
src/Doc/Tutorial/Misc/Itrev.thy
src/Doc/Tutorial/Misc/Option2.thy
src/Doc/Tutorial/Misc/Plus.thy
src/Doc/Tutorial/Misc/Tree.thy
src/Doc/Tutorial/Misc/Tree2.thy
src/Doc/Tutorial/Misc/appendix.thy
src/Doc/Tutorial/Misc/case_exprs.thy
src/Doc/Tutorial/Misc/fakenat.thy
src/Doc/Tutorial/Misc/natsum.thy
src/Doc/Tutorial/Misc/pairs2.thy
src/Doc/Tutorial/Misc/prime_def.thy
src/Doc/Tutorial/Misc/simp.thy
src/Doc/Tutorial/Misc/types.thy
src/Doc/Tutorial/Protocol/Event.thy
src/Doc/Tutorial/Protocol/Message.thy
src/Doc/Tutorial/Protocol/NS_Public.thy
src/Doc/Tutorial/Protocol/Public.thy
src/Doc/Tutorial/Recdef/Induction.thy
src/Doc/Tutorial/Recdef/Nested0.thy
src/Doc/Tutorial/Recdef/Nested1.thy
src/Doc/Tutorial/Recdef/Nested2.thy
src/Doc/Tutorial/Recdef/examples.thy
src/Doc/Tutorial/Recdef/simplification.thy
src/Doc/Tutorial/Recdef/termination.thy
src/Doc/Tutorial/Rules/Basic.thy
src/Doc/Tutorial/Rules/Blast.thy
src/Doc/Tutorial/Rules/Force.thy
src/Doc/Tutorial/Rules/Forward.thy
src/Doc/Tutorial/Rules/Primes.thy
src/Doc/Tutorial/Rules/Tacticals.thy
src/Doc/Tutorial/Rules/find2.thy
src/Doc/Tutorial/Sets/Examples.thy
src/Doc/Tutorial/Sets/Functions.thy
src/Doc/Tutorial/Sets/Recur.thy
src/Doc/Tutorial/Sets/Relations.thy
src/Doc/Tutorial/ToyList/ToyList.thy
src/Doc/Tutorial/ToyList/ToyList1
src/Doc/Tutorial/ToyList/ToyList2
src/Doc/Tutorial/Trie/Trie.thy
src/Doc/Tutorial/Types/Axioms.thy
src/Doc/Tutorial/Types/Numbers.thy
src/Doc/Tutorial/Types/Overloading.thy
src/Doc/Tutorial/Types/Pairs.thy
src/Doc/Tutorial/Types/Records.thy
src/Doc/Tutorial/Types/Setup.thy
src/Doc/Tutorial/Types/Typedefs.thy
src/Doc/Tutorial/document/Isa-logics.eps
src/Doc/Tutorial/document/Isa-logics.pdf
src/Doc/Tutorial/document/advanced0.tex
src/Doc/Tutorial/document/appendix0.tex
src/Doc/Tutorial/document/basics.tex
src/Doc/Tutorial/document/build
src/Doc/Tutorial/document/cl2emono-modified.sty
src/Doc/Tutorial/document/ctl0.tex
src/Doc/Tutorial/document/documents0.tex
src/Doc/Tutorial/document/fp.tex
src/Doc/Tutorial/document/inductive0.tex
src/Doc/Tutorial/document/isa-index
src/Doc/Tutorial/document/numerics.tex
src/Doc/Tutorial/document/pghead.eps
src/Doc/Tutorial/document/pghead.pdf
src/Doc/Tutorial/document/preface.tex
src/Doc/Tutorial/document/protocol.tex
src/Doc/Tutorial/document/root.tex
src/Doc/Tutorial/document/rules.tex
src/Doc/Tutorial/document/sets.tex
src/Doc/Tutorial/document/tutorial.sty
src/Doc/Tutorial/document/typedef.pdf
src/Doc/Tutorial/document/typedef.ps
src/Doc/Tutorial/document/types0.tex
src/Doc/Tutorial/todo.tobias
src/Doc/ZF/FOL_examples.thy
src/Doc/ZF/IFOL_examples.thy
src/Doc/ZF/If.thy
src/Doc/ZF/ZF_Isar.thy
src/Doc/ZF/ZF_examples.thy
src/Doc/ZF/document/FOL.tex
src/Doc/ZF/document/ZF.tex
src/Doc/ZF/document/build
src/Doc/ZF/document/logics.sty
src/Doc/ZF/document/root.tex
src/Doc/antiquote_setup.ML
src/Doc/extra.sty
src/Doc/fixbookmarks
src/Doc/iman.sty
src/Doc/isar.sty
src/Doc/manual.bib
src/Doc/mathsing.sty
src/Doc/more_antiquote.ML
src/Doc/pdfsetup.sty
src/Doc/preface.tex
src/Doc/prepare_document
src/Doc/proof.sty
src/Doc/sedindex
src/Doc/ttbox.sty
src/Doc/underscore.sty
     1.1 --- a/Admin/Release/makedist	Tue Aug 28 18:46:15 2012 +0200
     1.2 +++ b/Admin/Release/makedist	Tue Aug 28 18:57:32 2012 +0200
     1.3 @@ -149,7 +149,7 @@
     1.4  
     1.5  ./Admin/build all || fail "Failed to build distribution"
     1.6  
     1.7 -cp -a doc-src doc-src.orig
     1.8 +cp -a src/Doc src/Doc.orig
     1.9  ./bin/isabelle build_doc -a || fail "Failed to build documentation"
    1.10  
    1.11  if [ -n "$ISABELLE_JEDIT_BUILD_HOME" ]; then
    1.12 @@ -161,9 +161,9 @@
    1.13  fi
    1.14  
    1.15  rm -rf Admin
    1.16 -rm -rf doc-src
    1.17 +rm -rf src/Doc
    1.18  
    1.19 -mv doc-src.orig doc-src
    1.20 +mv src/Doc.orig src/Doc
    1.21  
    1.22  mkdir -p contrib
    1.23  cat >contrib/README <<EOF
     2.1 --- a/ROOTS	Tue Aug 28 18:46:15 2012 +0200
     2.2 +++ b/ROOTS	Tue Aug 28 18:57:32 2012 +0200
     2.3 @@ -8,4 +8,4 @@
     2.4  src/FOLP
     2.5  src/LCF
     2.6  src/Sequents
     2.7 -doc-src
     2.8 +src/Doc
     3.1 --- a/doc-src/Classes/Classes.thy	Tue Aug 28 18:46:15 2012 +0200
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,642 +0,0 @@
     3.4 -theory Classes
     3.5 -imports Main Setup
     3.6 -begin
     3.7 -
     3.8 -section {* Introduction *}
     3.9 -
    3.10 -text {*
    3.11 -  Type classes were introduced by Wadler and Blott \cite{wadler89how}
    3.12 -  into the Haskell language to allow for a reasonable implementation
    3.13 -  of overloading\footnote{throughout this tutorial, we are referring
    3.14 -  to classical Haskell 1.0 type classes, not considering later
    3.15 -  additions in expressiveness}.  As a canonical example, a polymorphic
    3.16 -  equality function @{text "eq \<Colon> \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} which is overloaded on
    3.17 -  different types for @{text "\<alpha>"}, which is achieved by splitting
    3.18 -  introduction of the @{text eq} function from its overloaded
    3.19 -  definitions by means of @{text class} and @{text instance}
    3.20 -  declarations: \footnote{syntax here is a kind of isabellized
    3.21 -  Haskell}
    3.22 -
    3.23 -  \begin{quote}
    3.24 -
    3.25 -  \noindent@{text "class eq where"} \\
    3.26 -  \hspace*{2ex}@{text "eq \<Colon> \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
    3.27 -
    3.28 -  \medskip\noindent@{text "instance nat \<Colon> eq where"} \\
    3.29 -  \hspace*{2ex}@{text "eq 0 0 = True"} \\
    3.30 -  \hspace*{2ex}@{text "eq 0 _ = False"} \\
    3.31 -  \hspace*{2ex}@{text "eq _ 0 = False"} \\
    3.32 -  \hspace*{2ex}@{text "eq (Suc n) (Suc m) = eq n m"}
    3.33 -
    3.34 -  \medskip\noindent@{text "instance (\<alpha>\<Colon>eq, \<beta>\<Colon>eq) pair \<Colon> eq where"} \\
    3.35 -  \hspace*{2ex}@{text "eq (x1, y1) (x2, y2) = eq x1 x2 \<and> eq y1 y2"}
    3.36 -
    3.37 -  \medskip\noindent@{text "class ord extends eq where"} \\
    3.38 -  \hspace*{2ex}@{text "less_eq \<Colon> \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
    3.39 -  \hspace*{2ex}@{text "less \<Colon> \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"}
    3.40 -
    3.41 -  \end{quote}
    3.42 -
    3.43 -  \noindent Type variables are annotated with (finitely many) classes;
    3.44 -  these annotations are assertions that a particular polymorphic type
    3.45 -  provides definitions for overloaded functions.
    3.46 -
    3.47 -  Indeed, type classes not only allow for simple overloading but form
    3.48 -  a generic calculus, an instance of order-sorted algebra
    3.49 -  \cite{nipkow-sorts93,Nipkow-Prehofer:1993,Wenzel:1997:TPHOL}.
    3.50 -
    3.51 -  From a software engineering point of view, type classes roughly
    3.52 -  correspond to interfaces in object-oriented languages like Java; so,
    3.53 -  it is naturally desirable that type classes do not only provide
    3.54 -  functions (class parameters) but also state specifications
    3.55 -  implementations must obey.  For example, the @{text "class eq"}
    3.56 -  above could be given the following specification, demanding that
    3.57 -  @{text "class eq"} is an equivalence relation obeying reflexivity,
    3.58 -  symmetry and transitivity:
    3.59 -
    3.60 -  \begin{quote}
    3.61 -
    3.62 -  \noindent@{text "class eq where"} \\
    3.63 -  \hspace*{2ex}@{text "eq \<Colon> \<alpha> \<Rightarrow> \<alpha> \<Rightarrow> bool"} \\
    3.64 -  @{text "satisfying"} \\
    3.65 -  \hspace*{2ex}@{text "refl: eq x x"} \\
    3.66 -  \hspace*{2ex}@{text "sym: eq x y \<longleftrightarrow> eq x y"} \\
    3.67 -  \hspace*{2ex}@{text "trans: eq x y \<and> eq y z \<longrightarrow> eq x z"}
    3.68 -
    3.69 -  \end{quote}
    3.70 -
    3.71 -  \noindent From a theoretical point of view, type classes are
    3.72 -  lightweight modules; Haskell type classes may be emulated by SML
    3.73 -  functors \cite{classes_modules}.  Isabelle/Isar offers a discipline
    3.74 -  of type classes which brings all those aspects together:
    3.75 -
    3.76 -  \begin{enumerate}
    3.77 -    \item specifying abstract parameters together with
    3.78 -       corresponding specifications,
    3.79 -    \item instantiating those abstract parameters by a particular
    3.80 -       type
    3.81 -    \item in connection with a ``less ad-hoc'' approach to overloading,
    3.82 -    \item with a direct link to the Isabelle module system:
    3.83 -      locales \cite{kammueller-locales}.
    3.84 -  \end{enumerate}
    3.85 -
    3.86 -  \noindent Isar type classes also directly support code generation in
    3.87 -  a Haskell like fashion. Internally, they are mapped to more
    3.88 -  primitive Isabelle concepts \cite{Haftmann-Wenzel:2006:classes}.
    3.89 -
    3.90 -  This tutorial demonstrates common elements of structured
    3.91 -  specifications and abstract reasoning with type classes by the
    3.92 -  algebraic hierarchy of semigroups, monoids and groups.  Our
    3.93 -  background theory is that of Isabelle/HOL \cite{isa-tutorial}, for
    3.94 -  which some familiarity is assumed.
    3.95 -*}
    3.96 -
    3.97 -section {* A simple algebra example \label{sec:example} *}
    3.98 -
    3.99 -subsection {* Class definition *}
   3.100 -
   3.101 -text {*
   3.102 -  Depending on an arbitrary type @{text "\<alpha>"}, class @{text
   3.103 -  "semigroup"} introduces a binary operator @{text "(\<otimes>)"} that is
   3.104 -  assumed to be associative:
   3.105 -*}
   3.106 -
   3.107 -class %quote semigroup =
   3.108 -  fixes mult :: "\<alpha> \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"    (infixl "\<otimes>" 70)
   3.109 -  assumes assoc: "(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
   3.110 -
   3.111 -text {*
   3.112 -  \noindent This @{command class} specification consists of two parts:
   3.113 -  the \qn{operational} part names the class parameter (@{element
   3.114 -  "fixes"}), the \qn{logical} part specifies properties on them
   3.115 -  (@{element "assumes"}).  The local @{element "fixes"} and @{element
   3.116 -  "assumes"} are lifted to the theory toplevel, yielding the global
   3.117 -  parameter @{term [source] "mult \<Colon> \<alpha>\<Colon>semigroup \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>"} and the
   3.118 -  global theorem @{fact "semigroup.assoc:"}~@{prop [source] "\<And>x y z \<Colon>
   3.119 -  \<alpha>\<Colon>semigroup. (x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"}.
   3.120 -*}
   3.121 -
   3.122 -
   3.123 -subsection {* Class instantiation \label{sec:class_inst} *}
   3.124 -
   3.125 -text {*
   3.126 -  The concrete type @{typ int} is made a @{class semigroup} instance
   3.127 -  by providing a suitable definition for the class parameter @{text
   3.128 -  "(\<otimes>)"} and a proof for the specification of @{fact assoc}.  This is
   3.129 -  accomplished by the @{command instantiation} target:
   3.130 -*}
   3.131 -
   3.132 -instantiation %quote int :: semigroup
   3.133 -begin
   3.134 -
   3.135 -definition %quote
   3.136 -  mult_int_def: "i \<otimes> j = i + (j\<Colon>int)"
   3.137 -
   3.138 -instance %quote proof
   3.139 -  fix i j k :: int have "(i + j) + k = i + (j + k)" by simp
   3.140 -  then show "(i \<otimes> j) \<otimes> k = i \<otimes> (j \<otimes> k)"
   3.141 -    unfolding mult_int_def .
   3.142 -qed
   3.143 -
   3.144 -end %quote
   3.145 -
   3.146 -text {*
   3.147 -  \noindent @{command instantiation} defines class parameters at a
   3.148 -  particular instance using common specification tools (here,
   3.149 -  @{command definition}).  The concluding @{command instance} opens a
   3.150 -  proof that the given parameters actually conform to the class
   3.151 -  specification.  Note that the first proof step is the @{method
   3.152 -  default} method, which for such instance proofs maps to the @{method
   3.153 -  intro_classes} method.  This reduces an instance judgement to the
   3.154 -  relevant primitive proof goals; typically it is the first method
   3.155 -  applied in an instantiation proof.
   3.156 -
   3.157 -  From now on, the type-checker will consider @{typ int} as a @{class
   3.158 -  semigroup} automatically, i.e.\ any general results are immediately
   3.159 -  available on concrete instances.
   3.160 -
   3.161 -  \medskip Another instance of @{class semigroup} yields the natural
   3.162 -  numbers:
   3.163 -*}
   3.164 -
   3.165 -instantiation %quote nat :: semigroup
   3.166 -begin
   3.167 -
   3.168 -primrec %quote mult_nat where
   3.169 -  "(0\<Colon>nat) \<otimes> n = n"
   3.170 -  | "Suc m \<otimes> n = Suc (m \<otimes> n)"
   3.171 -
   3.172 -instance %quote proof
   3.173 -  fix m n q :: nat 
   3.174 -  show "m \<otimes> n \<otimes> q = m \<otimes> (n \<otimes> q)"
   3.175 -    by (induct m) auto
   3.176 -qed
   3.177 -
   3.178 -end %quote
   3.179 -
   3.180 -text {*
   3.181 -  \noindent Note the occurence of the name @{text mult_nat} in the
   3.182 -  primrec declaration; by default, the local name of a class operation
   3.183 -  @{text f} to be instantiated on type constructor @{text \<kappa>} is
   3.184 -  mangled as @{text f_\<kappa>}.  In case of uncertainty, these names may be
   3.185 -  inspected using the @{command "print_context"} command or the
   3.186 -  corresponding ProofGeneral button.
   3.187 -*}
   3.188 -
   3.189 -subsection {* Lifting and parametric types *}
   3.190 -
   3.191 -text {*
   3.192 -  Overloaded definitions given at a class instantiation may include
   3.193 -  recursion over the syntactic structure of types.  As a canonical
   3.194 -  example, we model product semigroups using our simple algebra:
   3.195 -*}
   3.196 -
   3.197 -instantiation %quote prod :: (semigroup, semigroup) semigroup
   3.198 -begin
   3.199 -
   3.200 -definition %quote
   3.201 -  mult_prod_def: "p\<^isub>1 \<otimes> p\<^isub>2 = (fst p\<^isub>1 \<otimes> fst p\<^isub>2, snd p\<^isub>1 \<otimes> snd p\<^isub>2)"
   3.202 -
   3.203 -instance %quote proof
   3.204 -  fix p\<^isub>1 p\<^isub>2 p\<^isub>3 :: "\<alpha>\<Colon>semigroup \<times> \<beta>\<Colon>semigroup"
   3.205 -  show "p\<^isub>1 \<otimes> p\<^isub>2 \<otimes> p\<^isub>3 = p\<^isub>1 \<otimes> (p\<^isub>2 \<otimes> p\<^isub>3)"
   3.206 -    unfolding mult_prod_def by (simp add: assoc)
   3.207 -qed      
   3.208 -
   3.209 -end %quote
   3.210 -
   3.211 -text {*
   3.212 -  \noindent Associativity of product semigroups is established using
   3.213 -  the definition of @{text "(\<otimes>)"} on products and the hypothetical
   3.214 -  associativity of the type components; these hypotheses are
   3.215 -  legitimate due to the @{class semigroup} constraints imposed on the
   3.216 -  type components by the @{command instance} proposition.  Indeed,
   3.217 -  this pattern often occurs with parametric types and type classes.
   3.218 -*}
   3.219 -
   3.220 -
   3.221 -subsection {* Subclassing *}
   3.222 -
   3.223 -text {*
   3.224 -  We define a subclass @{text monoidl} (a semigroup with a left-hand
   3.225 -  neutral) by extending @{class semigroup} with one additional
   3.226 -  parameter @{text neutral} together with its characteristic property:
   3.227 -*}
   3.228 -
   3.229 -class %quote monoidl = semigroup +
   3.230 -  fixes neutral :: "\<alpha>" ("\<one>")
   3.231 -  assumes neutl: "\<one> \<otimes> x = x"
   3.232 -
   3.233 -text {*
   3.234 -  \noindent Again, we prove some instances, by providing suitable
   3.235 -  parameter definitions and proofs for the additional specifications.
   3.236 -  Observe that instantiations for types with the same arity may be
   3.237 -  simultaneous:
   3.238 -*}
   3.239 -
   3.240 -instantiation %quote nat and int :: monoidl
   3.241 -begin
   3.242 -
   3.243 -definition %quote
   3.244 -  neutral_nat_def: "\<one> = (0\<Colon>nat)"
   3.245 -
   3.246 -definition %quote
   3.247 -  neutral_int_def: "\<one> = (0\<Colon>int)"
   3.248 -
   3.249 -instance %quote proof
   3.250 -  fix n :: nat
   3.251 -  show "\<one> \<otimes> n = n"
   3.252 -    unfolding neutral_nat_def by simp
   3.253 -next
   3.254 -  fix k :: int
   3.255 -  show "\<one> \<otimes> k = k"
   3.256 -    unfolding neutral_int_def mult_int_def by simp
   3.257 -qed
   3.258 -
   3.259 -end %quote
   3.260 -
   3.261 -instantiation %quote prod :: (monoidl, monoidl) monoidl
   3.262 -begin
   3.263 -
   3.264 -definition %quote
   3.265 -  neutral_prod_def: "\<one> = (\<one>, \<one>)"
   3.266 -
   3.267 -instance %quote proof
   3.268 -  fix p :: "\<alpha>\<Colon>monoidl \<times> \<beta>\<Colon>monoidl"
   3.269 -  show "\<one> \<otimes> p = p"
   3.270 -    unfolding neutral_prod_def mult_prod_def by (simp add: neutl)
   3.271 -qed
   3.272 -
   3.273 -end %quote
   3.274 -
   3.275 -text {*
   3.276 -  \noindent Fully-fledged monoids are modelled by another subclass,
   3.277 -  which does not add new parameters but tightens the specification:
   3.278 -*}
   3.279 -
   3.280 -class %quote monoid = monoidl +
   3.281 -  assumes neutr: "x \<otimes> \<one> = x"
   3.282 -
   3.283 -instantiation %quote nat and int :: monoid 
   3.284 -begin
   3.285 -
   3.286 -instance %quote proof
   3.287 -  fix n :: nat
   3.288 -  show "n \<otimes> \<one> = n"
   3.289 -    unfolding neutral_nat_def by (induct n) simp_all
   3.290 -next
   3.291 -  fix k :: int
   3.292 -  show "k \<otimes> \<one> = k"
   3.293 -    unfolding neutral_int_def mult_int_def by simp
   3.294 -qed
   3.295 -
   3.296 -end %quote
   3.297 -
   3.298 -instantiation %quote prod :: (monoid, monoid) monoid
   3.299 -begin
   3.300 -
   3.301 -instance %quote proof 
   3.302 -  fix p :: "\<alpha>\<Colon>monoid \<times> \<beta>\<Colon>monoid"
   3.303 -  show "p \<otimes> \<one> = p"
   3.304 -    unfolding neutral_prod_def mult_prod_def by (simp add: neutr)
   3.305 -qed
   3.306 -
   3.307 -end %quote
   3.308 -
   3.309 -text {*
   3.310 -  \noindent To finish our small algebra example, we add a @{text
   3.311 -  group} class with a corresponding instance:
   3.312 -*}
   3.313 -
   3.314 -class %quote group = monoidl +
   3.315 -  fixes inverse :: "\<alpha> \<Rightarrow> \<alpha>"    ("(_\<div>)" [1000] 999)
   3.316 -  assumes invl: "x\<div> \<otimes> x = \<one>"
   3.317 -
   3.318 -instantiation %quote int :: group
   3.319 -begin
   3.320 -
   3.321 -definition %quote
   3.322 -  inverse_int_def: "i\<div> = - (i\<Colon>int)"
   3.323 -
   3.324 -instance %quote proof
   3.325 -  fix i :: int
   3.326 -  have "-i + i = 0" by simp
   3.327 -  then show "i\<div> \<otimes> i = \<one>"
   3.328 -    unfolding mult_int_def neutral_int_def inverse_int_def .
   3.329 -qed
   3.330 -
   3.331 -end %quote
   3.332 -
   3.333 -
   3.334 -section {* Type classes as locales *}
   3.335 -
   3.336 -subsection {* A look behind the scenes *}
   3.337 -
   3.338 -text {*
   3.339 -  The example above gives an impression how Isar type classes work in
   3.340 -  practice.  As stated in the introduction, classes also provide a
   3.341 -  link to Isar's locale system.  Indeed, the logical core of a class
   3.342 -  is nothing other than a locale:
   3.343 -*}
   3.344 -
   3.345 -class %quote idem =
   3.346 -  fixes f :: "\<alpha> \<Rightarrow> \<alpha>"
   3.347 -  assumes idem: "f (f x) = f x"
   3.348 -
   3.349 -text {*
   3.350 -  \noindent essentially introduces the locale
   3.351 -*} (*<*)setup %invisible {* Sign.add_path "foo" *}
   3.352 -(*>*)
   3.353 -locale %quote idem =
   3.354 -  fixes f :: "\<alpha> \<Rightarrow> \<alpha>"
   3.355 -  assumes idem: "f (f x) = f x"
   3.356 -
   3.357 -text {* \noindent together with corresponding constant(s): *}
   3.358 -
   3.359 -consts %quote f :: "\<alpha> \<Rightarrow> \<alpha>"
   3.360 -
   3.361 -text {*
   3.362 -  \noindent The connection to the type system is done by means
   3.363 -  of a primitive type class
   3.364 -*} (*<*)setup %invisible {* Sign.add_path "foo" *}
   3.365 -(*>*)
   3.366 -classes %quote idem < type
   3.367 -(*<*)axiomatization where idem: "f (f (x::\<alpha>\<Colon>idem)) = f x"
   3.368 -setup %invisible {* Sign.parent_path *}(*>*)
   3.369 -
   3.370 -text {* \noindent together with a corresponding interpretation: *}
   3.371 -
   3.372 -interpretation %quote idem_class:
   3.373 -  idem "f \<Colon> (\<alpha>\<Colon>idem) \<Rightarrow> \<alpha>"
   3.374 -(*<*)proof qed (rule idem)(*>*)
   3.375 -
   3.376 -text {*
   3.377 -  \noindent This gives you the full power of the Isabelle module system;
   3.378 -  conclusions in locale @{text idem} are implicitly propagated
   3.379 -  to class @{text idem}.
   3.380 -*} (*<*)setup %invisible {* Sign.parent_path *}
   3.381 -(*>*)
   3.382 -subsection {* Abstract reasoning *}
   3.383 -
   3.384 -text {*
   3.385 -  Isabelle locales enable reasoning at a general level, while results
   3.386 -  are implicitly transferred to all instances.  For example, we can
   3.387 -  now establish the @{text "left_cancel"} lemma for groups, which
   3.388 -  states that the function @{text "(x \<otimes>)"} is injective:
   3.389 -*}
   3.390 -
   3.391 -lemma %quote (in group) left_cancel: "x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"
   3.392 -proof
   3.393 -  assume "x \<otimes> y = x \<otimes> z"
   3.394 -  then have "x\<div> \<otimes> (x \<otimes> y) = x\<div> \<otimes> (x \<otimes> z)" by simp
   3.395 -  then have "(x\<div> \<otimes> x) \<otimes> y = (x\<div> \<otimes> x) \<otimes> z" using assoc by simp
   3.396 -  then show "y = z" using neutl and invl by simp
   3.397 -next
   3.398 -  assume "y = z"
   3.399 -  then show "x \<otimes> y = x \<otimes> z" by simp
   3.400 -qed
   3.401 -
   3.402 -text {*
   3.403 -  \noindent Here the \qt{@{keyword "in"} @{class group}} target
   3.404 -  specification indicates that the result is recorded within that
   3.405 -  context for later use.  This local theorem is also lifted to the
   3.406 -  global one @{fact "group.left_cancel:"} @{prop [source] "\<And>x y z \<Colon>
   3.407 -  \<alpha>\<Colon>group. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y = z"}.  Since type @{text "int"} has been
   3.408 -  made an instance of @{text "group"} before, we may refer to that
   3.409 -  fact as well: @{prop [source] "\<And>x y z \<Colon> int. x \<otimes> y = x \<otimes> z \<longleftrightarrow> y =
   3.410 -  z"}.
   3.411 -*}
   3.412 -
   3.413 -
   3.414 -subsection {* Derived definitions *}
   3.415 -
   3.416 -text {*
   3.417 -  Isabelle locales are targets which support local definitions:
   3.418 -*}
   3.419 -
   3.420 -primrec %quote (in monoid) pow_nat :: "nat \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>" where
   3.421 -  "pow_nat 0 x = \<one>"
   3.422 -  | "pow_nat (Suc n) x = x \<otimes> pow_nat n x"
   3.423 -
   3.424 -text {*
   3.425 -  \noindent If the locale @{text group} is also a class, this local
   3.426 -  definition is propagated onto a global definition of @{term [source]
   3.427 -  "pow_nat \<Colon> nat \<Rightarrow> \<alpha>\<Colon>monoid \<Rightarrow> \<alpha>\<Colon>monoid"} with corresponding theorems
   3.428 -
   3.429 -  @{thm pow_nat.simps [no_vars]}.
   3.430 -
   3.431 -  \noindent As you can see from this example, for local definitions
   3.432 -  you may use any specification tool which works together with
   3.433 -  locales, such as Krauss's recursive function package
   3.434 -  \cite{krauss2006}.
   3.435 -*}
   3.436 -
   3.437 -
   3.438 -subsection {* A functor analogy *}
   3.439 -
   3.440 -text {*
   3.441 -  We introduced Isar classes by analogy to type classes in functional
   3.442 -  programming; if we reconsider this in the context of what has been
   3.443 -  said about type classes and locales, we can drive this analogy
   3.444 -  further by stating that type classes essentially correspond to
   3.445 -  functors that have a canonical interpretation as type classes.
   3.446 -  There is also the possibility of other interpretations.  For
   3.447 -  example, @{text list}s also form a monoid with @{text append} and
   3.448 -  @{term "[]"} as operations, but it seems inappropriate to apply to
   3.449 -  lists the same operations as for genuinely algebraic types.  In such
   3.450 -  a case, we can simply make a particular interpretation of monoids
   3.451 -  for lists:
   3.452 -*}
   3.453 -
   3.454 -interpretation %quote list_monoid: monoid append "[]"
   3.455 -  proof qed auto
   3.456 -
   3.457 -text {*
   3.458 -  \noindent This enables us to apply facts on monoids
   3.459 -  to lists, e.g. @{thm list_monoid.neutl [no_vars]}.
   3.460 -
   3.461 -  When using this interpretation pattern, it may also
   3.462 -  be appropriate to map derived definitions accordingly:
   3.463 -*}
   3.464 -
   3.465 -primrec %quote replicate :: "nat \<Rightarrow> \<alpha> list \<Rightarrow> \<alpha> list" where
   3.466 -  "replicate 0 _ = []"
   3.467 -  | "replicate (Suc n) xs = xs @ replicate n xs"
   3.468 -
   3.469 -interpretation %quote list_monoid: monoid append "[]" where
   3.470 -  "monoid.pow_nat append [] = replicate"
   3.471 -proof -
   3.472 -  interpret monoid append "[]" ..
   3.473 -  show "monoid.pow_nat append [] = replicate"
   3.474 -  proof
   3.475 -    fix n
   3.476 -    show "monoid.pow_nat append [] n = replicate n"
   3.477 -      by (induct n) auto
   3.478 -  qed
   3.479 -qed intro_locales
   3.480 -
   3.481 -text {*
   3.482 -  \noindent This pattern is also helpful to reuse abstract
   3.483 -  specifications on the \emph{same} type.  For example, think of a
   3.484 -  class @{text preorder}; for type @{typ nat}, there are at least two
   3.485 -  possible instances: the natural order or the order induced by the
   3.486 -  divides relation.  But only one of these instances can be used for
   3.487 -  @{command instantiation}; using the locale behind the class @{text
   3.488 -  preorder}, it is still possible to utilise the same abstract
   3.489 -  specification again using @{command interpretation}.
   3.490 -*}
   3.491 -
   3.492 -subsection {* Additional subclass relations *}
   3.493 -
   3.494 -text {*
   3.495 -  Any @{text "group"} is also a @{text "monoid"}; this can be made
   3.496 -  explicit by claiming an additional subclass relation, together with
   3.497 -  a proof of the logical difference:
   3.498 -*}
   3.499 -
   3.500 -subclass %quote (in group) monoid
   3.501 -proof
   3.502 -  fix x
   3.503 -  from invl have "x\<div> \<otimes> x = \<one>" by simp
   3.504 -  with assoc [symmetric] neutl invl have "x\<div> \<otimes> (x \<otimes> \<one>) = x\<div> \<otimes> x" by simp
   3.505 -  with left_cancel show "x \<otimes> \<one> = x" by simp
   3.506 -qed
   3.507 -
   3.508 -text {*
   3.509 -  The logical proof is carried out on the locale level.  Afterwards it
   3.510 -  is propagated to the type system, making @{text group} an instance
   3.511 -  of @{text monoid} by adding an additional edge to the graph of
   3.512 -  subclass relations (\figref{fig:subclass}).
   3.513 -
   3.514 -  \begin{figure}[htbp]
   3.515 -   \begin{center}
   3.516 -     \small
   3.517 -     \unitlength 0.6mm
   3.518 -     \begin{picture}(40,60)(0,0)
   3.519 -       \put(20,60){\makebox(0,0){@{text semigroup}}}
   3.520 -       \put(20,40){\makebox(0,0){@{text monoidl}}}
   3.521 -       \put(00,20){\makebox(0,0){@{text monoid}}}
   3.522 -       \put(40,00){\makebox(0,0){@{text group}}}
   3.523 -       \put(20,55){\vector(0,-1){10}}
   3.524 -       \put(15,35){\vector(-1,-1){10}}
   3.525 -       \put(25,35){\vector(1,-3){10}}
   3.526 -     \end{picture}
   3.527 -     \hspace{8em}
   3.528 -     \begin{picture}(40,60)(0,0)
   3.529 -       \put(20,60){\makebox(0,0){@{text semigroup}}}
   3.530 -       \put(20,40){\makebox(0,0){@{text monoidl}}}
   3.531 -       \put(00,20){\makebox(0,0){@{text monoid}}}
   3.532 -       \put(40,00){\makebox(0,0){@{text group}}}
   3.533 -       \put(20,55){\vector(0,-1){10}}
   3.534 -       \put(15,35){\vector(-1,-1){10}}
   3.535 -       \put(05,15){\vector(3,-1){30}}
   3.536 -     \end{picture}
   3.537 -     \caption{Subclass relationship of monoids and groups:
   3.538 -        before and after establishing the relationship
   3.539 -        @{text "group \<subseteq> monoid"};  transitive edges are left out.}
   3.540 -     \label{fig:subclass}
   3.541 -   \end{center}
   3.542 -  \end{figure}
   3.543 -
   3.544 -  For illustration, a derived definition in @{text group} using @{text
   3.545 -  pow_nat}
   3.546 -*}
   3.547 -
   3.548 -definition %quote (in group) pow_int :: "int \<Rightarrow> \<alpha> \<Rightarrow> \<alpha>" where
   3.549 -  "pow_int k x = (if k >= 0
   3.550 -    then pow_nat (nat k) x
   3.551 -    else (pow_nat (nat (- k)) x)\<div>)"
   3.552 -
   3.553 -text {*
   3.554 -  \noindent yields the global definition of @{term [source] "pow_int \<Colon>
   3.555 -  int \<Rightarrow> \<alpha>\<Colon>group \<Rightarrow> \<alpha>\<Colon>group"} with the corresponding theorem @{thm
   3.556 -  pow_int_def [no_vars]}.
   3.557 -*}
   3.558 -
   3.559 -subsection {* A note on syntax *}
   3.560 -
   3.561 -text {*
   3.562 -  As a convenience, class context syntax allows references to local
   3.563 -  class operations and their global counterparts uniformly; type
   3.564 -  inference resolves ambiguities.  For example:
   3.565 -*}
   3.566 -
   3.567 -context %quote semigroup
   3.568 -begin
   3.569 -
   3.570 -term %quote "x \<otimes> y" -- {* example 1 *}
   3.571 -term %quote "(x\<Colon>nat) \<otimes> y" -- {* example 2 *}
   3.572 -
   3.573 -end  %quote
   3.574 -
   3.575 -term %quote "x \<otimes> y" -- {* example 3 *}
   3.576 -
   3.577 -text {*
   3.578 -  \noindent Here in example 1, the term refers to the local class
   3.579 -  operation @{text "mult [\<alpha>]"}, whereas in example 2 the type
   3.580 -  constraint enforces the global class operation @{text "mult [nat]"}.
   3.581 -  In the global context in example 3, the reference is to the
   3.582 -  polymorphic global class operation @{text "mult [?\<alpha> \<Colon> semigroup]"}.
   3.583 -*}
   3.584 -
   3.585 -section {* Further issues *}
   3.586 -
   3.587 -subsection {* Type classes and code generation *}
   3.588 -
   3.589 -text {*
   3.590 -  Turning back to the first motivation for type classes, namely
   3.591 -  overloading, it is obvious that overloading stemming from @{command
   3.592 -  class} statements and @{command instantiation} targets naturally
   3.593 -  maps to Haskell type classes.  The code generator framework
   3.594 -  \cite{isabelle-codegen} takes this into account.  If the target
   3.595 -  language (e.g.~SML) lacks type classes, then they are implemented by
   3.596 -  an explicit dictionary construction.  As example, let's go back to
   3.597 -  the power function:
   3.598 -*}
   3.599 -
   3.600 -definition %quote example :: int where
   3.601 -  "example = pow_int 10 (-2)"
   3.602 -
   3.603 -text {*
   3.604 -  \noindent This maps to Haskell as follows:
   3.605 -*}
   3.606 -(*<*)code_include %invisible Haskell "Natural" -(*>*)
   3.607 -text %quotetypewriter {*
   3.608 -  @{code_stmts example (Haskell)}
   3.609 -*}
   3.610 -
   3.611 -text {*
   3.612 -  \noindent The code in SML has explicit dictionary passing:
   3.613 -*}
   3.614 -text %quotetypewriter {*
   3.615 -  @{code_stmts example (SML)}
   3.616 -*}
   3.617 -
   3.618 -
   3.619 -text {*
   3.620 -  \noindent In Scala, implicts are used as dictionaries:
   3.621 -*}
   3.622 -(*<*)code_include %invisible Scala "Natural" -(*>*)
   3.623 -text %quotetypewriter {*
   3.624 -  @{code_stmts example (Scala)}
   3.625 -*}
   3.626 -
   3.627 -
   3.628 -subsection {* Inspecting the type class universe *}
   3.629 -
   3.630 -text {*
   3.631 -  To facilitate orientation in complex subclass structures, two
   3.632 -  diagnostics commands are provided:
   3.633 -
   3.634 -  \begin{description}
   3.635 -
   3.636 -    \item[@{command "print_classes"}] print a list of all classes
   3.637 -      together with associated operations etc.
   3.638 -
   3.639 -    \item[@{command "class_deps"}] visualizes the subclass relation
   3.640 -      between all classes as a Hasse diagram.
   3.641 -
   3.642 -  \end{description}
   3.643 -*}
   3.644 -
   3.645 -end
     4.1 --- a/doc-src/Classes/Setup.thy	Tue Aug 28 18:46:15 2012 +0200
     4.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     4.3 @@ -1,40 +0,0 @@
     4.4 -theory Setup
     4.5 -imports Main "~~/src/HOL/Library/Code_Integer"
     4.6 -begin
     4.7 -
     4.8 -ML_file "../antiquote_setup.ML"
     4.9 -ML_file "../more_antiquote.ML"
    4.10 -
    4.11 -setup {*
    4.12 -  Antiquote_Setup.setup #>
    4.13 -  More_Antiquote.setup #>
    4.14 -  Code_Target.set_default_code_width 74
    4.15 -*}
    4.16 -
    4.17 -syntax
    4.18 -  "_alpha" :: "type"  ("\<alpha>")
    4.19 -  "_alpha_ofsort" :: "sort \<Rightarrow> type"  ("\<alpha>()\<Colon>_" [0] 1000)
    4.20 -  "_beta" :: "type"  ("\<beta>")
    4.21 -  "_beta_ofsort" :: "sort \<Rightarrow> type"  ("\<beta>()\<Colon>_" [0] 1000)
    4.22 -
    4.23 -parse_ast_translation {*
    4.24 -  let
    4.25 -    fun alpha_ast_tr [] = Ast.Variable "'a"
    4.26 -      | alpha_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
    4.27 -    fun alpha_ofsort_ast_tr [ast] =
    4.28 -          Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'a", ast]
    4.29 -      | alpha_ofsort_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
    4.30 -    fun beta_ast_tr [] = Ast.Variable "'b"
    4.31 -      | beta_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
    4.32 -    fun beta_ofsort_ast_tr [ast] =
    4.33 -          Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'b", ast]
    4.34 -      | beta_ofsort_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
    4.35 -  in
    4.36 -   [(@{syntax_const "_alpha"}, alpha_ast_tr),
    4.37 -    (@{syntax_const "_alpha_ofsort"}, alpha_ofsort_ast_tr),
    4.38 -    (@{syntax_const "_beta"}, beta_ast_tr),
    4.39 -    (@{syntax_const "_beta_ofsort"}, beta_ofsort_ast_tr)]
    4.40 -  end
    4.41 -*}
    4.42 -
    4.43 -end
    4.44 \ No newline at end of file
     5.1 --- a/doc-src/Classes/document/build	Tue Aug 28 18:46:15 2012 +0200
     5.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.3 @@ -1,18 +0,0 @@
     5.4 -#!/bin/bash
     5.5 -
     5.6 -set -e
     5.7 -
     5.8 -FORMAT="$1"
     5.9 -VARIANT="$2"
    5.10 -
    5.11 -"$ISABELLE_TOOL" logo -o isabelle_isar.pdf Isar
    5.12 -"$ISABELLE_TOOL" logo -o isabelle_isar.eps Isar
    5.13 -
    5.14 -cp "$ISABELLE_HOME/doc-src/iman.sty" .
    5.15 -cp "$ISABELLE_HOME/doc-src/extra.sty" .
    5.16 -cp "$ISABELLE_HOME/doc-src/isar.sty" .
    5.17 -cp "$ISABELLE_HOME/doc-src/proof.sty" .
    5.18 -cp "$ISABELLE_HOME/doc-src/manual.bib" .
    5.19 -
    5.20 -"$ISABELLE_HOME/doc-src/prepare_document" "$FORMAT"
    5.21 -
     6.1 --- a/doc-src/Classes/document/root.tex	Tue Aug 28 18:46:15 2012 +0200
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,46 +0,0 @@
     6.4 -\documentclass[12pt,a4paper,fleqn]{article}
     6.5 -\usepackage{latexsym,graphicx}
     6.6 -\usepackage{iman,extra,isar,proof}
     6.7 -\usepackage{isabelle,isabellesym}
     6.8 -\usepackage{style}
     6.9 -\usepackage{pdfsetup}
    6.10 -
    6.11 -
    6.12 -\hyphenation{Isabelle}
    6.13 -\hyphenation{Isar}
    6.14 -\isadroptag{theory}
    6.15 -
    6.16 -\title{\includegraphics[scale=0.5]{isabelle_isar}
    6.17 -  \\[4ex] Haskell-style type classes with Isabelle/Isar}
    6.18 -\author{\emph{Florian Haftmann}}
    6.19 -
    6.20 -\begin{document}
    6.21 -
    6.22 -\maketitle
    6.23 -
    6.24 -\begin{abstract}
    6.25 -  \noindent This tutorial introduces Isar type classes, which 
    6.26 -  are a convenient mechanism for organizing specifications.
    6.27 -  Essentially, they combine an operational aspect (in the
    6.28 -  manner of Haskell) with a logical aspect, both managed uniformly.
    6.29 -\end{abstract}
    6.30 -
    6.31 -\thispagestyle{empty}\clearpage
    6.32 -
    6.33 -\pagenumbering{roman}
    6.34 -\clearfirst
    6.35 -
    6.36 -\input{Classes.tex}
    6.37 -
    6.38 -\begingroup
    6.39 -\bibliographystyle{plain} \small\raggedright\frenchspacing
    6.40 -\bibliography{manual}
    6.41 -\endgroup
    6.42 -
    6.43 -\end{document}
    6.44 -
    6.45 -
    6.46 -%%% Local Variables: 
    6.47 -%%% mode: latex
    6.48 -%%% TeX-master: t
    6.49 -%%% End: 
     7.1 --- a/doc-src/Classes/document/style.sty	Tue Aug 28 18:46:15 2012 +0200
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,58 +0,0 @@
     7.4 -
     7.5 -%% toc
     7.6 -\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
     7.7 -\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
     7.8 -
     7.9 -%% paragraphs
    7.10 -\setlength{\parindent}{1em}
    7.11 -
    7.12 -%% references
    7.13 -\newcommand{\secref}[1]{\S\ref{#1}}
    7.14 -\newcommand{\figref}[1]{figure~\ref{#1}}
    7.15 -
    7.16 -%% logical markup
    7.17 -\newcommand{\strong}[1]{{\bfseries {#1}}}
    7.18 -\newcommand{\qn}[1]{\emph{#1}}
    7.19 -
    7.20 -%% typographic conventions
    7.21 -\newcommand{\qt}[1]{``{#1}''}
    7.22 -\newcommand{\ditem}[1]{\item[\isastyletext #1]}
    7.23 -
    7.24 -%% quote environment
    7.25 -\isakeeptag{quote}
    7.26 -\renewenvironment{quote}
    7.27 -  {\list{}{\leftmargin2em\rightmargin0pt}\parindent0pt\parskip0pt\item\relax}
    7.28 -  {\endlist}
    7.29 -\renewcommand{\isatagquote}{\begin{quote}}
    7.30 -\renewcommand{\endisatagquote}{\end{quote}}
    7.31 -\newcommand{\quotebreak}{\\[1.2ex]}
    7.32 -
    7.33 -%% typewriter text
    7.34 -\newenvironment{typewriter}{\renewcommand{\isastyletext}{}%
    7.35 -\renewcommand{\isadigit}[1]{{##1}}%
    7.36 -\parindent0pt%
    7.37 -\makeatletter\isa@parindent0pt\makeatother%
    7.38 -\isabellestyle{tt}\isastyle%
    7.39 -\fontsize{9pt}{9pt}\selectfont}{}
    7.40 -
    7.41 -\isakeeptag{quotetypewriter}
    7.42 -\renewcommand{\isatagquotetypewriter}{\begin{quote}\begin{typewriter}}
    7.43 -\renewcommand{\endisatagquotetypewriter}{\end{typewriter}\end{quote}}
    7.44 -
    7.45 -%% presentation
    7.46 -\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
    7.47 -
    7.48 -%% character detail
    7.49 -\renewcommand{\isadigit}[1]{\isamath{#1}}
    7.50 -\binperiod
    7.51 -\underscoreoff
    7.52 -
    7.53 -%% format
    7.54 -\pagestyle{headings}
    7.55 -\isabellestyle{it}
    7.56 -
    7.57 -
    7.58 -%%% Local Variables: 
    7.59 -%%% mode: latex
    7.60 -%%% TeX-master: "implementation"
    7.61 -%%% End: 
     8.1 --- a/doc-src/Codegen/Adaptation.thy	Tue Aug 28 18:46:15 2012 +0200
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,347 +0,0 @@
     8.4 -theory Adaptation
     8.5 -imports Setup
     8.6 -begin
     8.7 -
     8.8 -setup %invisible {* Code_Target.extend_target ("\<SML>", ("SML", K I))
     8.9 -  #> Code_Target.extend_target ("\<SMLdummy>", ("Haskell", K I)) *}
    8.10 -
    8.11 -section {* Adaptation to target languages \label{sec:adaptation} *}
    8.12 -
    8.13 -subsection {* Adapting code generation *}
    8.14 -
    8.15 -text {*
    8.16 -  The aspects of code generation introduced so far have two aspects
    8.17 -  in common:
    8.18 -
    8.19 -  \begin{itemize}
    8.20 -
    8.21 -    \item They act uniformly, without reference to a specific target
    8.22 -       language.
    8.23 -
    8.24 -    \item They are \emph{safe} in the sense that as long as you trust
    8.25 -       the code generator meta theory and implementation, you cannot
    8.26 -       produce programs that yield results which are not derivable in
    8.27 -       the logic.
    8.28 -
    8.29 -  \end{itemize}
    8.30 -
    8.31 -  \noindent In this section we will introduce means to \emph{adapt}
    8.32 -  the serialiser to a specific target language, i.e.~to print program
    8.33 -  fragments in a way which accommodates \qt{already existing}
    8.34 -  ingredients of a target language environment, for three reasons:
    8.35 -
    8.36 -  \begin{itemize}
    8.37 -    \item improving readability and aesthetics of generated code
    8.38 -    \item gaining efficiency
    8.39 -    \item interface with language parts which have no direct counterpart
    8.40 -      in @{text "HOL"} (say, imperative data structures)
    8.41 -  \end{itemize}
    8.42 -
    8.43 -  \noindent Generally, you should avoid using those features yourself
    8.44 -  \emph{at any cost}:
    8.45 -
    8.46 -  \begin{itemize}
    8.47 -
    8.48 -    \item The safe configuration methods act uniformly on every target
    8.49 -      language, whereas for adaptation you have to treat each target
    8.50 -      language separately.
    8.51 -
    8.52 -    \item Application is extremely tedious since there is no
    8.53 -      abstraction which would allow for a static check, making it easy
    8.54 -      to produce garbage.
    8.55 -
    8.56 -    \item Subtle errors can be introduced unconsciously.
    8.57 -
    8.58 -  \end{itemize}
    8.59 -
    8.60 -  \noindent However, even if you ought refrain from setting up
    8.61 -  adaptation yourself, already the @{text "HOL"} comes with some
    8.62 -  reasonable default adaptations (say, using target language list
    8.63 -  syntax).  There also some common adaptation cases which you can
    8.64 -  setup by importing particular library theories.  In order to
    8.65 -  understand these, we provide some clues here; these however are not
    8.66 -  supposed to replace a careful study of the sources.
    8.67 -*}
    8.68 -
    8.69 -
    8.70 -subsection {* The adaptation principle *}
    8.71 -
    8.72 -text {*
    8.73 -  Figure \ref{fig:adaptation} illustrates what \qt{adaptation} is
    8.74 -  conceptually supposed to be:
    8.75 -
    8.76 -  \begin{figure}[here]
    8.77 -    \includegraphics{adapt}
    8.78 -    \caption{The adaptation principle}
    8.79 -    \label{fig:adaptation}
    8.80 -  \end{figure}
    8.81 -
    8.82 -  \noindent In the tame view, code generation acts as broker between
    8.83 -  @{text logic}, @{text "intermediate language"} and @{text "target
    8.84 -  language"} by means of @{text translation} and @{text
    8.85 -  serialisation}; for the latter, the serialiser has to observe the
    8.86 -  structure of the @{text language} itself plus some @{text reserved}
    8.87 -  keywords which have to be avoided for generated code.  However, if
    8.88 -  you consider @{text adaptation} mechanisms, the code generated by
    8.89 -  the serializer is just the tip of the iceberg:
    8.90 -
    8.91 -  \begin{itemize}
    8.92 -
    8.93 -    \item @{text serialisation} can be \emph{parametrised} such that
    8.94 -      logical entities are mapped to target-specific ones
    8.95 -      (e.g. target-specific list syntax, see also
    8.96 -      \secref{sec:adaptation_mechanisms})
    8.97 -
    8.98 -    \item Such parametrisations can involve references to a
    8.99 -      target-specific standard @{text library} (e.g. using the @{text
   8.100 -      Haskell} @{verbatim Maybe} type instead of the @{text HOL}
   8.101 -      @{type "option"} type); if such are used, the corresponding
   8.102 -      identifiers (in our example, @{verbatim Maybe}, @{verbatim
   8.103 -      Nothing} and @{verbatim Just}) also have to be considered @{text
   8.104 -      reserved}.
   8.105 -
   8.106 -    \item Even more, the user can enrich the library of the
   8.107 -      target-language by providing code snippets (\qt{@{text
   8.108 -      "includes"}}) which are prepended to any generated code (see
   8.109 -      \secref{sec:include}); this typically also involves further
   8.110 -      @{text reserved} identifiers.
   8.111 -
   8.112 -  \end{itemize}
   8.113 -
   8.114 -  \noindent As figure \ref{fig:adaptation} illustrates, all these
   8.115 -  adaptation mechanisms have to act consistently; it is at the
   8.116 -  discretion of the user to take care for this.
   8.117 -*}
   8.118 -
   8.119 -subsection {* Common adaptation patterns *}
   8.120 -
   8.121 -text {*
   8.122 -  The @{theory HOL} @{theory Main} theory already provides a code
   8.123 -  generator setup which should be suitable for most applications.
   8.124 -  Common extensions and modifications are available by certain
   8.125 -  theories of the @{text HOL} library; beside being useful in
   8.126 -  applications, they may serve as a tutorial for customising the code
   8.127 -  generator setup (see below \secref{sec:adaptation_mechanisms}).
   8.128 -
   8.129 -  \begin{description}
   8.130 -
   8.131 -    \item[@{text "Code_Integer"}] represents @{text HOL} integers by
   8.132 -       big integer literals in target languages.
   8.133 -
   8.134 -    \item[@{text "Code_Char"}] represents @{text HOL} characters by
   8.135 -       character literals in target languages.
   8.136 -
   8.137 -    \item[@{text "Code_Char_chr"}] like @{text "Code_Char"}, but
   8.138 -       also offers treatment of character codes; includes @{text
   8.139 -       "Code_Char"}.
   8.140 -
   8.141 -    \item[@{text "Efficient_Nat"}] \label{eff_nat} implements
   8.142 -       natural numbers by integers, which in general will result in
   8.143 -       higher efficiency; pattern matching with @{term "0\<Colon>nat"} /
   8.144 -       @{const "Suc"} is eliminated; includes @{text "Code_Integer"}
   8.145 -       and @{text "Code_Numeral"}.
   8.146 -
   8.147 -    \item[@{theory "Code_Numeral"}] provides an additional datatype
   8.148 -       @{typ index} which is mapped to target-language built-in
   8.149 -       integers.  Useful for code setups which involve e.g.~indexing
   8.150 -       of target-language arrays.  Part of @{text "HOL-Main"}.
   8.151 -
   8.152 -    \item[@{theory "String"}] provides an additional datatype @{typ
   8.153 -       String.literal} which is isomorphic to strings; @{typ
   8.154 -       String.literal}s are mapped to target-language strings.  Useful
   8.155 -       for code setups which involve e.g.~printing (error) messages.
   8.156 -       Part of @{text "HOL-Main"}.
   8.157 -
   8.158 -  \end{description}
   8.159 -
   8.160 -  \begin{warn}
   8.161 -    When importing any of those theories which are not part of
   8.162 -    @{text "HOL-Main"}, they should form the last
   8.163 -    items in an import list.  Since these theories adapt the code
   8.164 -    generator setup in a non-conservative fashion, strange effects may
   8.165 -    occur otherwise.
   8.166 -  \end{warn}
   8.167 -*}
   8.168 -
   8.169 -
   8.170 -subsection {* Parametrising serialisation \label{sec:adaptation_mechanisms} *}
   8.171 -
   8.172 -text {*
   8.173 -  Consider the following function and its corresponding SML code:
   8.174 -*}
   8.175 -
   8.176 -primrec %quote in_interval :: "nat \<times> nat \<Rightarrow> nat \<Rightarrow> bool" where
   8.177 -  "in_interval (k, l) n \<longleftrightarrow> k \<le> n \<and> n \<le> l"
   8.178 -(*<*)
   8.179 -code_type %invisible bool
   8.180 -  (SML)
   8.181 -code_const %invisible True and False and "op \<and>" and Not
   8.182 -  (SML and and and)
   8.183 -(*>*)
   8.184 -text %quotetypewriter {*
   8.185 -  @{code_stmts in_interval (SML)}
   8.186 -*}
   8.187 -
   8.188 -text {*
   8.189 -  \noindent Though this is correct code, it is a little bit
   8.190 -  unsatisfactory: boolean values and operators are materialised as
   8.191 -  distinguished entities with have nothing to do with the SML-built-in
   8.192 -  notion of \qt{bool}.  This results in less readable code;
   8.193 -  additionally, eager evaluation may cause programs to loop or break
   8.194 -  which would perfectly terminate when the existing SML @{verbatim
   8.195 -  "bool"} would be used.  To map the HOL @{typ bool} on SML @{verbatim
   8.196 -  "bool"}, we may use \qn{custom serialisations}:
   8.197 -*}
   8.198 -
   8.199 -code_type %quotett bool
   8.200 -  (SML "bool")
   8.201 -code_const %quotett True and False and "op \<and>"
   8.202 -  (SML "true" and "false" and "_ andalso _")
   8.203 -
   8.204 -text {*
   8.205 -  \noindent The @{command_def code_type} command takes a type constructor
   8.206 -  as arguments together with a list of custom serialisations.  Each
   8.207 -  custom serialisation starts with a target language identifier
   8.208 -  followed by an expression, which during code serialisation is
   8.209 -  inserted whenever the type constructor would occur.  For constants,
   8.210 -  @{command_def code_const} implements the corresponding mechanism.  Each
   8.211 -  ``@{verbatim "_"}'' in a serialisation expression is treated as a
   8.212 -  placeholder for the type constructor's (the constant's) arguments.
   8.213 -*}
   8.214 -
   8.215 -text %quotetypewriter {*
   8.216 -  @{code_stmts in_interval (SML)}
   8.217 -*}
   8.218 -
   8.219 -text {*
   8.220 -  \noindent This still is not perfect: the parentheses around the
   8.221 -  \qt{andalso} expression are superfluous.  Though the serialiser by
   8.222 -  no means attempts to imitate the rich Isabelle syntax framework, it
   8.223 -  provides some common idioms, notably associative infixes with
   8.224 -  precedences which may be used here:
   8.225 -*}
   8.226 -
   8.227 -code_const %quotett "op \<and>"
   8.228 -  (SML infixl 1 "andalso")
   8.229 -
   8.230 -text %quotetypewriter {*
   8.231 -  @{code_stmts in_interval (SML)}
   8.232 -*}
   8.233 -
   8.234 -text {*
   8.235 -  \noindent The attentive reader may ask how we assert that no
   8.236 -  generated code will accidentally overwrite.  For this reason the
   8.237 -  serialiser has an internal table of identifiers which have to be
   8.238 -  avoided to be used for new declarations.  Initially, this table
   8.239 -  typically contains the keywords of the target language.  It can be
   8.240 -  extended manually, thus avoiding accidental overwrites, using the
   8.241 -  @{command_def "code_reserved"} command:
   8.242 -*}
   8.243 -
   8.244 -code_reserved %quote "\<SMLdummy>" bool true false andalso
   8.245 -
   8.246 -text {*
   8.247 -  \noindent Next, we try to map HOL pairs to SML pairs, using the
   8.248 -  infix ``@{verbatim "*"}'' type constructor and parentheses:
   8.249 -*}
   8.250 -(*<*)
   8.251 -code_type %invisible prod
   8.252 -  (SML)
   8.253 -code_const %invisible Pair
   8.254 -  (SML)
   8.255 -(*>*)
   8.256 -code_type %quotett prod
   8.257 -  (SML infix 2 "*")
   8.258 -code_const %quotett Pair
   8.259 -  (SML "!((_),/ (_))")
   8.260 -
   8.261 -text {*
   8.262 -  \noindent The initial bang ``@{verbatim "!"}'' tells the serialiser
   8.263 -  never to put parentheses around the whole expression (they are
   8.264 -  already present), while the parentheses around argument place
   8.265 -  holders tell not to put parentheses around the arguments.  The slash
   8.266 -  ``@{verbatim "/"}'' (followed by arbitrary white space) inserts a
   8.267 -  space which may be used as a break if necessary during pretty
   8.268 -  printing.
   8.269 -
   8.270 -  These examples give a glimpse what mechanisms custom serialisations
   8.271 -  provide; however their usage requires careful thinking in order not
   8.272 -  to introduce inconsistencies -- or, in other words: custom
   8.273 -  serialisations are completely axiomatic.
   8.274 -
   8.275 -  A further noteworthy detail is that any special character in a
   8.276 -  custom serialisation may be quoted using ``@{verbatim "'"}''; thus,
   8.277 -  in ``@{verbatim "fn '_ => _"}'' the first ``@{verbatim "_"}'' is a
   8.278 -  proper underscore while the second ``@{verbatim "_"}'' is a
   8.279 -  placeholder.
   8.280 -*}
   8.281 -
   8.282 -
   8.283 -subsection {* @{text Haskell} serialisation *}
   8.284 -
   8.285 -text {*
   8.286 -  For convenience, the default @{text HOL} setup for @{text Haskell}
   8.287 -  maps the @{class equal} class to its counterpart in @{text Haskell},
   8.288 -  giving custom serialisations for the class @{class equal} (by command
   8.289 -  @{command_def code_class}) and its operation @{const [source] HOL.equal}
   8.290 -*}
   8.291 -
   8.292 -code_class %quotett equal
   8.293 -  (Haskell "Eq")
   8.294 -
   8.295 -code_const %quotett "HOL.equal"
   8.296 -  (Haskell infixl 4 "==")
   8.297 -
   8.298 -text {*
   8.299 -  \noindent A problem now occurs whenever a type which is an instance
   8.300 -  of @{class equal} in @{text HOL} is mapped on a @{text
   8.301 -  Haskell}-built-in type which is also an instance of @{text Haskell}
   8.302 -  @{text Eq}:
   8.303 -*}
   8.304 -
   8.305 -typedecl %quote bar
   8.306 -
   8.307 -instantiation %quote bar :: equal
   8.308 -begin
   8.309 -
   8.310 -definition %quote "HOL.equal (x\<Colon>bar) y \<longleftrightarrow> x = y"
   8.311 -
   8.312 -instance %quote by default (simp add: equal_bar_def)
   8.313 -
   8.314 -end %quote (*<*)
   8.315 -
   8.316 -(*>*) code_type %quotett bar
   8.317 -  (Haskell "Integer")
   8.318 -
   8.319 -text {*
   8.320 -  \noindent The code generator would produce an additional instance,
   8.321 -  which of course is rejected by the @{text Haskell} compiler.  To
   8.322 -  suppress this additional instance, use @{command_def "code_instance"}:
   8.323 -*}
   8.324 -
   8.325 -code_instance %quotett bar :: equal
   8.326 -  (Haskell -)
   8.327 -
   8.328 -
   8.329 -subsection {* Enhancing the target language context \label{sec:include} *}
   8.330 -
   8.331 -text {*
   8.332 -  In rare cases it is necessary to \emph{enrich} the context of a
   8.333 -  target language; this is accomplished using the @{command_def
   8.334 -  "code_include"} command:
   8.335 -*}
   8.336 -
   8.337 -code_include %quotett Haskell "Errno"
   8.338 -{*errno i = error ("Error number: " ++ show i)*}
   8.339 -
   8.340 -code_reserved %quotett Haskell Errno
   8.341 -
   8.342 -text {*
   8.343 -  \noindent Such named @{text include}s are then prepended to every
   8.344 -  generated code.  Inspect such code in order to find out how
   8.345 -  @{command "code_include"} behaves with respect to a particular
   8.346 -  target language.
   8.347 -*}
   8.348 -
   8.349 -end
   8.350 -
     9.1 --- a/doc-src/Codegen/Evaluation.thy	Tue Aug 28 18:46:15 2012 +0200
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,287 +0,0 @@
     9.4 -theory Evaluation
     9.5 -imports Setup
     9.6 -begin
     9.7 -
     9.8 -section {* Evaluation \label{sec:evaluation} *}
     9.9 -
    9.10 -text {*
    9.11 -  Recalling \secref{sec:principle}, code generation turns a system of
    9.12 -  equations into a program with the \emph{same} equational semantics.
    9.13 -  As a consequence, this program can be used as a \emph{rewrite
    9.14 -  engine} for terms: rewriting a term @{term "t"} using a program to a
    9.15 -  term @{term "t'"} yields the theorems @{prop "t \<equiv> t'"}.  This
    9.16 -  application of code generation in the following is referred to as
    9.17 -  \emph{evaluation}.
    9.18 -*}
    9.19 -
    9.20 -
    9.21 -subsection {* Evaluation techniques *}
    9.22 -
    9.23 -text {*
    9.24 -  The existing infrastructure provides a rich palette of evaluation
    9.25 -  techniques, each comprising different aspects:
    9.26 -
    9.27 -  \begin{description}
    9.28 -
    9.29 -    \item[Expressiveness.]  Depending on how good symbolic computation
    9.30 -      is supported, the class of terms which can be evaluated may be
    9.31 -      bigger or smaller.
    9.32 -
    9.33 -    \item[Efficiency.]  The more machine-near the technique, the
    9.34 -      faster it is.
    9.35 -
    9.36 -    \item[Trustability.]  Techniques which a huge (and also probably
    9.37 -      more configurable infrastructure) are more fragile and less
    9.38 -      trustable.
    9.39 -
    9.40 -  \end{description}
    9.41 -*}
    9.42 -
    9.43 -
    9.44 -subsubsection {* The simplifier (@{text simp}) *}
    9.45 -
    9.46 -text {*
    9.47 -  The simplest way for evaluation is just using the simplifier with
    9.48 -  the original code equations of the underlying program.  This gives
    9.49 -  fully symbolic evaluation and highest trustablity, with the usual
    9.50 -  performance of the simplifier.  Note that for operations on abstract
    9.51 -  datatypes (cf.~\secref{sec:invariant}), the original theorems as
    9.52 -  given by the users are used, not the modified ones.
    9.53 -*}
    9.54 -
    9.55 -
    9.56 -subsubsection {* Normalization by evaluation (@{text nbe}) *}
    9.57 -
    9.58 -text {*
    9.59 -  Normalization by evaluation \cite{Aehlig-Haftmann-Nipkow:2008:nbe}
    9.60 -  provides a comparably fast partially symbolic evaluation which
    9.61 -  permits also normalization of functions and uninterpreted symbols;
    9.62 -  the stack of code to be trusted is considerable.
    9.63 -*}
    9.64 -
    9.65 -
    9.66 -subsubsection {* Evaluation in ML (@{text code}) *}
    9.67 -
    9.68 -text {*
    9.69 -  Highest performance can be achieved by evaluation in ML, at the cost
    9.70 -  of being restricted to ground results and a layered stack of code to
    9.71 -  be trusted, including code generator configurations by the user.
    9.72 -
    9.73 -  Evaluation is carried out in a target language \emph{Eval} which
    9.74 -  inherits from \emph{SML} but for convenience uses parts of the
    9.75 -  Isabelle runtime environment.  The soundness of computation carried
    9.76 -  out there depends crucially on the correctness of the code
    9.77 -  generator setup; this is one of the reasons why you should not use
    9.78 -  adaptation (see \secref{sec:adaptation}) frivolously.
    9.79 -*}
    9.80 -
    9.81 -
    9.82 -subsection {* Aspects of evaluation *}
    9.83 -
    9.84 -text {*
    9.85 -  Each of the techniques can be combined with different aspects.  The
    9.86 -  most important distinction is between dynamic and static evaluation.
    9.87 -  Dynamic evaluation takes the code generator configuration \qt{as it
    9.88 -  is} at the point where evaluation is issued.  Best example is the
    9.89 -  @{command_def value} command which allows ad-hoc evaluation of
    9.90 -  terms:
    9.91 -*}
    9.92 -
    9.93 -value %quote "42 / (12 :: rat)"
    9.94 -
    9.95 -text {*
    9.96 -  \noindent By default @{command value} tries all available evaluation
    9.97 -  techniques and prints the result of the first succeeding one.  A particular
    9.98 -  technique may be specified in square brackets, e.g.
    9.99 -*}
   9.100 -
   9.101 -value %quote [nbe] "42 / (12 :: rat)"
   9.102 -
   9.103 -text {*
   9.104 -  To employ dynamic evaluation in the document generation, there is also
   9.105 -  a @{text value} antiquotation. By default, it also tries all available evaluation
   9.106 -  techniques and prints the result of the first succeeding one, unless a particular
   9.107 -  technique is specified in square brackets.
   9.108 -
   9.109 -  Static evaluation freezes the code generator configuration at a
   9.110 -  certain point and uses this context whenever evaluation is issued
   9.111 -  later on.  This is particularly appropriate for proof procedures
   9.112 -  which use evaluation, since then the behaviour of evaluation is not
   9.113 -  changed or even compromised later on by actions of the user.
   9.114 -
   9.115 -  As a technical complication, terms after evaluation in ML must be
   9.116 -  turned into Isabelle's internal term representation again.  Since
   9.117 -  this is also configurable, it is never fully trusted.  For this
   9.118 -  reason, evaluation in ML comes with further aspects:
   9.119 -
   9.120 -  \begin{description}
   9.121 -
   9.122 -    \item[Plain evaluation.]  A term is normalized using the provided
   9.123 -      term reconstruction from ML to Isabelle; for applications which
   9.124 -      do not need to be fully trusted.
   9.125 -
   9.126 -    \item[Property conversion.]  Evaluates propositions; since these
   9.127 -      are monomorphic, the term reconstruction is fixed once and for all
   9.128 -      and therefore trustable.
   9.129 -
   9.130 -    \item[Conversion.]  Evaluates an arbitrary term @{term "t"} first
   9.131 -      by plain evaluation and certifies the result @{term "t'"} by
   9.132 -      checking the equation @{term "t \<equiv> t'"} using property
   9.133 -      conversion.
   9.134 -
   9.135 -  \end{description}
   9.136 -
   9.137 -  \noindent The picture is further complicated by the roles of
   9.138 -  exceptions.  Here three cases have to be distinguished:
   9.139 -
   9.140 -  \begin{itemize}
   9.141 -
   9.142 -    \item Evaluation of @{term t} terminates with a result @{term
   9.143 -      "t'"}.
   9.144 -
   9.145 -    \item Evaluation of @{term t} terminates which en exception
   9.146 -      indicating a pattern match failure or a non-implemented
   9.147 -      function.  As sketched in \secref{sec:partiality}, this can be
   9.148 -      interpreted as partiality.
   9.149 -     
   9.150 -    \item Evaluation raises any other kind of exception.
   9.151 -     
   9.152 -  \end{itemize}
   9.153 -
   9.154 -  \noindent For conversions, the first case yields the equation @{term
   9.155 -  "t = t'"}, the second defaults to reflexivity @{term "t = t"}.
   9.156 -  Exceptions of the third kind are propagated to the user.
   9.157 -
   9.158 -  By default return values of plain evaluation are optional, yielding
   9.159 -  @{text "SOME t'"} in the first case, @{text "NONE"} in the
   9.160 -  second, and propagating the exception in the third case.  A strict
   9.161 -  variant of plain evaluation either yields @{text "t'"} or propagates
   9.162 -  any exception, a liberal variant caputures any exception in a result
   9.163 -  of type @{text "Exn.result"}.
   9.164 -  
   9.165 -  For property conversion (which coincides with conversion except for
   9.166 -  evaluation in ML), methods are provided which solve a given goal by
   9.167 -  evaluation.
   9.168 -*}
   9.169 -
   9.170 -
   9.171 -subsection {* Schematic overview *}
   9.172 -
   9.173 -text {*
   9.174 -  \newcommand{\ttsize}{\fontsize{5.8pt}{8pt}\selectfont}
   9.175 -  \fontsize{9pt}{12pt}\selectfont
   9.176 -  \begin{tabular}{ll||c|c|c}
   9.177 -    & & @{text simp} & @{text nbe} & @{text code} \tabularnewline \hline \hline
   9.178 -    \multirow{5}{1ex}{\rotatebox{90}{dynamic}}
   9.179 -      & interactive evaluation 
   9.180 -      & @{command value} @{text "[simp]"} & @{command value} @{text "[nbe]"} & @{command value} @{text "[code]"}
   9.181 -      \tabularnewline
   9.182 -    & plain evaluation & & & \ttsize@{ML "Code_Evaluation.dynamic_value"} \tabularnewline \cline{2-5}
   9.183 -    & evaluation method & @{method code_simp} & @{method normalization} & @{method eval} \tabularnewline
   9.184 -    & property conversion & & & \ttsize@{ML "Code_Runtime.dynamic_holds_conv"} \tabularnewline \cline{2-5}
   9.185 -    & conversion & \ttsize@{ML "Code_Simp.dynamic_conv"} & \ttsize@{ML "Nbe.dynamic_conv"}
   9.186 -      & \ttsize@{ML "Code_Evaluation.dynamic_conv"} \tabularnewline \hline \hline
   9.187 -    \multirow{3}{1ex}{\rotatebox{90}{static}}
   9.188 -    & plain evaluation & & & \ttsize@{ML "Code_Evaluation.static_value"} \tabularnewline \cline{2-5}
   9.189 -    & property conversion & &
   9.190 -      & \ttsize@{ML "Code_Runtime.static_holds_conv"} \tabularnewline \cline{2-5}
   9.191 -    & conversion & \ttsize@{ML "Code_Simp.static_conv"}
   9.192 -      & \ttsize@{ML "Nbe.static_conv"}
   9.193 -      & \ttsize@{ML "Code_Evaluation.static_conv"}
   9.194 -  \end{tabular}
   9.195 -*}
   9.196 -
   9.197 -
   9.198 -subsection {* Intimate connection between logic and system runtime *}
   9.199 -
   9.200 -text {*
   9.201 -  The toolbox of static evaluation conversions forms a reasonable base
   9.202 -  to interweave generated code and system tools.  However in some
   9.203 -  situations more direct interaction is desirable.
   9.204 -*}
   9.205 -
   9.206 -
   9.207 -subsubsection {* Static embedding of generated code into system runtime -- the @{text code} antiquotation *}
   9.208 -
   9.209 -text {*
   9.210 -  The @{text code} antiquotation allows to include constants from
   9.211 -  generated code directly into ML system code, as in the following toy
   9.212 -  example:
   9.213 -*}
   9.214 -
   9.215 -datatype %quote form = T | F | And form form | Or form form (*<*)
   9.216 -
   9.217 -(*>*) ML %quotett {*
   9.218 -  fun eval_form @{code T} = true
   9.219 -    | eval_form @{code F} = false
   9.220 -    | eval_form (@{code And} (p, q)) =
   9.221 -        eval_form p andalso eval_form q
   9.222 -    | eval_form (@{code Or} (p, q)) =
   9.223 -        eval_form p orelse eval_form q;
   9.224 -*}
   9.225 -
   9.226 -text {*
   9.227 -  \noindent @{text code} takes as argument the name of a constant;
   9.228 -  after the whole ML is read, the necessary code is generated
   9.229 -  transparently and the corresponding constant names are inserted.
   9.230 -  This technique also allows to use pattern matching on constructors
   9.231 -  stemming from compiled datatypes.  Note that the @{text code}
   9.232 -  antiquotation may not refer to constants which carry adaptations;
   9.233 -  here you have to refer to the corresponding adapted code directly.
   9.234 -
   9.235 -  For a less simplistic example, theory @{text Approximation} in
   9.236 -  the @{text Decision_Procs} session is a good reference.
   9.237 -*}
   9.238 -
   9.239 -
   9.240 -subsubsection {* Static embedding of generated code into system runtime -- @{text code_reflect} *}
   9.241 -
   9.242 -text {*
   9.243 -  The @{text code} antiquoation is lightweight, but the generated code
   9.244 -  is only accessible while the ML section is processed.  Sometimes this
   9.245 -  is not appropriate, especially if the generated code contains datatype
   9.246 -  declarations which are shared with other parts of the system.  In these
   9.247 -  cases, @{command_def code_reflect} can be used:
   9.248 -*}
   9.249 -
   9.250 -code_reflect %quote Sum_Type
   9.251 -  datatypes sum = Inl | Inr
   9.252 -  functions "Sum_Type.Projl" "Sum_Type.Projr"
   9.253 -
   9.254 -text {*
   9.255 -  \noindent @{command_def code_reflect} takes a structure name and
   9.256 -  references to datatypes and functions; for these code is compiled
   9.257 -  into the named ML structure and the \emph{Eval} target is modified
   9.258 -  in a way that future code generation will reference these
   9.259 -  precompiled versions of the given datatypes and functions.  This
   9.260 -  also allows to refer to the referenced datatypes and functions from
   9.261 -  arbitrary ML code as well.
   9.262 -
   9.263 -  A typical example for @{command code_reflect} can be found in the
   9.264 -  @{theory Predicate} theory.
   9.265 -*}
   9.266 -
   9.267 -
   9.268 -subsubsection {* Separate compilation -- @{text code_reflect} *}
   9.269 -
   9.270 -text {*
   9.271 -  For technical reasons it is sometimes necessary to separate
   9.272 -  generation and compilation of code which is supposed to be used in
   9.273 -  the system runtime.  For this @{command code_reflect} with an
   9.274 -  optional @{text "file"} argument can be used:
   9.275 -*}
   9.276 -
   9.277 -code_reflect %quote Rat
   9.278 -  datatypes rat = Frct
   9.279 -  functions Fract
   9.280 -    "(plus :: rat \<Rightarrow> rat \<Rightarrow> rat)" "(minus :: rat \<Rightarrow> rat \<Rightarrow> rat)"
   9.281 -    "(times :: rat \<Rightarrow> rat \<Rightarrow> rat)" "(divide :: rat \<Rightarrow> rat \<Rightarrow> rat)"
   9.282 -  file "examples/rat.ML"
   9.283 -
   9.284 -text {*
   9.285 -  \noindent This merely generates the referenced code to the given
   9.286 -  file which can be included into the system runtime later on.
   9.287 -*}
   9.288 -
   9.289 -end
   9.290 -
    10.1 --- a/doc-src/Codegen/Foundations.thy	Tue Aug 28 18:46:15 2012 +0200
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,347 +0,0 @@
    10.4 -theory Foundations
    10.5 -imports Introduction
    10.6 -begin
    10.7 -
    10.8 -section {* Code generation foundations \label{sec:foundations} *}
    10.9 -
   10.10 -subsection {* Code generator architecture \label{sec:architecture} *}
   10.11 -
   10.12 -text {*
   10.13 -  The code generator is actually a framework consisting of different
   10.14 -  components which can be customised individually.
   10.15 -
   10.16 -  Conceptually all components operate on Isabelle's logic framework
   10.17 -  @{theory Pure}.  Practically, the object logic @{theory HOL}
   10.18 -  provides the necessary facilities to make use of the code generator,
   10.19 -  mainly since it is an extension of @{theory Pure}.
   10.20 -
   10.21 -  The constellation of the different components is visualized in the
   10.22 -  following picture.
   10.23 -
   10.24 -  \begin{figure}[h]
   10.25 -    \includegraphics{architecture}
   10.26 -    \caption{Code generator architecture}
   10.27 -    \label{fig:arch}
   10.28 -  \end{figure}
   10.29 -
   10.30 -  \noindent Central to code generation is the notion of \emph{code
   10.31 -  equations}.  A code equation as a first approximation is a theorem
   10.32 -  of the form @{text "f t\<^isub>1 t\<^isub>2 \<dots> t\<^isub>n \<equiv> t"} (an equation headed by a
   10.33 -  constant @{text f} with arguments @{text "t\<^isub>1 t\<^isub>2 \<dots> t\<^isub>n"} and right
   10.34 -  hand side @{text t}).
   10.35 -
   10.36 -  \begin{itemize}
   10.37 -
   10.38 -    \item Starting point of code generation is a collection of (raw)
   10.39 -      code equations in a theory. It is not relevant where they stem
   10.40 -      from, but typically they were either produced by specification
   10.41 -      tools or proved explicitly by the user.
   10.42 -      
   10.43 -    \item These raw code equations can be subjected to theorem
   10.44 -      transformations.  This \qn{preprocessor} (see
   10.45 -      \secref{sec:preproc}) can apply the full expressiveness of
   10.46 -      ML-based theorem transformations to code generation.  The result
   10.47 -      of preprocessing is a structured collection of code equations.
   10.48 -
   10.49 -    \item These code equations are \qn{translated} to a program in an
   10.50 -      abstract intermediate language.  Think of it as a kind of
   10.51 -      \qt{Mini-Haskell} with four \qn{statements}: @{text data} (for
   10.52 -      datatypes), @{text fun} (stemming from code equations), also
   10.53 -      @{text class} and @{text inst} (for type classes).
   10.54 -
   10.55 -    \item Finally, the abstract program is \qn{serialised} into
   10.56 -      concrete source code of a target language.  This step only
   10.57 -      produces concrete syntax but does not change the program in
   10.58 -      essence; all conceptual transformations occur in the translation
   10.59 -      step.
   10.60 -
   10.61 -  \end{itemize}
   10.62 -
   10.63 -  \noindent From these steps, only the last two are carried out
   10.64 -  outside the logic; by keeping this layer as thin as possible, the
   10.65 -  amount of code to trust is kept to a minimum.
   10.66 -*}
   10.67 -
   10.68 -
   10.69 -subsection {* The preprocessor \label{sec:preproc} *}
   10.70 -
   10.71 -text {*
   10.72 -  Before selected function theorems are turned into abstract code, a
   10.73 -  chain of definitional transformation steps is carried out:
   10.74 -  \emph{preprocessing}.  The preprocessor consists of two
   10.75 -  components: a \emph{simpset} and \emph{function transformers}.
   10.76 -
   10.77 -  The \emph{simpset} can apply the full generality of the Isabelle
   10.78 -  simplifier.  Due to the interpretation of theorems as code
   10.79 -  equations, rewrites are applied to the right hand side and the
   10.80 -  arguments of the left hand side of an equation, but never to the
   10.81 -  constant heading the left hand side.  An important special case are
   10.82 -  \emph{unfold theorems}, which may be declared and removed using the
   10.83 -  @{attribute code_unfold} or \emph{@{attribute code_unfold} del}
   10.84 -  attribute, respectively.
   10.85 -
   10.86 -  Some common applications:
   10.87 -*}
   10.88 -
   10.89 -text_raw {*
   10.90 -  \begin{itemize}
   10.91 -*}
   10.92 -
   10.93 -text {*
   10.94 -     \item replacing non-executable constructs by executable ones:
   10.95 -*}     
   10.96 -
   10.97 -lemma %quote [code_unfold]:
   10.98 -  "x \<in> set xs \<longleftrightarrow> List.member xs x" by (fact in_set_member)
   10.99 -
  10.100 -text {*
  10.101 -     \item replacing executable but inconvenient constructs:
  10.102 -*}
  10.103 -
  10.104 -lemma %quote [code_unfold]:
  10.105 -  "xs = [] \<longleftrightarrow> List.null xs" by (fact eq_Nil_null)
  10.106 -
  10.107 -text {*
  10.108 -     \item eliminating disturbing expressions:
  10.109 -*}
  10.110 -
  10.111 -lemma %quote [code_unfold]:
  10.112 -  "1 = Suc 0" by (fact One_nat_def)
  10.113 -
  10.114 -text_raw {*
  10.115 -  \end{itemize}
  10.116 -*}
  10.117 -
  10.118 -text {*
  10.119 -  \noindent \emph{Function transformers} provide a very general
  10.120 -  interface, transforming a list of function theorems to another list
  10.121 -  of function theorems, provided that neither the heading constant nor
  10.122 -  its type change.  The @{term "0\<Colon>nat"} / @{const Suc} pattern
  10.123 -  elimination implemented in theory @{text Efficient_Nat} (see
  10.124 -  \secref{eff_nat}) uses this interface.
  10.125 -
  10.126 -  \noindent The current setup of the preprocessor may be inspected
  10.127 -  using the @{command_def print_codeproc} command.  @{command_def
  10.128 -  code_thms} (see \secref{sec:equations}) provides a convenient
  10.129 -  mechanism to inspect the impact of a preprocessor setup on code
  10.130 -  equations.
  10.131 -*}
  10.132 -
  10.133 -
  10.134 -subsection {* Understanding code equations \label{sec:equations} *}
  10.135 -
  10.136 -text {*
  10.137 -  As told in \secref{sec:principle}, the notion of code equations is
  10.138 -  vital to code generation.  Indeed most problems which occur in
  10.139 -  practice can be resolved by an inspection of the underlying code
  10.140 -  equations.
  10.141 -
  10.142 -  It is possible to exchange the default code equations for constants
  10.143 -  by explicitly proving alternative ones:
  10.144 -*}
  10.145 -
  10.146 -lemma %quote [code]:
  10.147 -  "dequeue (AQueue xs []) =
  10.148 -     (if xs = [] then (None, AQueue [] [])
  10.149 -       else dequeue (AQueue [] (rev xs)))"
  10.150 -  "dequeue (AQueue xs (y # ys)) =
  10.151 -     (Some y, AQueue xs ys)"
  10.152 -  by (cases xs, simp_all) (cases "rev xs", simp_all)
  10.153 -
  10.154 -text {*
  10.155 -  \noindent The annotation @{text "[code]"} is an @{text attribute}
  10.156 -  which states that the given theorems should be considered as code
  10.157 -  equations for a @{text fun} statement -- the corresponding constant
  10.158 -  is determined syntactically.  The resulting code:
  10.159 -*}
  10.160 -
  10.161 -text %quotetypewriter {*
  10.162 -  @{code_stmts dequeue (consts) dequeue (Haskell)}
  10.163 -*}
  10.164 -
  10.165 -text {*
  10.166 -  \noindent You may note that the equality test @{term "xs = []"} has
  10.167 -  been replaced by the predicate @{term "List.null xs"}.  This is due
  10.168 -  to the default setup of the \qn{preprocessor}.
  10.169 -
  10.170 -  This possibility to select arbitrary code equations is the key
  10.171 -  technique for program and datatype refinement (see
  10.172 -  \secref{sec:refinement}).
  10.173 -
  10.174 -  Due to the preprocessor, there is the distinction of raw code
  10.175 -  equations (before preprocessing) and code equations (after
  10.176 -  preprocessing).
  10.177 -
  10.178 -  The first can be listed (among other data) using the @{command_def
  10.179 -  print_codesetup} command.
  10.180 -
  10.181 -  The code equations after preprocessing are already are blueprint of
  10.182 -  the generated program and can be inspected using the @{command
  10.183 -  code_thms} command:
  10.184 -*}
  10.185 -
  10.186 -code_thms %quote dequeue
  10.187 -
  10.188 -text {*
  10.189 -  \noindent This prints a table with the code equations for @{const
  10.190 -  dequeue}, including \emph{all} code equations those equations depend
  10.191 -  on recursively.  These dependencies themselves can be visualized using
  10.192 -  the @{command_def code_deps} command.
  10.193 -*}
  10.194 -
  10.195 -
  10.196 -subsection {* Equality *}
  10.197 -
  10.198 -text {*
  10.199 -  Implementation of equality deserves some attention.  Here an example
  10.200 -  function involving polymorphic equality:
  10.201 -*}
  10.202 -
  10.203 -primrec %quote collect_duplicates :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" where
  10.204 -  "collect_duplicates xs ys [] = xs"
  10.205 -| "collect_duplicates xs ys (z#zs) = (if z \<in> set xs
  10.206 -    then if z \<in> set ys
  10.207 -      then collect_duplicates xs ys zs
  10.208 -      else collect_duplicates xs (z#ys) zs
  10.209 -    else collect_duplicates (z#xs) (z#ys) zs)"
  10.210 -
  10.211 -text {*
  10.212 -  \noindent During preprocessing, the membership test is rewritten,
  10.213 -  resulting in @{const List.member}, which itself performs an explicit
  10.214 -  equality check, as can be seen in the corresponding @{text SML} code:
  10.215 -*}
  10.216 -
  10.217 -text %quotetypewriter {*
  10.218 -  @{code_stmts collect_duplicates (SML)}
  10.219 -*}
  10.220 -
  10.221 -text {*
  10.222 -  \noindent Obviously, polymorphic equality is implemented the Haskell
  10.223 -  way using a type class.  How is this achieved?  HOL introduces an
  10.224 -  explicit class @{class equal} with a corresponding operation @{const
  10.225 -  HOL.equal} such that @{thm equal [no_vars]}.  The preprocessing
  10.226 -  framework does the rest by propagating the @{class equal} constraints
  10.227 -  through all dependent code equations.  For datatypes, instances of
  10.228 -  @{class equal} are implicitly derived when possible.  For other types,
  10.229 -  you may instantiate @{text equal} manually like any other type class.
  10.230 -*}
  10.231 -
  10.232 -
  10.233 -subsection {* Explicit partiality \label{sec:partiality} *}
  10.234 -
  10.235 -text {*
  10.236 -  Partiality usually enters the game by partial patterns, as
  10.237 -  in the following example, again for amortised queues:
  10.238 -*}
  10.239 -
  10.240 -definition %quote strict_dequeue :: "'a queue \<Rightarrow> 'a \<times> 'a queue" where
  10.241 -  "strict_dequeue q = (case dequeue q
  10.242 -    of (Some x, q') \<Rightarrow> (x, q'))"
  10.243 -
  10.244 -lemma %quote strict_dequeue_AQueue [code]:
  10.245 -  "strict_dequeue (AQueue xs (y # ys)) = (y, AQueue xs ys)"
  10.246 -  "strict_dequeue (AQueue xs []) =
  10.247 -    (case rev xs of y # ys \<Rightarrow> (y, AQueue [] ys))"
  10.248 -  by (simp_all add: strict_dequeue_def) (cases xs, simp_all split: list.split)
  10.249 -
  10.250 -text {*
  10.251 -  \noindent In the corresponding code, there is no equation
  10.252 -  for the pattern @{term "AQueue [] []"}:
  10.253 -*}
  10.254 -
  10.255 -text %quotetypewriter {*
  10.256 -  @{code_stmts strict_dequeue (consts) strict_dequeue (Haskell)}
  10.257 -*}
  10.258 -
  10.259 -text {*
  10.260 -  \noindent In some cases it is desirable to have this
  10.261 -  pseudo-\qt{partiality} more explicitly, e.g.~as follows:
  10.262 -*}
  10.263 -
  10.264 -axiomatization %quote empty_queue :: 'a
  10.265 -
  10.266 -definition %quote strict_dequeue' :: "'a queue \<Rightarrow> 'a \<times> 'a queue" where
  10.267 -  "strict_dequeue' q = (case dequeue q of (Some x, q') \<Rightarrow> (x, q') | _ \<Rightarrow> empty_queue)"
  10.268 -
  10.269 -lemma %quote strict_dequeue'_AQueue [code]:
  10.270 -  "strict_dequeue' (AQueue xs []) = (if xs = [] then empty_queue
  10.271 -     else strict_dequeue' (AQueue [] (rev xs)))"
  10.272 -  "strict_dequeue' (AQueue xs (y # ys)) =
  10.273 -     (y, AQueue xs ys)"
  10.274 -  by (simp_all add: strict_dequeue'_def split: list.splits)
  10.275 -
  10.276 -text {*
  10.277 -  Observe that on the right hand side of the definition of @{const
  10.278 -  "strict_dequeue'"}, the unspecified constant @{const empty_queue} occurs.
  10.279 -
  10.280 -  Normally, if constants without any code equations occur in a
  10.281 -  program, the code generator complains (since in most cases this is
  10.282 -  indeed an error).  But such constants can also be thought
  10.283 -  of as function definitions which always fail,
  10.284 -  since there is never a successful pattern match on the left hand
  10.285 -  side.  In order to categorise a constant into that category
  10.286 -  explicitly, use @{command_def "code_abort"}:
  10.287 -*}
  10.288 -
  10.289 -code_abort %quote empty_queue
  10.290 -
  10.291 -text {*
  10.292 -  \noindent Then the code generator will just insert an error or
  10.293 -  exception at the appropriate position:
  10.294 -*}
  10.295 -
  10.296 -text %quotetypewriter {*
  10.297 -  @{code_stmts strict_dequeue' (consts) empty_queue strict_dequeue' (Haskell)}
  10.298 -*}
  10.299 -
  10.300 -text {*
  10.301 -  \noindent This feature however is rarely needed in practice.  Note
  10.302 -  also that the HOL default setup already declares @{const undefined}
  10.303 -  as @{command "code_abort"}, which is most likely to be used in such
  10.304 -  situations.
  10.305 -*}
  10.306 -
  10.307 -
  10.308 -subsection {* If something goes utterly wrong \label{sec:utterly_wrong} *}
  10.309 -
  10.310 -text {*
  10.311 -  Under certain circumstances, the code generator fails to produce
  10.312 -  code entirely.  To debug these, the following hints may prove
  10.313 -  helpful:
  10.314 -
  10.315 -  \begin{description}
  10.316 -
  10.317 -    \ditem{\emph{Check with a different target language}.}  Sometimes
  10.318 -      the situation gets more clear if you switch to another target
  10.319 -      language; the code generated there might give some hints what
  10.320 -      prevents the code generator to produce code for the desired
  10.321 -      language.
  10.322 -
  10.323 -    \ditem{\emph{Inspect code equations}.}  Code equations are the central
  10.324 -      carrier of code generation.  Most problems occurring while generating
  10.325 -      code can be traced to single equations which are printed as part of
  10.326 -      the error message.  A closer inspection of those may offer the key
  10.327 -      for solving issues (cf.~\secref{sec:equations}).
  10.328 -
  10.329 -    \ditem{\emph{Inspect preprocessor setup}.}  The preprocessor might
  10.330 -      transform code equations unexpectedly; to understand an
  10.331 -      inspection of its setup is necessary (cf.~\secref{sec:preproc}).
  10.332 -
  10.333 -    \ditem{\emph{Generate exceptions}.}  If the code generator
  10.334 -      complains about missing code equations, in can be helpful to
  10.335 -      implement the offending constants as exceptions
  10.336 -      (cf.~\secref{sec:partiality}); this allows at least for a formal
  10.337 -      generation of code, whose inspection may then give clues what is
  10.338 -      wrong.
  10.339 -
  10.340 -    \ditem{\emph{Remove offending code equations}.}  If code
  10.341 -      generation is prevented by just a single equation, this can be
  10.342 -      removed (cf.~\secref{sec:equations}) to allow formal code
  10.343 -      generation, whose result in turn can be used to trace the
  10.344 -      problem.  The most prominent case here are mismatches in type
  10.345 -      class signatures (\qt{wellsortedness error}).
  10.346 -
  10.347 -  \end{description}
  10.348 -*}
  10.349 -
  10.350 -end
    11.1 --- a/doc-src/Codegen/Further.thy	Tue Aug 28 18:46:15 2012 +0200
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,351 +0,0 @@
    11.4 -theory Further
    11.5 -imports Setup
    11.6 -begin
    11.7 -
    11.8 -section {* Further issues \label{sec:further} *}
    11.9 -
   11.10 -subsection {* Specialities of the @{text Scala} target language \label{sec:scala} *}
   11.11 -
   11.12 -text {*
   11.13 -  @{text Scala} deviates from languages of the ML family in a couple
   11.14 -  of aspects; those which affect code generation mainly have to do with
   11.15 -  @{text Scala}'s type system:
   11.16 -
   11.17 -  \begin{itemize}
   11.18 -
   11.19 -    \item @{text Scala} prefers tupled syntax over curried syntax.
   11.20 -
   11.21 -    \item @{text Scala} sacrifices Hindely-Milner type inference for a
   11.22 -      much more rich type system with subtyping etc.  For this reason
   11.23 -      type arguments sometimes have to be given explicitly in square
   11.24 -      brackets (mimicking System F syntax).
   11.25 -
   11.26 -    \item In contrast to @{text Haskell} where most specialities of
   11.27 -      the type system are implemented using \emph{type classes},
   11.28 -      @{text Scala} provides a sophisticated system of \emph{implicit
   11.29 -      arguments}.
   11.30 -
   11.31 -  \end{itemize}
   11.32 -
   11.33 -  \noindent Concerning currying, the @{text Scala} serializer counts
   11.34 -  arguments in code equations to determine how many arguments
   11.35 -  shall be tupled; remaining arguments and abstractions in terms
   11.36 -  rather than function definitions are always curried.
   11.37 -
   11.38 -  The second aspect affects user-defined adaptations with @{command
   11.39 -  code_const}.  For regular terms, the @{text Scala} serializer prints
   11.40 -  all type arguments explicitly.  For user-defined term adaptations
   11.41 -  this is only possible for adaptations which take no arguments: here
   11.42 -  the type arguments are just appended.  Otherwise they are ignored;
   11.43 -  hence user-defined adaptations for polymorphic constants have to be
   11.44 -  designed very carefully to avoid ambiguity.
   11.45 -
   11.46 -  Isabelle's type classes are mapped onto @{text Scala} implicits; in
   11.47 -  cases with diamonds in the subclass hierarchy this can lead to
   11.48 -  ambiguities in the generated code:
   11.49 -*}
   11.50 -
   11.51 -class %quote class1 =
   11.52 -  fixes foo :: "'a \<Rightarrow> 'a"
   11.53 -
   11.54 -class %quote class2 = class1
   11.55 -
   11.56 -class %quote class3 = class1
   11.57 -
   11.58 -text {*
   11.59 -  \noindent Here both @{class class2} and @{class class3} inherit from @{class class1},
   11.60 -  forming the upper part of a diamond.
   11.61 -*}
   11.62 -
   11.63 -definition %quote bar :: "'a :: {class2, class3} \<Rightarrow> 'a" where
   11.64 -  "bar = foo"
   11.65 -
   11.66 -text {*
   11.67 -  \noindent This yields the following code:
   11.68 -*}
   11.69 -
   11.70 -text %quotetypewriter {*
   11.71 -  @{code_stmts bar (Scala)}
   11.72 -*}
   11.73 -
   11.74 -text {*
   11.75 -  \noindent This code is rejected by the @{text Scala} compiler: in
   11.76 -  the definition of @{text bar}, it is not clear from where to derive
   11.77 -  the implicit argument for @{text foo}.
   11.78 -
   11.79 -  The solution to the problem is to close the diamond by a further
   11.80 -  class with inherits from both @{class class2} and @{class class3}:
   11.81 -*}
   11.82 -
   11.83 -class %quote class4 = class2 + class3
   11.84 -
   11.85 -text {*
   11.86 -  \noindent Then the offending code equation can be restricted to
   11.87 -  @{class class4}:
   11.88 -*}
   11.89 -
   11.90 -lemma %quote [code]:
   11.91 -  "(bar :: 'a::class4 \<Rightarrow> 'a) = foo"
   11.92 -  by (simp only: bar_def)
   11.93 -
   11.94 -text {*
   11.95 -  \noindent with the following code:
   11.96 -*}
   11.97 -
   11.98 -text %quotetypewriter {*
   11.99 -  @{code_stmts bar (Scala)}
  11.100 -*}
  11.101 -
  11.102 -text {*
  11.103 -  \noindent which exposes no ambiguity.
  11.104 -
  11.105 -  Since the preprocessor (cf.~\secref{sec:preproc}) propagates sort
  11.106 -  constraints through a system of code equations, it is usually not
  11.107 -  very difficult to identify the set of code equations which actually
  11.108 -  needs more restricted sort constraints.
  11.109 -*}
  11.110 -
  11.111 -subsection {* Modules namespace *}
  11.112 -
  11.113 -text {*
  11.114 -  When invoking the @{command export_code} command it is possible to
  11.115 -  leave out the @{keyword "module_name"} part; then code is
  11.116 -  distributed over different modules, where the module name space
  11.117 -  roughly is induced by the Isabelle theory name space.
  11.118 -
  11.119 -  Then sometimes the awkward situation occurs that dependencies
  11.120 -  between definitions introduce cyclic dependencies between modules,
  11.121 -  which in the @{text Haskell} world leaves you to the mercy of the
  11.122 -  @{text Haskell} implementation you are using, while for @{text
  11.123 -  SML}/@{text OCaml} code generation is not possible.
  11.124 -
  11.125 -  A solution is to declare module names explicitly.  Let use assume
  11.126 -  the three cyclically dependent modules are named \emph{A}, \emph{B}
  11.127 -  and \emph{C}.  Then, by stating
  11.128 -*}
  11.129 -
  11.130 -code_modulename %quote SML
  11.131 -  A ABC
  11.132 -  B ABC
  11.133 -  C ABC
  11.134 -
  11.135 -text {*
  11.136 -  \noindent we explicitly map all those modules on \emph{ABC},
  11.137 -  resulting in an ad-hoc merge of this three modules at serialisation
  11.138 -  time.
  11.139 -*}
  11.140 -
  11.141 -subsection {* Locales and interpretation *}
  11.142 -
  11.143 -text {*
  11.144 -  A technical issue comes to surface when generating code from
  11.145 -  specifications stemming from locale interpretation.
  11.146 -
  11.147 -  Let us assume a locale specifying a power operation on arbitrary
  11.148 -  types:
  11.149 -*}
  11.150 -
  11.151 -locale %quote power =
  11.152 -  fixes power :: "'a \<Rightarrow> 'b \<Rightarrow> 'b"
  11.153 -  assumes power_commute: "power x \<circ> power y = power y \<circ> power x"
  11.154 -begin
  11.155 -
  11.156 -text {*
  11.157 -  \noindent Inside that locale we can lift @{text power} to exponent
  11.158 -  lists by means of specification relative to that locale:
  11.159 -*}
  11.160 -
  11.161 -primrec %quote powers :: "'a list \<Rightarrow> 'b \<Rightarrow> 'b" where
  11.162 -  "powers [] = id"
  11.163 -| "powers (x # xs) = power x \<circ> powers xs"
  11.164 -
  11.165 -lemma %quote powers_append:
  11.166 -  "powers (xs @ ys) = powers xs \<circ> powers ys"
  11.167 -  by (induct xs) simp_all
  11.168 -
  11.169 -lemma %quote powers_power:
  11.170 -  "powers xs \<circ> power x = power x \<circ> powers xs"
  11.171 -  by (induct xs)
  11.172 -    (simp_all del: o_apply id_apply add: o_assoc [symmetric],
  11.173 -      simp del: o_apply add: o_assoc power_commute)
  11.174 -
  11.175 -lemma %quote powers_rev:
  11.176 -  "powers (rev xs) = powers xs"
  11.177 -    by (induct xs) (simp_all add: powers_append powers_power)
  11.178 -
  11.179 -end %quote
  11.180 -
  11.181 -text {*
  11.182 -  After an interpretation of this locale (say, @{command_def
  11.183 -  interpretation} @{text "fun_power:"} @{term [source] "power (\<lambda>n (f
  11.184 -  :: 'a \<Rightarrow> 'a). f ^^ n)"}), one would expect to have a constant @{text
  11.185 -  "fun_power.powers :: nat list \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a"} for which code
  11.186 -  can be generated.  But this not the case: internally, the term
  11.187 -  @{text "fun_power.powers"} is an abbreviation for the foundational
  11.188 -  term @{term [source] "power.powers (\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)"}
  11.189 -  (see \cite{isabelle-locale} for the details behind).
  11.190 -
  11.191 -  Fortunately, with minor effort the desired behaviour can be
  11.192 -  achieved.  First, a dedicated definition of the constant on which
  11.193 -  the local @{text "powers"} after interpretation is supposed to be
  11.194 -  mapped on:
  11.195 -*}
  11.196 -
  11.197 -definition %quote funpows :: "nat list \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" where
  11.198 -  [code del]: "funpows = power.powers (\<lambda>n f. f ^^ n)"
  11.199 -
  11.200 -text {*
  11.201 -  \noindent In general, the pattern is @{text "c = t"} where @{text c}
  11.202 -  is the name of the future constant and @{text t} the foundational
  11.203 -  term corresponding to the local constant after interpretation.
  11.204 -
  11.205 -  The interpretation itself is enriched with an equation @{text "t = c"}:
  11.206 -*}
  11.207 -
  11.208 -interpretation %quote fun_power: power "\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n" where
  11.209 -  "power.powers (\<lambda>n f. f ^^ n) = funpows"
  11.210 -  by unfold_locales
  11.211 -    (simp_all add: fun_eq_iff funpow_mult mult_commute funpows_def)
  11.212 -
  11.213 -text {*
  11.214 -  \noindent This additional equation is trivially proved by the
  11.215 -  definition itself.
  11.216 -
  11.217 -  After this setup procedure, code generation can continue as usual:
  11.218 -*}
  11.219 -
  11.220 -text %quotetypewriter {*
  11.221 -  @{code_stmts funpows (consts) Nat.funpow funpows (Haskell)}
  11.222 -*}
  11.223 -
  11.224 -
  11.225 -subsection {* Imperative data structures *}
  11.226 -
  11.227 -text {*
  11.228 -  If you consider imperative data structures as inevitable for a
  11.229 -  specific application, you should consider \emph{Imperative
  11.230 -  Functional Programming with Isabelle/HOL}
  11.231 -  \cite{bulwahn-et-al:2008:imperative}; the framework described there
  11.232 -  is available in session @{text Imperative_HOL}, together with a
  11.233 -  short primer document.
  11.234 -*}
  11.235 -
  11.236 -
  11.237 -subsection {* ML system interfaces \label{sec:ml} *}
  11.238 -
  11.239 -text {*
  11.240 -  Since the code generator framework not only aims to provide a nice
  11.241 -  Isar interface but also to form a base for code-generation-based
  11.242 -  applications, here a short description of the most fundamental ML
  11.243 -  interfaces.
  11.244 -*}
  11.245 -
  11.246 -subsubsection {* Managing executable content *}
  11.247 -
  11.248 -text %mlref {*
  11.249 -  \begin{mldecls}
  11.250 -  @{index_ML Code.read_const: "theory -> string -> string"} \\
  11.251 -  @{index_ML Code.add_eqn: "thm -> theory -> theory"} \\
  11.252 -  @{index_ML Code.del_eqn: "thm -> theory -> theory"} \\
  11.253 -  @{index_ML Code_Preproc.map_pre: "(simpset -> simpset) -> theory -> theory"} \\
  11.254 -  @{index_ML Code_Preproc.map_post: "(simpset -> simpset) -> theory -> theory"} \\
  11.255 -  @{index_ML Code_Preproc.add_functrans: "
  11.256 -    string * (theory -> (thm * bool) list -> (thm * bool) list option)
  11.257 -      -> theory -> theory"} \\
  11.258 -  @{index_ML Code_Preproc.del_functrans: "string -> theory -> theory"} \\
  11.259 -  @{index_ML Code.add_datatype: "(string * typ) list -> theory -> theory"} \\
  11.260 -  @{index_ML Code.get_type: "theory -> string
  11.261 -    -> ((string * sort) list * (string * ((string * sort) list * typ list)) list) * bool"} \\
  11.262 -  @{index_ML Code.get_type_of_constr_or_abstr: "theory -> string -> (string * bool) option"}
  11.263 -  \end{mldecls}
  11.264 -
  11.265 -  \begin{description}
  11.266 -
  11.267 -  \item @{ML Code.read_const}~@{text thy}~@{text s}
  11.268 -     reads a constant as a concrete term expression @{text s}.
  11.269 -
  11.270 -  \item @{ML Code.add_eqn}~@{text "thm"}~@{text "thy"} adds function
  11.271 -     theorem @{text "thm"} to executable content.
  11.272 -
  11.273 -  \item @{ML Code.del_eqn}~@{text "thm"}~@{text "thy"} removes function
  11.274 -     theorem @{text "thm"} from executable content, if present.
  11.275 -
  11.276 -  \item @{ML Code_Preproc.map_pre}~@{text "f"}~@{text "thy"} changes
  11.277 -     the preprocessor simpset.
  11.278 -
  11.279 -  \item @{ML Code_Preproc.add_functrans}~@{text "(name, f)"}~@{text "thy"} adds
  11.280 -     function transformer @{text f} (named @{text name}) to executable content;
  11.281 -     @{text f} is a transformer of the code equations belonging
  11.282 -     to a certain function definition, depending on the
  11.283 -     current theory context.  Returning @{text NONE} indicates that no
  11.284 -     transformation took place;  otherwise, the whole process will be iterated
  11.285 -     with the new code equations.
  11.286 -
  11.287 -  \item @{ML Code_Preproc.del_functrans}~@{text "name"}~@{text "thy"} removes
  11.288 -     function transformer named @{text name} from executable content.
  11.289 -
  11.290 -  \item @{ML Code.add_datatype}~@{text cs}~@{text thy} adds
  11.291 -     a datatype to executable content, with generation
  11.292 -     set @{text cs}.
  11.293 -
  11.294 -  \item @{ML Code.get_type_of_constr_or_abstr}~@{text "thy"}~@{text "const"}
  11.295 -     returns type constructor corresponding to
  11.296 -     constructor @{text const}; returns @{text NONE}
  11.297 -     if @{text const} is no constructor.
  11.298 -
  11.299 -  \end{description}
  11.300 -*}
  11.301 -
  11.302 -
  11.303 -subsubsection {* Data depending on the theory's executable content *}
  11.304 -
  11.305 -text {*
  11.306 -  Implementing code generator applications on top of the framework set
  11.307 -  out so far usually not only involves using those primitive
  11.308 -  interfaces but also storing code-dependent data and various other
  11.309 -  things.
  11.310 -
  11.311 -  Due to incrementality of code generation, changes in the theory's
  11.312 -  executable content have to be propagated in a certain fashion.
  11.313 -  Additionally, such changes may occur not only during theory
  11.314 -  extension but also during theory merge, which is a little bit nasty
  11.315 -  from an implementation point of view.  The framework provides a
  11.316 -  solution to this technical challenge by providing a functorial data
  11.317 -  slot @{ML_functor Code_Data}; on instantiation of this functor, the
  11.318 -  following types and operations are required:
  11.319 -
  11.320 -  \medskip
  11.321 -  \begin{tabular}{l}
  11.322 -  @{text "type T"} \\
  11.323 -  @{text "val empty: T"} \\
  11.324 -  \end{tabular}
  11.325 -
  11.326 -  \begin{description}
  11.327 -
  11.328 -  \item @{text T} the type of data to store.
  11.329 -
  11.330 -  \item @{text empty} initial (empty) data.
  11.331 -
  11.332 -  \end{description}
  11.333 -
  11.334 -  \noindent An instance of @{ML_functor Code_Data} provides the
  11.335 -  following interface:
  11.336 -
  11.337 -  \medskip
  11.338 -  \begin{tabular}{l}
  11.339 -  @{text "change: theory \<rightarrow> (T \<rightarrow> T) \<rightarrow> T"} \\
  11.340 -  @{text "change_yield: theory \<rightarrow> (T \<rightarrow> 'a * T) \<rightarrow> 'a * T"}
  11.341 -  \end{tabular}
  11.342 -
  11.343 -  \begin{description}
  11.344 -
  11.345 -  \item @{text change} update of current data (cached!) by giving a
  11.346 -    continuation.
  11.347 -
  11.348 -  \item @{text change_yield} update with side result.
  11.349 -
  11.350 -  \end{description}
  11.351 -*}
  11.352 -
  11.353 -end
  11.354 -
    12.1 --- a/doc-src/Codegen/Inductive_Predicate.thy	Tue Aug 28 18:46:15 2012 +0200
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,275 +0,0 @@
    12.4 -theory Inductive_Predicate
    12.5 -imports Setup
    12.6 -begin
    12.7 -
    12.8 -(*<*)
    12.9 -hide_const %invisible append
   12.10 -
   12.11 -inductive %invisible append where
   12.12 -  "append [] ys ys"
   12.13 -| "append xs ys zs \<Longrightarrow> append (x # xs) ys (x # zs)"
   12.14 -
   12.15 -lemma %invisible append: "append xs ys zs = (xs @ ys = zs)"
   12.16 -  by (induct xs arbitrary: ys zs) (auto elim: append.cases intro: append.intros)
   12.17 -
   12.18 -lemmas lexordp_def = 
   12.19 -  lexordp_def [unfolded lexord_def mem_Collect_eq split]
   12.20 -(*>*)
   12.21 -
   12.22 -section {* Inductive Predicates \label{sec:inductive} *}
   12.23 -
   12.24 -text {*
   12.25 -  The @{text "predicate compiler"} is an extension of the code generator
   12.26 -  which turns inductive specifications into equational ones, from
   12.27 -  which in turn executable code can be generated.  The mechanisms of
   12.28 -  this compiler are described in detail in
   12.29 -  \cite{Berghofer-Bulwahn-Haftmann:2009:TPHOL}.
   12.30 -
   12.31 -  Consider the simple predicate @{const append} given by these two
   12.32 -  introduction rules:
   12.33 -*}
   12.34 -
   12.35 -text %quote {*
   12.36 -  @{thm append.intros(1)[of ys]} \\
   12.37 -  @{thm append.intros(2)[of xs ys zs x]}
   12.38 -*}
   12.39 -
   12.40 -text {*
   12.41 -  \noindent To invoke the compiler, simply use @{command_def "code_pred"}:
   12.42 -*}
   12.43 -
   12.44 -code_pred %quote append .
   12.45 -
   12.46 -text {*
   12.47 -  \noindent The @{command "code_pred"} command takes the name of the
   12.48 -  inductive predicate and then you put a period to discharge a trivial
   12.49 -  correctness proof.  The compiler infers possible modes for the
   12.50 -  predicate and produces the derived code equations.  Modes annotate
   12.51 -  which (parts of the) arguments are to be taken as input, and which
   12.52 -  output. Modes are similar to types, but use the notation @{text "i"}
   12.53 -  for input and @{text "o"} for output.
   12.54 - 
   12.55 -  For @{term "append"}, the compiler can infer the following modes:
   12.56 -  \begin{itemize}
   12.57 -    \item @{text "i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool"}
   12.58 -    \item @{text "i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool"}
   12.59 -    \item @{text "o \<Rightarrow> o \<Rightarrow> i \<Rightarrow> bool"}
   12.60 -  \end{itemize}
   12.61 -  You can compute sets of predicates using @{command_def "values"}:
   12.62 -*}
   12.63 -
   12.64 -values %quote "{zs. append [(1::nat),2,3] [4,5] zs}"
   12.65 -
   12.66 -text {* \noindent outputs @{text "{[1, 2, 3, 4, 5]}"}, and *}
   12.67 -
   12.68 -values %quote "{(xs, ys). append xs ys [(2::nat),3]}"
   12.69 -
   12.70 -text {* \noindent outputs @{text "{([], [2, 3]), ([2], [3]), ([2, 3], [])}"}. *}
   12.71 -
   12.72 -text {*
   12.73 -  \noindent If you are only interested in the first elements of the
   12.74 -  set comprehension (with respect to a depth-first search on the
   12.75 -  introduction rules), you can pass an argument to @{command "values"}
   12.76 -  to specify the number of elements you want:
   12.77 -*}
   12.78 -
   12.79 -values %quote 1 "{(xs, ys). append xs ys [(1::nat), 2, 3, 4]}"
   12.80 -values %quote 3 "{(xs, ys). append xs ys [(1::nat), 2, 3, 4]}"
   12.81 -
   12.82 -text {*
   12.83 -  \noindent The @{command "values"} command can only compute set
   12.84 -  comprehensions for which a mode has been inferred.
   12.85 -
   12.86 -  The code equations for a predicate are made available as theorems with
   12.87 -  the suffix @{text "equation"}, and can be inspected with:
   12.88 -*}
   12.89 -
   12.90 -thm %quote append.equation
   12.91 -
   12.92 -text {*
   12.93 -  \noindent More advanced options are described in the following subsections.
   12.94 -*}
   12.95 -
   12.96 -subsection {* Alternative names for functions *}
   12.97 -
   12.98 -text {* 
   12.99 -  By default, the functions generated from a predicate are named after
  12.100 -  the predicate with the mode mangled into the name (e.g., @{text
  12.101 -  "append_i_i_o"}).  You can specify your own names as follows:
  12.102 -*}
  12.103 -
  12.104 -code_pred %quote (modes: i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool as concat,
  12.105 -  o \<Rightarrow> o \<Rightarrow> i \<Rightarrow> bool as split,
  12.106 -  i \<Rightarrow> o \<Rightarrow> i \<Rightarrow> bool as suffix) append .
  12.107 -
  12.108 -subsection {* Alternative introduction rules *}
  12.109 -
  12.110 -text {*
  12.111 -  Sometimes the introduction rules of an predicate are not executable
  12.112 -  because they contain non-executable constants or specific modes
  12.113 -  could not be inferred.  It is also possible that the introduction
  12.114 -  rules yield a function that loops forever due to the execution in a
  12.115 -  depth-first search manner.  Therefore, you can declare alternative
  12.116 -  introduction rules for predicates with the attribute @{attribute
  12.117 -  "code_pred_intro"}.  For example, the transitive closure is defined
  12.118 -  by:
  12.119 -*}
  12.120 -
  12.121 -text %quote {*
  12.122 -  @{lemma [source] "r a b \<Longrightarrow> tranclp r a b" by (fact tranclp.intros(1))}\\
  12.123 -  @{lemma [source] "tranclp r a b \<Longrightarrow> r b c \<Longrightarrow> tranclp r a c" by (fact tranclp.intros(2))}
  12.124 -*}
  12.125 -
  12.126 -text {*
  12.127 -  \noindent These rules do not suit well for executing the transitive
  12.128 -  closure with the mode @{text "(i \<Rightarrow> o \<Rightarrow> bool) \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool"}, as
  12.129 -  the second rule will cause an infinite loop in the recursive call.
  12.130 -  This can be avoided using the following alternative rules which are
  12.131 -  declared to the predicate compiler by the attribute @{attribute
  12.132 -  "code_pred_intro"}:
  12.133 -*}
  12.134 -
  12.135 -lemma %quote [code_pred_intro]:
  12.136 -  "r a b \<Longrightarrow> tranclp r a b"
  12.137 -  "r a b \<Longrightarrow> tranclp r b c \<Longrightarrow> tranclp r a c"
  12.138 -by auto
  12.139 -
  12.140 -text {*
  12.141 -  \noindent After declaring all alternative rules for the transitive
  12.142 -  closure, you invoke @{command "code_pred"} as usual.  As you have
  12.143 -  declared alternative rules for the predicate, you are urged to prove
  12.144 -  that these introduction rules are complete, i.e., that you can
  12.145 -  derive an elimination rule for the alternative rules:
  12.146 -*}
  12.147 -
  12.148 -code_pred %quote tranclp
  12.149 -proof -
  12.150 -  case tranclp
  12.151 -  from this converse_tranclpE [OF tranclp.prems] show thesis by metis
  12.152 -qed
  12.153 -
  12.154 -text {*
  12.155 -  \noindent Alternative rules can also be used for constants that have
  12.156 -  not been defined inductively. For example, the lexicographic order
  12.157 -  which is defined as:
  12.158 -*}
  12.159 -
  12.160 -text %quote {*
  12.161 -  @{thm [display] lexordp_def [of r]}
  12.162 -*}
  12.163 -
  12.164 -text {*
  12.165 -  \noindent To make it executable, you can derive the following two
  12.166 -  rules and prove the elimination rule:
  12.167 -*}
  12.168 -
  12.169 -lemma %quote [code_pred_intro]:
  12.170 -  "append xs (a # v) ys \<Longrightarrow> lexordp r xs ys"
  12.171 -(*<*)unfolding lexordp_def by (auto simp add: append)(*>*)
  12.172 -
  12.173 -lemma %quote [code_pred_intro]:
  12.174 -  "append u (a # v) xs \<Longrightarrow> append u (b # w) ys \<Longrightarrow> r a b
  12.175 -  \<Longrightarrow> lexordp r xs ys"
  12.176 -(*<*)unfolding lexordp_def append apply simp
  12.177 -apply (rule disjI2) by auto(*>*)
  12.178 -
  12.179 -code_pred %quote lexordp
  12.180 -(*<*)proof -
  12.181 -  fix r xs ys
  12.182 -  assume lexord: "lexordp r xs ys"
  12.183 -  assume 1: "\<And>r' xs' ys' a v. r = r' \<Longrightarrow> xs = xs' \<Longrightarrow> ys = ys'
  12.184 -    \<Longrightarrow> append xs' (a # v) ys' \<Longrightarrow> thesis"
  12.185 -  assume 2: "\<And>r' xs' ys' u a v b w. r = r' \<Longrightarrow> xs = xs' \<Longrightarrow> ys = ys'
  12.186 -    \<Longrightarrow> append u (a # v) xs' \<Longrightarrow> append u (b # w) ys' \<Longrightarrow> r' a b \<Longrightarrow> thesis"
  12.187 -  {
  12.188 -    assume "\<exists>a v. ys = xs @ a # v"
  12.189 -    from this 1 have thesis
  12.190 -        by (fastforce simp add: append)
  12.191 -  } moreover
  12.192 -  {
  12.193 -    assume "\<exists>u a b v w. r a b \<and> xs = u @ a # v \<and> ys = u @ b # w"
  12.194 -    from this 2 have thesis by (fastforce simp add: append)
  12.195 -  } moreover
  12.196 -  note lexord
  12.197 -  ultimately show thesis
  12.198 -    unfolding lexordp_def
  12.199 -    by fastforce
  12.200 -qed(*>*)
  12.201 -
  12.202 -
  12.203 -subsection {* Options for values *}
  12.204 -
  12.205 -text {*
  12.206 -  In the presence of higher-order predicates, multiple modes for some
  12.207 -  predicate could be inferred that are not disambiguated by the
  12.208 -  pattern of the set comprehension.  To disambiguate the modes for the
  12.209 -  arguments of a predicate, you can state the modes explicitly in the
  12.210 -  @{command "values"} command.  Consider the simple predicate @{term
  12.211 -  "succ"}:
  12.212 -*}
  12.213 -
  12.214 -inductive %quote succ :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
  12.215 -  "succ 0 (Suc 0)"
  12.216 -| "succ x y \<Longrightarrow> succ (Suc x) (Suc y)"
  12.217 -
  12.218 -code_pred %quote succ .
  12.219 -
  12.220 -text {*
  12.221 -  \noindent For this, the predicate compiler can infer modes @{text "o
  12.222 -  \<Rightarrow> o \<Rightarrow> bool"}, @{text "i \<Rightarrow> o \<Rightarrow> bool"}, @{text "o \<Rightarrow> i \<Rightarrow> bool"} and
  12.223 -  @{text "i \<Rightarrow> i \<Rightarrow> bool"}.  The invocation of @{command "values"}
  12.224 -  @{text "{n. tranclp succ 10 n}"} loops, as multiple modes for the
  12.225 -  predicate @{text "succ"} are possible and here the first mode @{text
  12.226 -  "o \<Rightarrow> o \<Rightarrow> bool"} is chosen. To choose another mode for the argument,
  12.227 -  you can declare the mode for the argument between the @{command
  12.228 -  "values"} and the number of elements.
  12.229 -*}
  12.230 -
  12.231 -values %quote [mode: i \<Rightarrow> o \<Rightarrow> bool] 1 "{n. tranclp succ 10 n}" (*FIMXE does not terminate for n\<ge>1*)
  12.232 -values %quote [mode: o \<Rightarrow> i \<Rightarrow> bool] 1 "{n. tranclp succ n 10}"
  12.233 -
  12.234 -
  12.235 -subsection {* Embedding into functional code within Isabelle/HOL *}
  12.236 -
  12.237 -text {*
  12.238 -  To embed the computation of an inductive predicate into functions
  12.239 -  that are defined in Isabelle/HOL, you have a number of options:
  12.240 -
  12.241 -  \begin{itemize}
  12.242 -
  12.243 -    \item You want to use the first-order predicate with the mode
  12.244 -      where all arguments are input. Then you can use the predicate directly, e.g.
  12.245 -
  12.246 -      \begin{quote}
  12.247 -        @{text "valid_suffix ys zs = "} \\
  12.248 -        @{text "(if append [Suc 0, 2] ys zs then Some ys else None)"}
  12.249 -      \end{quote}
  12.250 -
  12.251 -    \item If you know that the execution returns only one value (it is
  12.252 -      deterministic), then you can use the combinator @{term
  12.253 -      "Predicate.the"}, e.g., a functional concatenation of lists is
  12.254 -      defined with
  12.255 -
  12.256 -      \begin{quote}
  12.257 -        @{term "functional_concat xs ys = Predicate.the (append_i_i_o xs ys)"}
  12.258 -      \end{quote}
  12.259 -
  12.260 -      Note that if the evaluation does not return a unique value, it
  12.261 -      raises a run-time error @{term "not_unique"}.
  12.262 -
  12.263 -  \end{itemize}
  12.264 -*}
  12.265 -
  12.266 -
  12.267 -subsection {* Further Examples *}
  12.268 -
  12.269 -text {*
  12.270 -  Further examples for compiling inductive predicates can be found in
  12.271 -  the @{text "HOL/ex/Predicate_Compile_ex.thy"} theory file.  There are
  12.272 -  also some examples in the Archive of Formal Proofs, notably in the
  12.273 -  @{text "POPLmark-deBruijn"} and the @{text "FeatherweightJava"}
  12.274 -  sessions.
  12.275 -*}
  12.276 -
  12.277 -end
  12.278 -
    13.1 --- a/doc-src/Codegen/Introduction.thy	Tue Aug 28 18:46:15 2012 +0200
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,242 +0,0 @@
    13.4 -theory Introduction
    13.5 -imports Setup
    13.6 -begin
    13.7 -
    13.8 -section {* Introduction *}
    13.9 -
   13.10 -text {*
   13.11 -  This tutorial introduces the code generator facilities of @{text
   13.12 -  "Isabelle/HOL"}.  It allows to turn (a certain class of) HOL
   13.13 -  specifications into corresponding executable code in the programming
   13.14 -  languages @{text SML} \cite{SML}, @{text OCaml} \cite{OCaml},
   13.15 -  @{text Haskell} \cite{haskell-revised-report} and @{text Scala}
   13.16 -  \cite{scala-overview-tech-report}.
   13.17 -
   13.18 -  To profit from this tutorial, some familiarity and experience with
   13.19 -  @{theory HOL} \cite{isa-tutorial} and its basic theories is assumed.
   13.20 -*}
   13.21 -
   13.22 -
   13.23 -subsection {* Code generation principle: shallow embedding \label{sec:principle} *}
   13.24 -
   13.25 -text {*
   13.26 -  The key concept for understanding Isabelle's code generation is
   13.27 -  \emph{shallow embedding}: logical entities like constants, types and
   13.28 -  classes are identified with corresponding entities in the target
   13.29 -  language.  In particular, the carrier of a generated program's
   13.30 -  semantics are \emph{equational theorems} from the logic.  If we view
   13.31 -  a generated program as an implementation of a higher-order rewrite
   13.32 -  system, then every rewrite step performed by the program can be
   13.33 -  simulated in the logic, which guarantees partial correctness
   13.34 -  \cite{Haftmann-Nipkow:2010:code}.
   13.35 -*}
   13.36 -
   13.37 -
   13.38 -subsection {* A quick start with the Isabelle/HOL toolbox \label{sec:queue_example} *}
   13.39 -
   13.40 -text {*
   13.41 -  In a HOL theory, the @{command_def datatype} and @{command_def
   13.42 -  definition}/@{command_def primrec}/@{command_def fun} declarations
   13.43 -  form the core of a functional programming language.  By default
   13.44 -  equational theorems stemming from those are used for generated code,
   13.45 -  therefore \qt{naive} code generation can proceed without further
   13.46 -  ado.
   13.47 -
   13.48 -  For example, here a simple \qt{implementation} of amortised queues:
   13.49 -*}
   13.50 -
   13.51 -datatype %quote 'a queue = AQueue "'a list" "'a list"
   13.52 -
   13.53 -definition %quote empty :: "'a queue" where
   13.54 -  "empty = AQueue [] []"
   13.55 -
   13.56 -primrec %quote enqueue :: "'a \<Rightarrow> 'a queue \<Rightarrow> 'a queue" where
   13.57 -  "enqueue x (AQueue xs ys) = AQueue (x # xs) ys"
   13.58 -
   13.59 -fun %quote dequeue :: "'a queue \<Rightarrow> 'a option \<times> 'a queue" where
   13.60 -    "dequeue (AQueue [] []) = (None, AQueue [] [])"
   13.61 -  | "dequeue (AQueue xs (y # ys)) = (Some y, AQueue xs ys)"
   13.62 -  | "dequeue (AQueue xs []) =
   13.63 -      (case rev xs of y # ys \<Rightarrow> (Some y, AQueue [] ys))" (*<*)
   13.64 -
   13.65 -lemma %invisible dequeue_nonempty_Nil [simp]:
   13.66 -  "xs \<noteq> [] \<Longrightarrow> dequeue (AQueue xs []) = (case rev xs of y # ys \<Rightarrow> (Some y, AQueue [] ys))"
   13.67 -  by (cases xs) (simp_all split: list.splits) (*>*)
   13.68 -
   13.69 -text {* \noindent Then we can generate code e.g.~for @{text SML} as follows: *}
   13.70 -
   13.71 -export_code %quote empty dequeue enqueue in SML
   13.72 -  module_name Example file "examples/example.ML"
   13.73 -
   13.74 -text {* \noindent resulting in the following code: *}
   13.75 -
   13.76 -text %quotetypewriter {*
   13.77 -  @{code_stmts empty enqueue dequeue (SML)}
   13.78 -*}
   13.79 -
   13.80 -text {*
   13.81 -  \noindent The @{command_def export_code} command takes a
   13.82 -  space-separated list of constants for which code shall be generated;
   13.83 -  anything else needed for those is added implicitly.  Then follows a
   13.84 -  target language identifier and a freely chosen module name.  A file
   13.85 -  name denotes the destination to store the generated code.  Note that
   13.86 -  the semantics of the destination depends on the target language: for
   13.87 -  @{text SML}, @{text OCaml} and @{text Scala} it denotes a \emph{file},
   13.88 -  for @{text Haskell} it denotes a \emph{directory} where a file named as the
   13.89 -  module name (with extension @{text ".hs"}) is written:
   13.90 -*}
   13.91 -
   13.92 -export_code %quote empty dequeue enqueue in Haskell
   13.93 -  module_name Example file "examples/"
   13.94 -
   13.95 -text {*
   13.96 -  \noindent This is the corresponding code:
   13.97 -*}
   13.98 -
   13.99 -text %quotetypewriter {*
  13.100 -  @{code_stmts empty enqueue dequeue (Haskell)}
  13.101 -*}
  13.102 -
  13.103 -text {*
  13.104 -  \noindent For more details about @{command export_code} see
  13.105 -  \secref{sec:further}.
  13.106 -*}
  13.107 -
  13.108 -
  13.109 -subsection {* Type classes *}
  13.110 -
  13.111 -text {*
  13.112 -  Code can also be generated from type classes in a Haskell-like
  13.113 -  manner.  For illustration here an example from abstract algebra:
  13.114 -*}
  13.115 -
  13.116 -class %quote semigroup =
  13.117 -  fixes mult :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "\<otimes>" 70)
  13.118 -  assumes assoc: "(x \<otimes> y) \<otimes> z = x \<otimes> (y \<otimes> z)"
  13.119 -
  13.120 -class %quote monoid = semigroup +
  13.121 -  fixes neutral :: 'a ("\<one>")
  13.122 -  assumes neutl: "\<one> \<otimes> x = x"
  13.123 -    and neutr: "x \<otimes> \<one> = x"
  13.124 -
  13.125 -instantiation %quote nat :: monoid
  13.126 -begin
  13.127 -
  13.128 -primrec %quote mult_nat where
  13.129 -    "0 \<otimes> n = (0\<Colon>nat)"
  13.130 -  | "Suc m \<otimes> n = n + m \<otimes> n"
  13.131 -
  13.132 -definition %quote neutral_nat where
  13.133 -  "\<one> = Suc 0"
  13.134 -
  13.135 -lemma %quote add_mult_distrib:
  13.136 -  fixes n m q :: nat
  13.137 -  shows "(n + m) \<otimes> q = n \<otimes> q + m \<otimes> q"
  13.138 -  by (induct n) simp_all
  13.139 -
  13.140 -instance %quote proof
  13.141 -  fix m n q :: nat
  13.142 -  show "m \<otimes> n \<otimes> q = m \<otimes> (n \<otimes> q)"
  13.143 -    by (induct m) (simp_all add: add_mult_distrib)
  13.144 -  show "\<one> \<otimes> n = n"
  13.145 -    by (simp add: neutral_nat_def)
  13.146 -  show "m \<otimes> \<one> = m"
  13.147 -    by (induct m) (simp_all add: neutral_nat_def)
  13.148 -qed
  13.149 -
  13.150 -end %quote
  13.151 -
  13.152 -text {*
  13.153 -  \noindent We define the natural operation of the natural numbers
  13.154 -  on monoids:
  13.155 -*}
  13.156 -
  13.157 -primrec %quote (in monoid) pow :: "nat \<Rightarrow> 'a \<Rightarrow> 'a" where
  13.158 -    "pow 0 a = \<one>"
  13.159 -  | "pow (Suc n) a = a \<otimes> pow n a"
  13.160 -
  13.161 -text {*
  13.162 -  \noindent This we use to define the discrete exponentiation
  13.163 -  function:
  13.164 -*}
  13.165 -
  13.166 -definition %quote bexp :: "nat \<Rightarrow> nat" where
  13.167 -  "bexp n = pow n (Suc (Suc 0))"
  13.168 -
  13.169 -text {*
  13.170 -  \noindent The corresponding code in Haskell uses that language's
  13.171 -  native classes:
  13.172 -*}
  13.173 -
  13.174 -text %quotetypewriter {*
  13.175 -  @{code_stmts bexp (Haskell)}
  13.176 -*}
  13.177 -
  13.178 -text {*
  13.179 -  \noindent This is a convenient place to show how explicit dictionary
  13.180 -  construction manifests in generated code -- the same example in
  13.181 -  @{text SML}:
  13.182 -*}
  13.183 -
  13.184 -text %quotetypewriter {*
  13.185 -  @{code_stmts bexp (SML)}
  13.186 -*}
  13.187 -
  13.188 -text {*
  13.189 -  \noindent Note the parameters with trailing underscore (@{verbatim
  13.190 -  "A_"}), which are the dictionary parameters.
  13.191 -*}
  13.192 -
  13.193 -
  13.194 -subsection {* How to continue from here *}
  13.195 -
  13.196 -text {*
  13.197 -  What you have seen so far should be already enough in a lot of
  13.198 -  cases.  If you are content with this, you can quit reading here.
  13.199 -
  13.200 -  Anyway, to understand situations where problems occur or to increase
  13.201 -  the scope of code generation beyond default, it is necessary to gain
  13.202 -  some understanding how the code generator actually works:
  13.203 -
  13.204 -  \begin{itemize}
  13.205 -
  13.206 -    \item The foundations of the code generator are described in
  13.207 -      \secref{sec:foundations}.
  13.208 -
  13.209 -    \item In particular \secref{sec:utterly_wrong} gives hints how to
  13.210 -      debug situations where code generation does not succeed as
  13.211 -      expected.
  13.212 -
  13.213 -    \item The scope and quality of generated code can be increased
  13.214 -      dramatically by applying refinement techniques, which are
  13.215 -      introduced in \secref{sec:refinement}.
  13.216 -
  13.217 -    \item Inductive predicates can be turned executable using an
  13.218 -      extension of the code generator \secref{sec:inductive}.
  13.219 -
  13.220 -    \item If you want to utilize code generation to obtain fast
  13.221 -      evaluators e.g.~for decision procedures, have a look at
  13.222 -      \secref{sec:evaluation}.
  13.223 -
  13.224 -    \item You may want to skim over the more technical sections
  13.225 -      \secref{sec:adaptation} and \secref{sec:further}.
  13.226 -
  13.227 -    \item The target language Scala \cite{scala-overview-tech-report}
  13.228 -      comes with some specialities discussed in \secref{sec:scala}.
  13.229 -
  13.230 -    \item For exhaustive syntax diagrams etc. you should visit the
  13.231 -      Isabelle/Isar Reference Manual \cite{isabelle-isar-ref}.
  13.232 -
  13.233 -  \end{itemize}
  13.234 -
  13.235 -  \bigskip
  13.236 -
  13.237 -  \begin{center}\fbox{\fbox{\begin{minipage}{8cm}
  13.238 -
  13.239 -    \begin{center}\textit{Happy proving, happy hacking!}\end{center}
  13.240 -
  13.241 -  \end{minipage}}}\end{center}
  13.242 -*}
  13.243 -
  13.244 -end
  13.245 -
    14.1 --- a/doc-src/Codegen/Refinement.thy	Tue Aug 28 18:46:15 2012 +0200
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,274 +0,0 @@
    14.4 -theory Refinement
    14.5 -imports Setup
    14.6 -begin
    14.7 -
    14.8 -section {* Program and datatype refinement \label{sec:refinement} *}
    14.9 -
   14.10 -text {*
   14.11 -  Code generation by shallow embedding (cf.~\secref{sec:principle})
   14.12 -  allows to choose code equations and datatype constructors freely,
   14.13 -  given that some very basic syntactic properties are met; this
   14.14 -  flexibility opens up mechanisms for refinement which allow to extend
   14.15 -  the scope and quality of generated code dramatically.
   14.16 -*}
   14.17 -
   14.18 -
   14.19 -subsection {* Program refinement *}
   14.20 -
   14.21 -text {*
   14.22 -  Program refinement works by choosing appropriate code equations
   14.23 -  explicitly (cf.~\secref{sec:equations}); as example, we use Fibonacci
   14.24 -  numbers:
   14.25 -*}
   14.26 -
   14.27 -fun %quote fib :: "nat \<Rightarrow> nat" where
   14.28 -    "fib 0 = 0"
   14.29 -  | "fib (Suc 0) = Suc 0"
   14.30 -  | "fib (Suc (Suc n)) = fib n + fib (Suc n)"
   14.31 -
   14.32 -text {*
   14.33 -  \noindent The runtime of the corresponding code grows exponential due
   14.34 -  to two recursive calls:
   14.35 -*}
   14.36 -
   14.37 -text %quotetypewriter {*
   14.38 -  @{code_stmts fib (consts) fib (Haskell)}
   14.39 -*}
   14.40 -
   14.41 -text {*
   14.42 -  \noindent A more efficient implementation would use dynamic
   14.43 -  programming, e.g.~sharing of common intermediate results between
   14.44 -  recursive calls.  This idea is expressed by an auxiliary operation
   14.45 -  which computes a Fibonacci number and its successor simultaneously:
   14.46 -*}
   14.47 -
   14.48 -definition %quote fib_step :: "nat \<Rightarrow> nat \<times> nat" where
   14.49 -  "fib_step n = (fib (Suc n), fib n)"
   14.50 -
   14.51 -text {*
   14.52 -  \noindent This operation can be implemented by recursion using
   14.53 -  dynamic programming:
   14.54 -*}
   14.55 -
   14.56 -lemma %quote [code]:
   14.57 -  "fib_step 0 = (Suc 0, 0)"
   14.58 -  "fib_step (Suc n) = (let (m, q) = fib_step n in (m + q, m))"
   14.59 -  by (simp_all add: fib_step_def)
   14.60 -
   14.61 -text {*
   14.62 -  \noindent What remains is to implement @{const fib} by @{const
   14.63 -  fib_step} as follows:
   14.64 -*}
   14.65 -
   14.66 -lemma %quote [code]:
   14.67 -  "fib 0 = 0"
   14.68 -  "fib (Suc n) = fst (fib_step n)"
   14.69 -  by (simp_all add: fib_step_def)
   14.70 -
   14.71 -text {*
   14.72 -  \noindent The resulting code shows only linear growth of runtime:
   14.73 -*}
   14.74 -
   14.75 -text %quotetypewriter {*
   14.76 -  @{code_stmts fib (consts) fib fib_step (Haskell)}
   14.77 -*}
   14.78 -
   14.79 -
   14.80 -subsection {* Datatype refinement *}
   14.81 -
   14.82 -text {*
   14.83 -  Selecting specific code equations \emph{and} datatype constructors
   14.84 -  leads to datatype refinement.  As an example, we will develop an
   14.85 -  alternative representation of the queue example given in
   14.86 -  \secref{sec:queue_example}.  The amortised representation is
   14.87 -  convenient for generating code but exposes its \qt{implementation}
   14.88 -  details, which may be cumbersome when proving theorems about it.
   14.89 -  Therefore, here is a simple, straightforward representation of
   14.90 -  queues:
   14.91 -*}
   14.92 -
   14.93 -datatype %quote 'a queue = Queue "'a list"
   14.94 -
   14.95 -definition %quote empty :: "'a queue" where
   14.96 -  "empty = Queue []"
   14.97 -
   14.98 -primrec %quote enqueue :: "'a \<Rightarrow> 'a queue \<Rightarrow> 'a queue" where
   14.99 -  "enqueue x (Queue xs) = Queue (xs @ [x])"
  14.100 -
  14.101 -fun %quote dequeue :: "'a queue \<Rightarrow> 'a option \<times> 'a queue" where
  14.102 -    "dequeue (Queue []) = (None, Queue [])"
  14.103 -  | "dequeue (Queue (x # xs)) = (Some x, Queue xs)"
  14.104 -
  14.105 -text {*
  14.106 -  \noindent This we can use directly for proving;  for executing,
  14.107 -  we provide an alternative characterisation:
  14.108 -*}
  14.109 -
  14.110 -definition %quote AQueue :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a queue" where
  14.111 -  "AQueue xs ys = Queue (ys @ rev xs)"
  14.112 -
  14.113 -code_datatype %quote AQueue
  14.114 -
  14.115 -text {*
  14.116 -  \noindent Here we define a \qt{constructor} @{const "AQueue"} which
  14.117 -  is defined in terms of @{text "Queue"} and interprets its arguments
  14.118 -  according to what the \emph{content} of an amortised queue is supposed
  14.119 -  to be.
  14.120 -
  14.121 -  The prerequisite for datatype constructors is only syntactical: a
  14.122 -  constructor must be of type @{text "\<tau> = \<dots> \<Rightarrow> \<kappa> \<alpha>\<^isub>1 \<dots> \<alpha>\<^isub>n"} where @{text
  14.123 -  "{\<alpha>\<^isub>1, \<dots>, \<alpha>\<^isub>n}"} is exactly the set of \emph{all} type variables in
  14.124 -  @{text "\<tau>"}; then @{text "\<kappa>"} is its corresponding datatype.  The
  14.125 -  HOL datatype package by default registers any new datatype with its
  14.126 -  constructors, but this may be changed using @{command_def
  14.127 -  code_datatype}; the currently chosen constructors can be inspected
  14.128 -  using the @{command print_codesetup} command.
  14.129 -
  14.130 -  Equipped with this, we are able to prove the following equations
  14.131 -  for our primitive queue operations which \qt{implement} the simple
  14.132 -  queues in an amortised fashion:
  14.133 -*}
  14.134 -
  14.135 -lemma %quote empty_AQueue [code]:
  14.136 -  "empty = AQueue [] []"
  14.137 -  by (simp add: AQueue_def empty_def)
  14.138 -
  14.139 -lemma %quote enqueue_AQueue [code]:
  14.140 -  "enqueue x (AQueue xs ys) = AQueue (x # xs) ys"
  14.141 -  by (simp add: AQueue_def)
  14.142 -
  14.143 -lemma %quote dequeue_AQueue [code]:
  14.144 -  "dequeue (AQueue xs []) =
  14.145 -    (if xs = [] then (None, AQueue [] [])
  14.146 -    else dequeue (AQueue [] (rev xs)))"
  14.147 -  "dequeue (AQueue xs (y # ys)) = (Some y, AQueue xs ys)"
  14.148 -  by (simp_all add: AQueue_def)
  14.149 -
  14.150 -text {*
  14.151 -  \noindent It is good style, although no absolute requirement, to
  14.152 -  provide code equations for the original artefacts of the implemented
  14.153 -  type, if possible; in our case, these are the datatype constructor
  14.154 -  @{const Queue} and the case combinator @{const queue_case}:
  14.155 -*}
  14.156 -
  14.157 -lemma %quote Queue_AQueue [code]:
  14.158 -  "Queue = AQueue []"
  14.159 -  by (simp add: AQueue_def fun_eq_iff)
  14.160 -
  14.161 -lemma %quote queue_case_AQueue [code]:
  14.162 -  "queue_case f (AQueue xs ys) = f (ys @ rev xs)"
  14.163 -  by (simp add: AQueue_def)
  14.164 -
  14.165 -text {*
  14.166 -  \noindent The resulting code looks as expected:
  14.167 -*}
  14.168 -
  14.169 -text %quotetypewriter {*
  14.170 -  @{code_stmts empty enqueue dequeue Queue queue_case (SML)}
  14.171 -*}
  14.172 -
  14.173 -text {*
  14.174 -  The same techniques can also be applied to types which are not
  14.175 -  specified as datatypes, e.g.~type @{typ int} is originally specified
  14.176 -  as quotient type by means of @{command_def typedef}, but for code
  14.177 -  generation constants allowing construction of binary numeral values
  14.178 -  are used as constructors for @{typ int}.
  14.179 -
  14.180 -  This approach however fails if the representation of a type demands
  14.181 -  invariants; this issue is discussed in the next section.
  14.182 -*}
  14.183 -
  14.184 -
  14.185 -subsection {* Datatype refinement involving invariants \label{sec:invariant} *}
  14.186 -
  14.187 -text {*
  14.188 -  Datatype representation involving invariants require a dedicated
  14.189 -  setup for the type and its primitive operations.  As a running
  14.190 -  example, we implement a type @{text "'a dlist"} of list consisting
  14.191 -  of distinct elements.
  14.192 -
  14.193 -  The first step is to decide on which representation the abstract
  14.194 -  type (in our example @{text "'a dlist"}) should be implemented.
  14.195 -  Here we choose @{text "'a list"}.  Then a conversion from the concrete
  14.196 -  type to the abstract type must be specified, here:
  14.197 -*}
  14.198 -
  14.199 -text %quote {*
  14.200 -  @{term_type Dlist}
  14.201 -*}
  14.202 -
  14.203 -text {*
  14.204 -  \noindent Next follows the specification of a suitable \emph{projection},
  14.205 -  i.e.~a conversion from abstract to concrete type:
  14.206 -*}
  14.207 -
  14.208 -text %quote {*
  14.209 -  @{term_type list_of_dlist}
  14.210 -*}
  14.211 -
  14.212 -text {*
  14.213 -  \noindent This projection must be specified such that the following
  14.214 -  \emph{abstract datatype certificate} can be proven:
  14.215 -*}
  14.216 -
  14.217 -lemma %quote [code abstype]:
  14.218 -  "Dlist (list_of_dlist dxs) = dxs"
  14.219 -  by (fact Dlist_list_of_dlist)
  14.220 -
  14.221 -text {*
  14.222 -  \noindent Note that so far the invariant on representations
  14.223 -  (@{term_type distinct}) has never been mentioned explicitly:
  14.224 -  the invariant is only referred to implicitly: all values in
  14.225 -  set @{term "{xs. list_of_dlist (Dlist xs) = xs}"} are invariant,
  14.226 -  and in our example this is exactly @{term "{xs. distinct xs}"}.
  14.227 -  
  14.228 -  The primitive operations on @{typ "'a dlist"} are specified
  14.229 -  indirectly using the projection @{const list_of_dlist}.  For
  14.230 -  the empty @{text "dlist"}, @{const Dlist.empty}, we finally want
  14.231 -  the code equation
  14.232 -*}
  14.233 -
  14.234 -text %quote {*
  14.235 -  @{term "Dlist.empty = Dlist []"}
  14.236 -*}
  14.237 -
  14.238 -text {*
  14.239 -  \noindent This we have to prove indirectly as follows:
  14.240 -*}
  14.241 -
  14.242 -lemma %quote [code abstract]:
  14.243 -  "list_of_dlist Dlist.empty = []"
  14.244 -  by (fact list_of_dlist_empty)
  14.245 -
  14.246 -text {*
  14.247 -  \noindent This equation logically encodes both the desired code
  14.248 -  equation and that the expression @{const Dlist} is applied to obeys
  14.249 -  the implicit invariant.  Equations for insertion and removal are
  14.250 -  similar:
  14.251 -*}
  14.252 -
  14.253 -lemma %quote [code abstract]:
  14.254 -  "list_of_dlist (Dlist.insert x dxs) = List.insert x (list_of_dlist dxs)"
  14.255 -  by (fact list_of_dlist_insert)
  14.256 -
  14.257 -lemma %quote [code abstract]:
  14.258 -  "list_of_dlist (Dlist.remove x dxs) = remove1 x (list_of_dlist dxs)"
  14.259 -  by (fact list_of_dlist_remove)
  14.260 -
  14.261 -text {*
  14.262 -  \noindent Then the corresponding code is as follows:
  14.263 -*}
  14.264 -
  14.265 -text %quotetypewriter {*
  14.266 -  @{code_stmts Dlist.empty Dlist.insert Dlist.remove list_of_dlist (Haskell)}
  14.267 -*} (*(types) dlist (consts) dempty dinsert dremove list_of List.member insert remove *)
  14.268 -
  14.269 -text {*
  14.270 -  Typical data structures implemented by representations involving
  14.271 -  invariants are available in the library, theory @{theory Mapping}
  14.272 -  specifies key-value-mappings (type @{typ "('a, 'b) mapping"});
  14.273 -  these can be implemented by red-black-trees (theory @{theory RBT}).
  14.274 -*}
  14.275 -
  14.276 -end
  14.277 -
    15.1 --- a/doc-src/Codegen/Setup.thy	Tue Aug 28 18:46:15 2012 +0200
    15.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    15.3 @@ -1,37 +0,0 @@
    15.4 -theory Setup
    15.5 -imports
    15.6 -  Complex_Main
    15.7 -  "~~/src/HOL/Library/Dlist"
    15.8 -  "~~/src/HOL/Library/RBT"
    15.9 -  "~~/src/HOL/Library/Mapping"
   15.10 -begin
   15.11 -
   15.12 -(* FIXME avoid writing into source directory *)
   15.13 -ML {*
   15.14 -  Isabelle_System.mkdirs (Path.append (Thy_Load.master_directory @{theory}) (Path.basic "examples"))
   15.15 -*}
   15.16 -
   15.17 -ML_file "../antiquote_setup.ML"
   15.18 -ML_file "../more_antiquote.ML"
   15.19 -
   15.20 -setup {*
   15.21 -  Antiquote_Setup.setup #>
   15.22 -  More_Antiquote.setup #>
   15.23 -let
   15.24 -  val typ = Simple_Syntax.read_typ;
   15.25 -in
   15.26 -  Sign.del_modesyntax_i (Symbol.xsymbolsN, false)
   15.27 -   [("_constrain", typ "logic => type => logic", Mixfix ("_\<Colon>_", [4, 0], 3)),
   15.28 -    ("_constrain", typ "prop' => type => prop'", Mixfix ("_\<Colon>_", [4, 0], 3))] #>
   15.29 -  Sign.add_modesyntax_i (Symbol.xsymbolsN, false)
   15.30 -   [("_constrain", typ "logic => type => logic", Mixfix ("_ \<Colon>  _", [4, 0], 3)),
   15.31 -    ("_constrain", typ "prop' => type => prop'", Mixfix ("_ \<Colon> _", [4, 0], 3))]
   15.32 -end
   15.33 -*}
   15.34 -
   15.35 -setup {* Code_Target.set_default_code_width 74 *}
   15.36 -
   15.37 -declare [[names_unique = false]]
   15.38 -
   15.39 -end
   15.40 -
    16.1 --- a/doc-src/Codegen/document/adapt.tex	Tue Aug 28 18:46:15 2012 +0200
    16.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.3 @@ -1,52 +0,0 @@
    16.4 -
    16.5 -\documentclass[12pt]{article}
    16.6 -\usepackage{tikz}
    16.7 -
    16.8 -\begin{document}
    16.9 -
   16.10 -\thispagestyle{empty}
   16.11 -\setlength{\fboxrule}{0.01pt}
   16.12 -\setlength{\fboxsep}{4pt}
   16.13 -
   16.14 -\fcolorbox{white}{white}{
   16.15 -
   16.16 -\begin{tikzpicture}[scale = 0.5]
   16.17 -  \tikzstyle water=[color = blue, thick]
   16.18 -  \tikzstyle ice=[color = black, very thick, cap = round, join = round, fill = white]
   16.19 -  \tikzstyle process=[color = green, semithick, ->]
   16.20 -  \tikzstyle adaptation=[color = red, semithick, ->]
   16.21 -  \tikzstyle target=[color = black]
   16.22 -  \foreach \x in {0, ..., 24}
   16.23 -    \draw[style=water] (\x, 0.25) sin + (0.25, 0.25) cos + (0.25, -0.25) sin
   16.24 -      + (0.25, -0.25) cos + (0.25, 0.25);
   16.25 -  \draw[style=ice] (1, 0) --
   16.26 -    (3, 6) node[above, fill=white] {logic} -- (5, 0) -- cycle;
   16.27 -  \draw[style=ice] (9, 0) --
   16.28 -    (11, 6) node[above, fill=white] {intermediate language} -- (13, 0) -- cycle;
   16.29 -  \draw[style=ice] (15, -6) --
   16.30 -    (19, 6) node[above, fill=white] {target language} -- (23, -6) -- cycle;
   16.31 -  \draw[style=process]
   16.32 -    (3.5, 3) .. controls (7, 5) .. node[fill=white] {translation} (10.5, 3);
   16.33 -  \draw[style=process]
   16.34 -    (11.5, 3) .. controls (15, 5) .. node[fill=white] (serialisation) {serialisation} (18.5, 3);
   16.35 -  \node (adaptation) at (11, -2) [style=adaptation] {adaptation};
   16.36 -  \node at (19, 3) [rotate=90] {generated};
   16.37 -  \node at (19.5, -5) {language};
   16.38 -  \node at (19.5, -3) {library};
   16.39 -  \node (includes) at (19.5, -1) {includes};
   16.40 -  \node (reserved) at (16.5, -3) [rotate=72] {reserved}; % proper 71.57
   16.41 -  \draw[style=process]
   16.42 -    (includes) -- (serialisation);
   16.43 -  \draw[style=process]
   16.44 -    (reserved) -- (serialisation);
   16.45 -  \draw[style=adaptation]
   16.46 -    (adaptation) -- (serialisation);
   16.47 -  \draw[style=adaptation]
   16.48 -    (adaptation) -- (includes);
   16.49 -  \draw[style=adaptation]
   16.50 -    (adaptation) -- (reserved);
   16.51 -\end{tikzpicture}
   16.52 -
   16.53 -}
   16.54 -
   16.55 -\end{document}
    17.1 --- a/doc-src/Codegen/document/architecture.tex	Tue Aug 28 18:46:15 2012 +0200
    17.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    17.3 @@ -1,50 +0,0 @@
    17.4 -
    17.5 -\documentclass[12pt]{article}
    17.6 -\usepackage{tikz}
    17.7 -\usetikzlibrary{shapes}
    17.8 -\usetikzlibrary{arrows}
    17.9 -
   17.10 -\begin{document}
   17.11 -
   17.12 -\thispagestyle{empty}
   17.13 -\setlength{\fboxrule}{0.01pt}
   17.14 -\setlength{\fboxsep}{4pt}
   17.15 -
   17.16 -\fcolorbox{white}{white}{
   17.17 -
   17.18 -\newcommand{\sys}[1]{\emph{#1}}
   17.19 -
   17.20 -\begin{tikzpicture}[x = 4cm, y = 1cm]
   17.21 -  \tikzstyle positive=[color = black, fill = white];
   17.22 -  \tikzstyle negative=[color = white, fill = black];
   17.23 -  \tikzstyle entity=[rounded corners, draw, thick];
   17.24 -  \tikzstyle process=[ellipse, draw, thick];
   17.25 -  \tikzstyle arrow=[-stealth, semithick];
   17.26 -  \node (spec) at (0, 3) [entity, positive] {specification tools};
   17.27 -  \node (user) at (1, 3) [entity, positive] {user proofs};
   17.28 -  \node (spec_user_join) at (0.5, 3) [shape=coordinate] {};
   17.29 -  \node (raw) at (0.5, 4) [entity, positive] {raw code equations};
   17.30 -  \node (pre) at (1.5, 4) [process, positive] {preprocessing};
   17.31 -  \node (eqn) at (2.5, 4) [entity, positive] {code equations};
   17.32 -  \node (iml) at (0.5, 0) [entity, positive] {intermediate program};
   17.33 -  \node (seri) at (1.5, 0) [process, positive] {serialisation};
   17.34 -  \node (SML) at (2.5, 3) [entity, positive] {\sys{SML}};
   17.35 -  \node (OCaml) at (2.5, 2) [entity, positive] {\sys{OCaml}};
   17.36 -  \node (Haskell) at (2.5, 1) [entity, positive] {\sys{Haskell}};
   17.37 -  \node (Scala) at (2.5, 0) [entity, positive] {\sys{Scala}};
   17.38 -  \draw [semithick] (spec) -- (spec_user_join);
   17.39 -  \draw [semithick] (user) -- (spec_user_join);
   17.40 -  \draw [-diamond, semithick] (spec_user_join) -- (raw);
   17.41 -  \draw [arrow] (raw) -- (pre);
   17.42 -  \draw [arrow] (pre) -- (eqn);
   17.43 -  \draw [arrow] (eqn) -- node (transl) [process, positive] {translation} (iml);
   17.44 -  \draw [arrow] (iml) -- (seri);
   17.45 -  \draw [arrow] (seri) -- (SML);
   17.46 -  \draw [arrow] (seri) -- (OCaml);
   17.47 -  \draw [arrow] (seri) -- (Haskell);
   17.48 -  \draw [arrow] (seri) -- (Scala);
   17.49 -\end{tikzpicture}
   17.50 -
   17.51 -}
   17.52 -
   17.53 -\end{document}
    18.1 --- a/doc-src/Codegen/document/build	Tue Aug 28 18:46:15 2012 +0200
    18.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    18.3 @@ -1,25 +0,0 @@
    18.4 -#!/bin/bash
    18.5 -
    18.6 -set -e
    18.7 -
    18.8 -FORMAT="$1"
    18.9 -VARIANT="$2"
   18.10 -
   18.11 -"$ISABELLE_TOOL" logo -o isabelle_isar.pdf "Isar"
   18.12 -"$ISABELLE_TOOL" logo -o isabelle_isar.eps "Isar"
   18.13 -
   18.14 -cp "$ISABELLE_HOME/doc-src/iman.sty" .
   18.15 -cp "$ISABELLE_HOME/doc-src/extra.sty" .
   18.16 -cp "$ISABELLE_HOME/doc-src/isar.sty" .
   18.17 -cp "$ISABELLE_HOME/doc-src/proof.sty" .
   18.18 -cp "$ISABELLE_HOME/doc-src/manual.bib" .
   18.19 -
   18.20 -for NAME in architecture adapt
   18.21 -do
   18.22 -  latex "$NAME"
   18.23 -  $ISABELLE_DVIPS -E -o "$NAME.eps" "$NAME.dvi"
   18.24 -  $ISABELLE_EPSTOPDF "$NAME.eps"
   18.25 -done
   18.26 -
   18.27 -"$ISABELLE_HOME/doc-src/prepare_document" "$FORMAT"
   18.28 -
    19.1 --- a/doc-src/Codegen/document/root.tex	Tue Aug 28 18:46:15 2012 +0200
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,52 +0,0 @@
    19.4 -
    19.5 -\documentclass[12pt,a4paper,fleqn]{article}
    19.6 -\usepackage{latexsym,graphicx}
    19.7 -\usepackage{multirow}
    19.8 -\usepackage{iman,extra,isar,proof}
    19.9 -\usepackage{isabelle,isabellesym}
   19.10 -\usepackage{style}
   19.11 -\usepackage{pdfsetup}
   19.12 -
   19.13 -\hyphenation{Isabelle}
   19.14 -\hyphenation{Isar}
   19.15 -\isadroptag{theory}
   19.16 -
   19.17 -\title{\includegraphics[scale=0.5]{isabelle_isar}
   19.18 -  \\[4ex] Code generation from Isabelle/HOL theories}
   19.19 -\author{\emph{Florian Haftmann with contributions from Lukas Bulwahn}}
   19.20 -
   19.21 -\begin{document}
   19.22 -
   19.23 -\maketitle
   19.24 -
   19.25 -\begin{abstract}
   19.26 -  \noindent This tutorial introduces the code generator facilities of Isabelle/HOL.
   19.27 -    They empower the user to turn HOL specifications into corresponding executable
   19.28 -    programs in the languages SML, OCaml, Haskell and Scala.
   19.29 -\end{abstract}
   19.30 -
   19.31 -\thispagestyle{empty}\clearpage
   19.32 -
   19.33 -\pagenumbering{roman}
   19.34 -\clearfirst
   19.35 -
   19.36 -\input{Introduction.tex}
   19.37 -\input{Foundations.tex}
   19.38 -\input{Refinement.tex}
   19.39 -\input{Inductive_Predicate.tex}
   19.40 -\input{Adaptation.tex}
   19.41 -\input{Evaluation.tex}
   19.42 -\input{Further.tex}
   19.43 -
   19.44 -\begingroup
   19.45 -\bibliographystyle{plain} \small\raggedright\frenchspacing
   19.46 -\bibliography{manual}
   19.47 -\endgroup
   19.48 -
   19.49 -\end{document}
   19.50 -
   19.51 -
   19.52 -%%% Local Variables: 
   19.53 -%%% mode: latex
   19.54 -%%% TeX-master: t
   19.55 -%%% End: 
    20.1 --- a/doc-src/Codegen/document/style.sty	Tue Aug 28 18:46:15 2012 +0200
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,75 +0,0 @@
    20.4 -
    20.5 -%% toc
    20.6 -\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
    20.7 -\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
    20.8 -
    20.9 -%% paragraphs
   20.10 -\setlength{\parindent}{1em}
   20.11 -
   20.12 -%% references
   20.13 -\newcommand{\secref}[1]{\S\ref{#1}}
   20.14 -\newcommand{\figref}[1]{figure~\ref{#1}}
   20.15 -
   20.16 -%% logical markup
   20.17 -\newcommand{\strong}[1]{{\bfseries {#1}}}
   20.18 -\newcommand{\qn}[1]{\emph{#1}}
   20.19 -
   20.20 -%% typographic conventions
   20.21 -\newcommand{\qt}[1]{``{#1}''}
   20.22 -\newcommand{\ditem}[1]{\item[\isastyletext #1]}
   20.23 -
   20.24 -%% quote environment
   20.25 -\isakeeptag{quote}
   20.26 -\renewenvironment{quote}
   20.27 -  {\list{}{\leftmargin2em\rightmargin0pt}\parindent0pt\parskip0pt\item\relax}
   20.28 -  {\endlist}
   20.29 -\renewcommand{\isatagquote}{\begin{quote}}
   20.30 -\renewcommand{\endisatagquote}{\end{quote}}
   20.31 -\newcommand{\quotebreak}{\\[1.2ex]}
   20.32 -
   20.33 -%% typewriter text
   20.34 -\newenvironment{typewriter}{\renewcommand{\isastyletext}{}%
   20.35 -\renewcommand{\isadigit}[1]{{##1}}%
   20.36 -\parindent0pt%
   20.37 -\makeatletter\isa@parindent0pt\makeatother%
   20.38 -\isabellestyle{tt}\isastyle%
   20.39 -\fontsize{9pt}{9pt}\selectfont}{}
   20.40 -
   20.41 -\isakeeptag{quotetypewriter}
   20.42 -\renewcommand{\isatagquotetypewriter}{\begin{quote}\begin{typewriter}}
   20.43 -\renewcommand{\endisatagquotetypewriter}{\end{typewriter}\end{quote}}
   20.44 -
   20.45 -\isakeeptag{quotett}
   20.46 -\renewcommand{\isatagquotett}{\begin{quote}\isabellestyle{tt}\isastyle}
   20.47 -\renewcommand{\endisatagquotett}{\end{quote}}
   20.48 -
   20.49 -%% a trick
   20.50 -\newcommand{\isasymSML}{SML}
   20.51 -\newcommand{\isasymSMLdummy}{SML}
   20.52 -
   20.53 -%% presentation
   20.54 -\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
   20.55 -
   20.56 -%% character detail
   20.57 -\renewcommand{\isadigit}[1]{\isamath{#1}}
   20.58 -\binperiod
   20.59 -\underscoreoff
   20.60 -
   20.61 -%% format
   20.62 -\pagestyle{headings}
   20.63 -\isabellestyle{it}
   20.64 -
   20.65 -%% ml reference
   20.66 -\newenvironment{mldecls}{\par\noindent\begingroup\footnotesize\def\isanewline{\\}\begin{tabular}{l}}{\end{tabular}\smallskip\endgroup}
   20.67 -
   20.68 -\isakeeptag{mlref}
   20.69 -\renewcommand{\isatagmlref}{\subsection*{\makebox[0pt][r]{\fbox{\ML}~~}Reference}\begingroup\def\isastyletext{\rm}\small}
   20.70 -\renewcommand{\endisatagmlref}{\endgroup}
   20.71 -
   20.72 -\isabellestyle{it}
   20.73 -
   20.74 -
   20.75 -%%% Local Variables: 
   20.76 -%%% mode: latex
   20.77 -%%% TeX-master: "implementation"
   20.78 -%%% End: 
    21.1 --- a/doc-src/Functions/Functions.thy	Tue Aug 28 18:46:15 2012 +0200
    21.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    21.3 @@ -1,1190 +0,0 @@
    21.4 -(*  Title:      doc-src/IsarAdvanced/Functions/Thy/Fundefs.thy
    21.5 -    Author:     Alexander Krauss, TU Muenchen
    21.6 -
    21.7 -Tutorial for function definitions with the new "function" package.
    21.8 -*)
    21.9 -
   21.10 -theory Functions
   21.11 -imports Main
   21.12 -begin
   21.13 -
   21.14 -section {* Function Definitions for Dummies *}
   21.15 -
   21.16 -text {*
   21.17 -  In most cases, defining a recursive function is just as simple as other definitions:
   21.18 -*}
   21.19 -
   21.20 -fun fib :: "nat \<Rightarrow> nat"
   21.21 -where
   21.22 -  "fib 0 = 1"
   21.23 -| "fib (Suc 0) = 1"
   21.24 -| "fib (Suc (Suc n)) = fib n + fib (Suc n)"
   21.25 -
   21.26 -text {*
   21.27 -  The syntax is rather self-explanatory: We introduce a function by
   21.28 -  giving its name, its type, 
   21.29 -  and a set of defining recursive equations.
   21.30 -  If we leave out the type, the most general type will be
   21.31 -  inferred, which can sometimes lead to surprises: Since both @{term
   21.32 -  "1::nat"} and @{text "+"} are overloaded, we would end up
   21.33 -  with @{text "fib :: nat \<Rightarrow> 'a::{one,plus}"}.
   21.34 -*}
   21.35 -
   21.36 -text {*
   21.37 -  The function always terminates, since its argument gets smaller in
   21.38 -  every recursive call. 
   21.39 -  Since HOL is a logic of total functions, termination is a
   21.40 -  fundamental requirement to prevent inconsistencies\footnote{From the
   21.41 -  \qt{definition} @{text "f(n) = f(n) + 1"} we could prove 
   21.42 -  @{text "0 = 1"} by subtracting @{text "f(n)"} on both sides.}.
   21.43 -  Isabelle tries to prove termination automatically when a definition
   21.44 -  is made. In \S\ref{termination}, we will look at cases where this
   21.45 -  fails and see what to do then.
   21.46 -*}
   21.47 -
   21.48 -subsection {* Pattern matching *}
   21.49 -
   21.50 -text {* \label{patmatch}
   21.51 -  Like in functional programming, we can use pattern matching to
   21.52 -  define functions. At the moment we will only consider \emph{constructor
   21.53 -  patterns}, which only consist of datatype constructors and
   21.54 -  variables. Furthermore, patterns must be linear, i.e.\ all variables
   21.55 -  on the left hand side of an equation must be distinct. In
   21.56 -  \S\ref{genpats} we discuss more general pattern matching.
   21.57 -
   21.58 -  If patterns overlap, the order of the equations is taken into
   21.59 -  account. The following function inserts a fixed element between any
   21.60 -  two elements of a list:
   21.61 -*}
   21.62 -
   21.63 -fun sep :: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list"
   21.64 -where
   21.65 -  "sep a (x#y#xs) = x # a # sep a (y # xs)"
   21.66 -| "sep a xs       = xs"
   21.67 -
   21.68 -text {* 
   21.69 -  Overlapping patterns are interpreted as \qt{increments} to what is
   21.70 -  already there: The second equation is only meant for the cases where
   21.71 -  the first one does not match. Consequently, Isabelle replaces it
   21.72 -  internally by the remaining cases, making the patterns disjoint:
   21.73 -*}
   21.74 -
   21.75 -thm sep.simps
   21.76 -
   21.77 -text {* @{thm [display] sep.simps[no_vars]} *}
   21.78 -
   21.79 -text {* 
   21.80 -  \noindent The equations from function definitions are automatically used in
   21.81 -  simplification:
   21.82 -*}
   21.83 -
   21.84 -lemma "sep 0 [1, 2, 3] = [1, 0, 2, 0, 3]"
   21.85 -by simp
   21.86 -
   21.87 -subsection {* Induction *}
   21.88 -
   21.89 -text {*
   21.90 -
   21.91 -  Isabelle provides customized induction rules for recursive
   21.92 -  functions. These rules follow the recursive structure of the
   21.93 -  definition. Here is the rule @{text sep.induct} arising from the
   21.94 -  above definition of @{const sep}:
   21.95 -
   21.96 -  @{thm [display] sep.induct}
   21.97 -  
   21.98 -  We have a step case for list with at least two elements, and two
   21.99 -  base cases for the zero- and the one-element list. Here is a simple
  21.100 -  proof about @{const sep} and @{const map}
  21.101 -*}
  21.102 -
  21.103 -lemma "map f (sep x ys) = sep (f x) (map f ys)"
  21.104 -apply (induct x ys rule: sep.induct)
  21.105 -
  21.106 -txt {*
  21.107 -  We get three cases, like in the definition.
  21.108 -
  21.109 -  @{subgoals [display]}
  21.110 -*}
  21.111 -
  21.112 -apply auto 
  21.113 -done
  21.114 -text {*
  21.115 -
  21.116 -  With the \cmd{fun} command, you can define about 80\% of the
  21.117 -  functions that occur in practice. The rest of this tutorial explains
  21.118 -  the remaining 20\%.
  21.119 -*}
  21.120 -
  21.121 -
  21.122 -section {* fun vs.\ function *}
  21.123 -
  21.124 -text {* 
  21.125 -  The \cmd{fun} command provides a
  21.126 -  convenient shorthand notation for simple function definitions. In
  21.127 -  this mode, Isabelle tries to solve all the necessary proof obligations
  21.128 -  automatically. If any proof fails, the definition is
  21.129 -  rejected. This can either mean that the definition is indeed faulty,
  21.130 -  or that the default proof procedures are just not smart enough (or
  21.131 -  rather: not designed) to handle the definition.
  21.132 -
  21.133 -  By expanding the abbreviation to the more verbose \cmd{function} command, these proof obligations become visible and can be analyzed or
  21.134 -  solved manually. The expansion from \cmd{fun} to \cmd{function} is as follows:
  21.135 -
  21.136 -\end{isamarkuptext}
  21.137 -
  21.138 -
  21.139 -\[\left[\;\begin{minipage}{0.25\textwidth}\vspace{6pt}
  21.140 -\cmd{fun} @{text "f :: \<tau>"}\\%
  21.141 -\cmd{where}\\%
  21.142 -\hspace*{2ex}{\it equations}\\%
  21.143 -\hspace*{2ex}\vdots\vspace*{6pt}
  21.144 -\end{minipage}\right]
  21.145 -\quad\equiv\quad
  21.146 -\left[\;\begin{minipage}{0.48\textwidth}\vspace{6pt}
  21.147 -\cmd{function} @{text "("}\cmd{sequential}@{text ") f :: \<tau>"}\\%
  21.148 -\cmd{where}\\%
  21.149 -\hspace*{2ex}{\it equations}\\%
  21.150 -\hspace*{2ex}\vdots\\%
  21.151 -\cmd{by} @{text "pat_completeness auto"}\\%
  21.152 -\cmd{termination by} @{text "lexicographic_order"}\vspace{6pt}
  21.153 -\end{minipage}
  21.154 -\right]\]
  21.155 -
  21.156 -\begin{isamarkuptext}
  21.157 -  \vspace*{1em}
  21.158 -  \noindent Some details have now become explicit:
  21.159 -
  21.160 -  \begin{enumerate}
  21.161 -  \item The \cmd{sequential} option enables the preprocessing of
  21.162 -  pattern overlaps which we already saw. Without this option, the equations
  21.163 -  must already be disjoint and complete. The automatic completion only
  21.164 -  works with constructor patterns.
  21.165 -
  21.166 -  \item A function definition produces a proof obligation which
  21.167 -  expresses completeness and compatibility of patterns (we talk about
  21.168 -  this later). The combination of the methods @{text "pat_completeness"} and
  21.169 -  @{text "auto"} is used to solve this proof obligation.
  21.170 -
  21.171 -  \item A termination proof follows the definition, started by the
  21.172 -  \cmd{termination} command. This will be explained in \S\ref{termination}.
  21.173 - \end{enumerate}
  21.174 -  Whenever a \cmd{fun} command fails, it is usually a good idea to
  21.175 -  expand the syntax to the more verbose \cmd{function} form, to see
  21.176 -  what is actually going on.
  21.177 - *}
  21.178 -
  21.179 -
  21.180 -section {* Termination *}
  21.181 -
  21.182 -text {*\label{termination}
  21.183 -  The method @{text "lexicographic_order"} is the default method for
  21.184 -  termination proofs. It can prove termination of a
  21.185 -  certain class of functions by searching for a suitable lexicographic
  21.186 -  combination of size measures. Of course, not all functions have such
  21.187 -  a simple termination argument. For them, we can specify the termination
  21.188 -  relation manually.
  21.189 -*}
  21.190 -
  21.191 -subsection {* The {\tt relation} method *}
  21.192 -text{*
  21.193 -  Consider the following function, which sums up natural numbers up to
  21.194 -  @{text "N"}, using a counter @{text "i"}:
  21.195 -*}
  21.196 -
  21.197 -function sum :: "nat \<Rightarrow> nat \<Rightarrow> nat"
  21.198 -where
  21.199 -  "sum i N = (if i > N then 0 else i + sum (Suc i) N)"
  21.200 -by pat_completeness auto
  21.201 -
  21.202 -text {*
  21.203 -  \noindent The @{text "lexicographic_order"} method fails on this example, because none of the
  21.204 -  arguments decreases in the recursive call, with respect to the standard size ordering.
  21.205 -  To prove termination manually, we must provide a custom wellfounded relation.
  21.206 -
  21.207 -  The termination argument for @{text "sum"} is based on the fact that
  21.208 -  the \emph{difference} between @{text "i"} and @{text "N"} gets
  21.209 -  smaller in every step, and that the recursion stops when @{text "i"}
  21.210 -  is greater than @{text "N"}. Phrased differently, the expression 
  21.211 -  @{text "N + 1 - i"} always decreases.
  21.212 -
  21.213 -  We can use this expression as a measure function suitable to prove termination.
  21.214 -*}
  21.215 -
  21.216 -termination sum
  21.217 -apply (relation "measure (\<lambda>(i,N). N + 1 - i)")
  21.218 -
  21.219 -txt {*
  21.220 -  The \cmd{termination} command sets up the termination goal for the
  21.221 -  specified function @{text "sum"}. If the function name is omitted, it
  21.222 -  implicitly refers to the last function definition.
  21.223 -
  21.224 -  The @{text relation} method takes a relation of
  21.225 -  type @{typ "('a \<times> 'a) set"}, where @{typ "'a"} is the argument type of
  21.226 -  the function. If the function has multiple curried arguments, then
  21.227 -  these are packed together into a tuple, as it happened in the above
  21.228 -  example.
  21.229 -
  21.230 -  The predefined function @{term[source] "measure :: ('a \<Rightarrow> nat) \<Rightarrow> ('a \<times> 'a) set"} constructs a
  21.231 -  wellfounded relation from a mapping into the natural numbers (a
  21.232 -  \emph{measure function}). 
  21.233 -
  21.234 -  After the invocation of @{text "relation"}, we must prove that (a)
  21.235 -  the relation we supplied is wellfounded, and (b) that the arguments
  21.236 -  of recursive calls indeed decrease with respect to the
  21.237 -  relation:
  21.238 -
  21.239 -  @{subgoals[display,indent=0]}
  21.240 -
  21.241 -  These goals are all solved by @{text "auto"}:
  21.242 -*}
  21.243 -
  21.244 -apply auto
  21.245 -done
  21.246 -
  21.247 -text {*
  21.248 -  Let us complicate the function a little, by adding some more
  21.249 -  recursive calls: 
  21.250 -*}
  21.251 -
  21.252 -function foo :: "nat \<Rightarrow> nat \<Rightarrow> nat"
  21.253 -where
  21.254 -  "foo i N = (if i > N 
  21.255 -              then (if N = 0 then 0 else foo 0 (N - 1))
  21.256 -              else i + foo (Suc i) N)"
  21.257 -by pat_completeness auto
  21.258 -
  21.259 -text {*
  21.260 -  When @{text "i"} has reached @{text "N"}, it starts at zero again
  21.261 -  and @{text "N"} is decremented.
  21.262 -  This corresponds to a nested
  21.263 -  loop where one index counts up and the other down. Termination can
  21.264 -  be proved using a lexicographic combination of two measures, namely
  21.265 -  the value of @{text "N"} and the above difference. The @{const
  21.266 -  "measures"} combinator generalizes @{text "measure"} by taking a
  21.267 -  list of measure functions.  
  21.268 -*}
  21.269 -
  21.270 -termination 
  21.271 -by (relation "measures [\<lambda>(i, N). N, \<lambda>(i,N). N + 1 - i]") auto
  21.272 -
  21.273 -subsection {* How @{text "lexicographic_order"} works *}
  21.274 -
  21.275 -(*fun fails :: "nat \<Rightarrow> nat list \<Rightarrow> nat"
  21.276 -where
  21.277 -  "fails a [] = a"
  21.278 -| "fails a (x#xs) = fails (x + a) (x # xs)"
  21.279 -*)
  21.280 -
  21.281 -text {*
  21.282 -  To see how the automatic termination proofs work, let's look at an
  21.283 -  example where it fails\footnote{For a detailed discussion of the
  21.284 -  termination prover, see \cite{bulwahnKN07}}:
  21.285 -
  21.286 -\end{isamarkuptext}  
  21.287 -\cmd{fun} @{text "fails :: \"nat \<Rightarrow> nat list \<Rightarrow> nat\""}\\%
  21.288 -\cmd{where}\\%
  21.289 -\hspace*{2ex}@{text "\"fails a [] = a\""}\\%
  21.290 -|\hspace*{1.5ex}@{text "\"fails a (x#xs) = fails (x + a) (x#xs)\""}\\
  21.291 -\begin{isamarkuptext}
  21.292 -
  21.293 -\noindent Isabelle responds with the following error:
  21.294 -
  21.295 -\begin{isabelle}
  21.296 -*** Unfinished subgoals:\newline
  21.297 -*** (a, 1, <):\newline
  21.298 -*** \ 1.~@{text "\<And>x. x = 0"}\newline
  21.299 -*** (a, 1, <=):\newline
  21.300 -*** \ 1.~False\newline
  21.301 -*** (a, 2, <):\newline
  21.302 -*** \ 1.~False\newline
  21.303 -*** Calls:\newline
  21.304 -*** a) @{text "(a, x # xs) -->> (x + a, x # xs)"}\newline
  21.305 -*** Measures:\newline
  21.306 -*** 1) @{text "\<lambda>x. size (fst x)"}\newline
  21.307 -*** 2) @{text "\<lambda>x. size (snd x)"}\newline
  21.308 -*** Result matrix:\newline
  21.309 -*** \ \ \ \ 1\ \ 2  \newline
  21.310 -*** a:  ?   <= \newline
  21.311 -*** Could not find lexicographic termination order.\newline
  21.312 -*** At command "fun".\newline
  21.313 -\end{isabelle}
  21.314 -*}
  21.315 -text {*
  21.316 -  The key to this error message is the matrix at the bottom. The rows
  21.317 -  of that matrix correspond to the different recursive calls (In our
  21.318 -  case, there is just one). The columns are the function's arguments 
  21.319 -  (expressed through different measure functions, which map the
  21.320 -  argument tuple to a natural number). 
  21.321 -
  21.322 -  The contents of the matrix summarize what is known about argument
  21.323 -  descents: The second argument has a weak descent (@{text "<="}) at the
  21.324 -  recursive call, and for the first argument nothing could be proved,
  21.325 -  which is expressed by @{text "?"}. In general, there are the values
  21.326 -  @{text "<"}, @{text "<="} and @{text "?"}.
  21.327 -
  21.328 -  For the failed proof attempts, the unfinished subgoals are also
  21.329 -  printed. Looking at these will often point to a missing lemma.
  21.330 -*}
  21.331 -
  21.332 -subsection {* The @{text size_change} method *}
  21.333 -
  21.334 -text {*
  21.335 -  Some termination goals that are beyond the powers of
  21.336 -  @{text lexicographic_order} can be solved automatically by the
  21.337 -  more powerful @{text size_change} method, which uses a variant of
  21.338 -  the size-change principle, together with some other
  21.339 -  techniques. While the details are discussed
  21.340 -  elsewhere\cite{krauss_phd},
  21.341 -  here are a few typical situations where
  21.342 -  @{text lexicographic_order} has difficulties and @{text size_change}
  21.343 -  may be worth a try:
  21.344 -  \begin{itemize}
  21.345 -  \item Arguments are permuted in a recursive call.
  21.346 -  \item Several mutually recursive functions with multiple arguments.
  21.347 -  \item Unusual control flow (e.g., when some recursive calls cannot
  21.348 -  occur in sequence).
  21.349 -  \end{itemize}
  21.350 -
  21.351 -  Loading the theory @{text Multiset} makes the @{text size_change}
  21.352 -  method a bit stronger: it can then use multiset orders internally.
  21.353 -*}
  21.354 -
  21.355 -section {* Mutual Recursion *}
  21.356 -
  21.357 -text {*
  21.358 -  If two or more functions call one another mutually, they have to be defined
  21.359 -  in one step. Here are @{text "even"} and @{text "odd"}:
  21.360 -*}
  21.361 -
  21.362 -function even :: "nat \<Rightarrow> bool"
  21.363 -    and odd  :: "nat \<Rightarrow> bool"
  21.364 -where
  21.365 -  "even 0 = True"
  21.366 -| "odd 0 = False"
  21.367 -| "even (Suc n) = odd n"
  21.368 -| "odd (Suc n) = even n"
  21.369 -by pat_completeness auto
  21.370 -
  21.371 -text {*
  21.372 -  To eliminate the mutual dependencies, Isabelle internally
  21.373 -  creates a single function operating on the sum
  21.374 -  type @{typ "nat + nat"}. Then, @{const even} and @{const odd} are
  21.375 -  defined as projections. Consequently, termination has to be proved
  21.376 -  simultaneously for both functions, by specifying a measure on the
  21.377 -  sum type: 
  21.378 -*}
  21.379 -
  21.380 -termination 
  21.381 -by (relation "measure (\<lambda>x. case x of Inl n \<Rightarrow> n | Inr n \<Rightarrow> n)") auto
  21.382 -
  21.383 -text {* 
  21.384 -  We could also have used @{text lexicographic_order}, which
  21.385 -  supports mutual recursive termination proofs to a certain extent.
  21.386 -*}
  21.387 -
  21.388 -subsection {* Induction for mutual recursion *}
  21.389 -
  21.390 -text {*
  21.391 -
  21.392 -  When functions are mutually recursive, proving properties about them
  21.393 -  generally requires simultaneous induction. The induction rule @{text "even_odd.induct"}
  21.394 -  generated from the above definition reflects this.
  21.395 -
  21.396 -  Let us prove something about @{const even} and @{const odd}:
  21.397 -*}
  21.398 -
  21.399 -lemma even_odd_mod2:
  21.400 -  "even n = (n mod 2 = 0)"
  21.401 -  "odd n = (n mod 2 = 1)"
  21.402 -
  21.403 -txt {* 
  21.404 -  We apply simultaneous induction, specifying the induction variable
  21.405 -  for both goals, separated by \cmd{and}:  *}
  21.406 -
  21.407 -apply (induct n and n rule: even_odd.induct)
  21.408 -
  21.409 -txt {* 
  21.410 -  We get four subgoals, which correspond to the clauses in the
  21.411 -  definition of @{const even} and @{const odd}:
  21.412 -  @{subgoals[display,indent=0]}
  21.413 -  Simplification solves the first two goals, leaving us with two
  21.414 -  statements about the @{text "mod"} operation to prove:
  21.415 -*}
  21.416 -
  21.417 -apply simp_all
  21.418 -
  21.419 -txt {* 
  21.420 -  @{subgoals[display,indent=0]} 
  21.421 -
  21.422 -  \noindent These can be handled by Isabelle's arithmetic decision procedures.
  21.423 -  
  21.424 -*}
  21.425 -
  21.426 -apply arith
  21.427 -apply arith
  21.428 -done
  21.429 -
  21.430 -text {*
  21.431 -  In proofs like this, the simultaneous induction is really essential:
  21.432 -  Even if we are just interested in one of the results, the other
  21.433 -  one is necessary to strengthen the induction hypothesis. If we leave
  21.434 -  out the statement about @{const odd} and just write @{term True} instead,
  21.435 -  the same proof fails:
  21.436 -*}
  21.437 -
  21.438 -lemma failed_attempt:
  21.439 -  "even n = (n mod 2 = 0)"
  21.440 -  "True"
  21.441 -apply (induct n rule: even_odd.induct)
  21.442 -
  21.443 -txt {*
  21.444 -  \noindent Now the third subgoal is a dead end, since we have no
  21.445 -  useful induction hypothesis available:
  21.446 -
  21.447 -  @{subgoals[display,indent=0]} 
  21.448 -*}
  21.449 -
  21.450 -oops
  21.451 -
  21.452 -section {* General pattern matching *}
  21.453 -text{*\label{genpats} *}
  21.454 -
  21.455 -subsection {* Avoiding automatic pattern splitting *}
  21.456 -
  21.457 -text {*
  21.458 -
  21.459 -  Up to now, we used pattern matching only on datatypes, and the
  21.460 -  patterns were always disjoint and complete, and if they weren't,
  21.461 -  they were made disjoint automatically like in the definition of
  21.462 -  @{const "sep"} in \S\ref{patmatch}.
  21.463 -
  21.464 -  This automatic splitting can significantly increase the number of
  21.465 -  equations involved, and this is not always desirable. The following
  21.466 -  example shows the problem:
  21.467 -  
  21.468 -  Suppose we are modeling incomplete knowledge about the world by a
  21.469 -  three-valued datatype, which has values @{term "T"}, @{term "F"}
  21.470 -  and @{term "X"} for true, false and uncertain propositions, respectively. 
  21.471 -*}
  21.472 -
  21.473 -datatype P3 = T | F | X
  21.474 -
  21.475 -text {* \noindent Then the conjunction of such values can be defined as follows: *}
  21.476 -
  21.477 -fun And :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
  21.478 -where
  21.479 -  "And T p = p"
  21.480 -| "And p T = p"
  21.481 -| "And p F = F"
  21.482 -| "And F p = F"
  21.483 -| "And X X = X"
  21.484 -
  21.485 -
  21.486 -text {* 
  21.487 -  This definition is useful, because the equations can directly be used
  21.488 -  as simplification rules. But the patterns overlap: For example,
  21.489 -  the expression @{term "And T T"} is matched by both the first and
  21.490 -  the second equation. By default, Isabelle makes the patterns disjoint by
  21.491 -  splitting them up, producing instances:
  21.492 -*}
  21.493 -
  21.494 -thm And.simps
  21.495 -
  21.496 -text {*
  21.497 -  @{thm[indent=4] And.simps}
  21.498 -  
  21.499 -  \vspace*{1em}
  21.500 -  \noindent There are several problems with this:
  21.501 -
  21.502 -  \begin{enumerate}
  21.503 -  \item If the datatype has many constructors, there can be an
  21.504 -  explosion of equations. For @{const "And"}, we get seven instead of
  21.505 -  five equations, which can be tolerated, but this is just a small
  21.506 -  example.
  21.507 -
  21.508 -  \item Since splitting makes the equations \qt{less general}, they
  21.509 -  do not always match in rewriting. While the term @{term "And x F"}
  21.510 -  can be simplified to @{term "F"} with the original equations, a
  21.511 -  (manual) case split on @{term "x"} is now necessary.
  21.512 -
  21.513 -  \item The splitting also concerns the induction rule @{text
  21.514 -  "And.induct"}. Instead of five premises it now has seven, which
  21.515 -  means that our induction proofs will have more cases.
  21.516 -
  21.517 -  \item In general, it increases clarity if we get the same definition
  21.518 -  back which we put in.
  21.519 -  \end{enumerate}
  21.520 -
  21.521 -  If we do not want the automatic splitting, we can switch it off by
  21.522 -  leaving out the \cmd{sequential} option. However, we will have to
  21.523 -  prove that our pattern matching is consistent\footnote{This prevents
  21.524 -  us from defining something like @{term "f x = True"} and @{term "f x
  21.525 -  = False"} simultaneously.}:
  21.526 -*}
  21.527 -
  21.528 -function And2 :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
  21.529 -where
  21.530 -  "And2 T p = p"
  21.531 -| "And2 p T = p"
  21.532 -| "And2 p F = F"
  21.533 -| "And2 F p = F"
  21.534 -| "And2 X X = X"
  21.535 -
  21.536 -txt {*
  21.537 -  \noindent Now let's look at the proof obligations generated by a
  21.538 -  function definition. In this case, they are:
  21.539 -
  21.540 -  @{subgoals[display,indent=0]}\vspace{-1.2em}\hspace{3cm}\vdots\vspace{1.2em}
  21.541 -
  21.542 -  The first subgoal expresses the completeness of the patterns. It has
  21.543 -  the form of an elimination rule and states that every @{term x} of
  21.544 -  the function's input type must match at least one of the patterns\footnote{Completeness could
  21.545 -  be equivalently stated as a disjunction of existential statements: 
  21.546 -@{term "(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
  21.547 -  (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method @{text atomize_elim} to get that form instead.}. If the patterns just involve
  21.548 -  datatypes, we can solve it with the @{text "pat_completeness"}
  21.549 -  method:
  21.550 -*}
  21.551 -
  21.552 -apply pat_completeness
  21.553 -
  21.554 -txt {*
  21.555 -  The remaining subgoals express \emph{pattern compatibility}. We do
  21.556 -  allow that an input value matches multiple patterns, but in this
  21.557 -  case, the result (i.e.~the right hand sides of the equations) must
  21.558 -  also be equal. For each pair of two patterns, there is one such
  21.559 -  subgoal. Usually this needs injectivity of the constructors, which
  21.560 -  is used automatically by @{text "auto"}.
  21.561 -*}
  21.562 -
  21.563 -by auto
  21.564 -termination by (relation "{}") simp
  21.565 -
  21.566 -
  21.567 -subsection {* Non-constructor patterns *}
  21.568 -
  21.569 -text {*
  21.570 -  Most of Isabelle's basic types take the form of inductive datatypes,
  21.571 -  and usually pattern matching works on the constructors of such types. 
  21.572 -  However, this need not be always the case, and the \cmd{function}
  21.573 -  command handles other kind of patterns, too.
  21.574 -
  21.575 -  One well-known instance of non-constructor patterns are
  21.576 -  so-called \emph{$n+k$-patterns}, which are a little controversial in
  21.577 -  the functional programming world. Here is the initial fibonacci
  21.578 -  example with $n+k$-patterns:
  21.579 -*}
  21.580 -
  21.581 -function fib2 :: "nat \<Rightarrow> nat"
  21.582 -where
  21.583 -  "fib2 0 = 1"
  21.584 -| "fib2 1 = 1"
  21.585 -| "fib2 (n + 2) = fib2 n + fib2 (Suc n)"
  21.586 -
  21.587 -txt {*
  21.588 -  This kind of matching is again justified by the proof of pattern
  21.589 -  completeness and compatibility. 
  21.590 -  The proof obligation for pattern completeness states that every natural number is
  21.591 -  either @{term "0::nat"}, @{term "1::nat"} or @{term "n +
  21.592 -  (2::nat)"}:
  21.593 -
  21.594 -  @{subgoals[display,indent=0,goals_limit=1]}
  21.595 -
  21.596 -  This is an arithmetic triviality, but unfortunately the
  21.597 -  @{text arith} method cannot handle this specific form of an
  21.598 -  elimination rule. However, we can use the method @{text
  21.599 -  "atomize_elim"} to do an ad-hoc conversion to a disjunction of
  21.600 -  existentials, which can then be solved by the arithmetic decision procedure.
  21.601 -  Pattern compatibility and termination are automatic as usual.
  21.602 -*}
  21.603 -apply atomize_elim
  21.604 -apply arith
  21.605 -apply auto
  21.606 -done
  21.607 -termination by lexicographic_order
  21.608 -text {*
  21.609 -  We can stretch the notion of pattern matching even more. The
  21.610 -  following function is not a sensible functional program, but a
  21.611 -  perfectly valid mathematical definition:
  21.612 -*}
  21.613 -
  21.614 -function ev :: "nat \<Rightarrow> bool"
  21.615 -where
  21.616 -  "ev (2 * n) = True"
  21.617 -| "ev (2 * n + 1) = False"
  21.618 -apply atomize_elim
  21.619 -by arith+
  21.620 -termination by (relation "{}") simp
  21.621 -
  21.622 -text {*
  21.623 -  This general notion of pattern matching gives you a certain freedom
  21.624 -  in writing down specifications. However, as always, such freedom should
  21.625 -  be used with care:
  21.626 -
  21.627 -  If we leave the area of constructor
  21.628 -  patterns, we have effectively departed from the world of functional
  21.629 -  programming. This means that it is no longer possible to use the
  21.630 -  code generator, and expect it to generate ML code for our
  21.631 -  definitions. Also, such a specification might not work very well together with
  21.632 -  simplification. Your mileage may vary.
  21.633 -*}
  21.634 -
  21.635 -
  21.636 -subsection {* Conditional equations *}
  21.637 -
  21.638 -text {* 
  21.639 -  The function package also supports conditional equations, which are
  21.640 -  similar to guards in a language like Haskell. Here is Euclid's
  21.641 -  algorithm written with conditional patterns\footnote{Note that the
  21.642 -  patterns are also overlapping in the base case}:
  21.643 -*}
  21.644 -
  21.645 -function gcd :: "nat \<Rightarrow> nat \<Rightarrow> nat"
  21.646 -where
  21.647 -  "gcd x 0 = x"
  21.648 -| "gcd 0 y = y"
  21.649 -| "x < y \<Longrightarrow> gcd (Suc x) (Suc y) = gcd (Suc x) (y - x)"
  21.650 -| "\<not> x < y \<Longrightarrow> gcd (Suc x) (Suc y) = gcd (x - y) (Suc y)"
  21.651 -by (atomize_elim, auto, arith)
  21.652 -termination by lexicographic_order
  21.653 -
  21.654 -text {*
  21.655 -  By now, you can probably guess what the proof obligations for the
  21.656 -  pattern completeness and compatibility look like. 
  21.657 -
  21.658 -  Again, functions with conditional patterns are not supported by the
  21.659 -  code generator.
  21.660 -*}
  21.661 -
  21.662 -
  21.663 -subsection {* Pattern matching on strings *}
  21.664 -
  21.665 -text {*
  21.666 -  As strings (as lists of characters) are normal datatypes, pattern
  21.667 -  matching on them is possible, but somewhat problematic. Consider the
  21.668 -  following definition:
  21.669 -
  21.670 -\end{isamarkuptext}
  21.671 -\noindent\cmd{fun} @{text "check :: \"string \<Rightarrow> bool\""}\\%
  21.672 -\cmd{where}\\%
  21.673 -\hspace*{2ex}@{text "\"check (''good'') = True\""}\\%
  21.674 -@{text "| \"check s = False\""}
  21.675 -\begin{isamarkuptext}
  21.676 -
  21.677 -  \noindent An invocation of the above \cmd{fun} command does not
  21.678 -  terminate. What is the problem? Strings are lists of characters, and
  21.679 -  characters are a datatype with a lot of constructors. Splitting the
  21.680 -  catch-all pattern thus leads to an explosion of cases, which cannot
  21.681 -  be handled by Isabelle.
  21.682 -
  21.683 -  There are two things we can do here. Either we write an explicit
  21.684 -  @{text "if"} on the right hand side, or we can use conditional patterns:
  21.685 -*}
  21.686 -
  21.687 -function check :: "string \<Rightarrow> bool"
  21.688 -where
  21.689 -  "check (''good'') = True"
  21.690 -| "s \<noteq> ''good'' \<Longrightarrow> check s = False"
  21.691 -by auto
  21.692 -termination by (relation "{}") simp
  21.693 -
  21.694 -
  21.695 -section {* Partiality *}
  21.696 -
  21.697 -text {* 
  21.698 -  In HOL, all functions are total. A function @{term "f"} applied to
  21.699 -  @{term "x"} always has the value @{term "f x"}, and there is no notion
  21.700 -  of undefinedness. 
  21.701 -  This is why we have to do termination
  21.702 -  proofs when defining functions: The proof justifies that the
  21.703 -  function can be defined by wellfounded recursion.
  21.704 -
  21.705 -  However, the \cmd{function} package does support partiality to a
  21.706 -  certain extent. Let's look at the following function which looks
  21.707 -  for a zero of a given function f. 
  21.708 -*}
  21.709 -
  21.710 -function (*<*)(domintros)(*>*)findzero :: "(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat"
  21.711 -where
  21.712 -  "findzero f n = (if f n = 0 then n else findzero f (Suc n))"
  21.713 -by pat_completeness auto
  21.714 -
  21.715 -text {*
  21.716 -  \noindent Clearly, any attempt of a termination proof must fail. And without
  21.717 -  that, we do not get the usual rules @{text "findzero.simps"} and 
  21.718 -  @{text "findzero.induct"}. So what was the definition good for at all?
  21.719 -*}
  21.720 -
  21.721 -subsection {* Domain predicates *}
  21.722 -
  21.723 -text {*
  21.724 -  The trick is that Isabelle has not only defined the function @{const findzero}, but also
  21.725 -  a predicate @{term "findzero_dom"} that characterizes the values where the function
  21.726 -  terminates: the \emph{domain} of the function. If we treat a
  21.727 -  partial function just as a total function with an additional domain
  21.728 -  predicate, we can derive simplification and
  21.729 -  induction rules as we do for total functions. They are guarded
  21.730 -  by domain conditions and are called @{text psimps} and @{text
  21.731 -  pinduct}: 
  21.732 -*}
  21.733 -
  21.734 -text {*
  21.735 -  \noindent\begin{minipage}{0.79\textwidth}@{thm[display,margin=85] findzero.psimps}\end{minipage}
  21.736 -  \hfill(@{text "findzero.psimps"})
  21.737 -  \vspace{1em}
  21.738 -
  21.739 -  \noindent\begin{minipage}{0.79\textwidth}@{thm[display,margin=85] findzero.pinduct}\end{minipage}
  21.740 -  \hfill(@{text "findzero.pinduct"})
  21.741 -*}
  21.742 -
  21.743 -text {*
  21.744 -  Remember that all we
  21.745 -  are doing here is use some tricks to make a total function appear
  21.746 -  as if it was partial. We can still write the term @{term "findzero
  21.747 -  (\<lambda>x. 1) 0"} and like any other term of type @{typ nat} it is equal
  21.748 -  to some natural number, although we might not be able to find out
  21.749 -  which one. The function is \emph{underdefined}.
  21.750 -
  21.751 -  But it is defined enough to prove something interesting about it. We
  21.752 -  can prove that if @{term "findzero f n"}
  21.753 -  terminates, it indeed returns a zero of @{term f}:
  21.754 -*}
  21.755 -
  21.756 -lemma findzero_zero: "findzero_dom (f, n) \<Longrightarrow> f (findzero f n) = 0"
  21.757 -
  21.758 -txt {* \noindent We apply induction as usual, but using the partial induction
  21.759 -  rule: *}
  21.760 -
  21.761 -apply (induct f n rule: findzero.pinduct)
  21.762 -
  21.763 -txt {* \noindent This gives the following subgoals:
  21.764 -
  21.765 -  @{subgoals[display,indent=0]}
  21.766 -
  21.767 -  \noindent The hypothesis in our lemma was used to satisfy the first premise in
  21.768 -  the induction rule. However, we also get @{term
  21.769 -  "findzero_dom (f, n)"} as a local assumption in the induction step. This
  21.770 -  allows unfolding @{term "findzero f n"} using the @{text psimps}
  21.771 -  rule, and the rest is trivial.
  21.772 - *}
  21.773 -apply (simp add: findzero.psimps)
  21.774 -done
  21.775 -
  21.776 -text {*
  21.777 -  Proofs about partial functions are often not harder than for total
  21.778 -  functions. Fig.~\ref{findzero_isar} shows a slightly more
  21.779 -  complicated proof written in Isar. It is verbose enough to show how
  21.780 -  partiality comes into play: From the partial induction, we get an
  21.781 -  additional domain condition hypothesis. Observe how this condition
  21.782 -  is applied when calls to @{term findzero} are unfolded.
  21.783 -*}
  21.784 -
  21.785 -text_raw {*
  21.786 -\begin{figure}
  21.787 -\hrule\vspace{6pt}
  21.788 -\begin{minipage}{0.8\textwidth}
  21.789 -\isabellestyle{it}
  21.790 -\isastyle\isamarkuptrue
  21.791 -*}
  21.792 -lemma "\<lbrakk>findzero_dom (f, n); x \<in> {n ..< findzero f n}\<rbrakk> \<Longrightarrow> f x \<noteq> 0"
  21.793 -proof (induct rule: findzero.pinduct)
  21.794 -  fix f n assume dom: "findzero_dom (f, n)"
  21.795 -               and IH: "\<lbrakk>f n \<noteq> 0; x \<in> {Suc n ..< findzero f (Suc n)}\<rbrakk> \<Longrightarrow> f x \<noteq> 0"
  21.796 -               and x_range: "x \<in> {n ..< findzero f n}"
  21.797 -  have "f n \<noteq> 0"
  21.798 -  proof 
  21.799 -    assume "f n = 0"
  21.800 -    with dom have "findzero f n = n" by (simp add: findzero.psimps)
  21.801 -    with x_range show False by auto
  21.802 -  qed
  21.803 -  
  21.804 -  from x_range have "x = n \<or> x \<in> {Suc n ..< findzero f n}" by auto
  21.805 -  thus "f x \<noteq> 0"
  21.806 -  proof
  21.807 -    assume "x = n"
  21.808 -    with `f n \<noteq> 0` show ?thesis by simp
  21.809 -  next
  21.810 -    assume "x \<in> {Suc n ..< findzero f n}"
  21.811 -    with dom and `f n \<noteq> 0` have "x \<in> {Suc n ..< findzero f (Suc n)}" by (simp add: findzero.psimps)
  21.812 -    with IH and `f n \<noteq> 0`
  21.813 -    show ?thesis by simp
  21.814 -  qed
  21.815 -qed
  21.816 -text_raw {*
  21.817 -\isamarkupfalse\isabellestyle{tt}
  21.818 -\end{minipage}\vspace{6pt}\hrule
  21.819 -\caption{A proof about a partial function}\label{findzero_isar}
  21.820 -\end{figure}
  21.821 -*}
  21.822 -
  21.823 -subsection {* Partial termination proofs *}
  21.824 -
  21.825 -text {*
  21.826 -  Now that we have proved some interesting properties about our
  21.827 -  function, we should turn to the domain predicate and see if it is
  21.828 -  actually true for some values. Otherwise we would have just proved
  21.829 -  lemmas with @{term False} as a premise.
  21.830 -
  21.831 -  Essentially, we need some introduction rules for @{text
  21.832 -  findzero_dom}. The function package can prove such domain
  21.833 -  introduction rules automatically. But since they are not used very
  21.834 -  often (they are almost never needed if the function is total), this
  21.835 -  functionality is disabled by default for efficiency reasons. So we have to go
  21.836 -  back and ask for them explicitly by passing the @{text
  21.837 -  "(domintros)"} option to the function package:
  21.838 -
  21.839 -\vspace{1ex}
  21.840 -\noindent\cmd{function} @{text "(domintros) findzero :: \"(nat \<Rightarrow> nat) \<Rightarrow> nat \<Rightarrow> nat\""}\\%
  21.841 -\cmd{where}\isanewline%
  21.842 -\ \ \ldots\\
  21.843 -
  21.844 -  \noindent Now the package has proved an introduction rule for @{text findzero_dom}:
  21.845 -*}
  21.846 -
  21.847 -thm findzero.domintros
  21.848 -
  21.849 -text {*
  21.850 -  @{thm[display] findzero.domintros}
  21.851 -
  21.852 -  Domain introduction rules allow to show that a given value lies in the
  21.853 -  domain of a function, if the arguments of all recursive calls
  21.854 -  are in the domain as well. They allow to do a \qt{single step} in a
  21.855 -  termination proof. Usually, you want to combine them with a suitable
  21.856 -  induction principle.
  21.857 -
  21.858 -  Since our function increases its argument at recursive calls, we
  21.859 -  need an induction principle which works \qt{backwards}. We will use
  21.860 -  @{text inc_induct}, which allows to do induction from a fixed number
  21.861 -  \qt{downwards}:
  21.862 -
  21.863 -  \begin{center}@{thm inc_induct}\hfill(@{text "inc_induct"})\end{center}
  21.864 -
  21.865 -  Figure \ref{findzero_term} gives a detailed Isar proof of the fact
  21.866 -  that @{text findzero} terminates if there is a zero which is greater
  21.867 -  or equal to @{term n}. First we derive two useful rules which will
  21.868 -  solve the base case and the step case of the induction. The
  21.869 -  induction is then straightforward, except for the unusual induction
  21.870 -  principle.
  21.871 -
  21.872 -*}
  21.873 -
  21.874 -text_raw {*
  21.875 -\begin{figure}
  21.876 -\hrule\vspace{6pt}
  21.877 -\begin{minipage}{0.8\textwidth}
  21.878 -\isabellestyle{it}
  21.879 -\isastyle\isamarkuptrue
  21.880 -*}
  21.881 -lemma findzero_termination:
  21.882 -  assumes "x \<ge> n" and "f x = 0"
  21.883 -  shows "findzero_dom (f, n)"
  21.884 -proof - 
  21.885 -  have base: "findzero_dom (f, x)"
  21.886 -    by (rule findzero.domintros) (simp add:`f x = 0`)
  21.887 -
  21.888 -  have step: "\<And>i. findzero_dom (f, Suc i) 
  21.889 -    \<Longrightarrow> findzero_dom (f, i)"
  21.890 -    by (rule findzero.domintros) simp
  21.891 -
  21.892 -  from `x \<ge> n` show ?thesis
  21.893 -  proof (induct rule:inc_induct)
  21.894 -    show "findzero_dom (f, x)" by (rule base)
  21.895 -  next
  21.896 -    fix i assume "findzero_dom (f, Suc i)"
  21.897 -    thus "findzero_dom (f, i)" by (rule step)
  21.898 -  qed
  21.899 -qed      
  21.900 -text_raw {*
  21.901 -\isamarkupfalse\isabellestyle{tt}
  21.902 -\end{minipage}\vspace{6pt}\hrule
  21.903 -\caption{Termination proof for @{text findzero}}\label{findzero_term}
  21.904 -\end{figure}
  21.905 -*}
  21.906 -      
  21.907 -text {*
  21.908 -  Again, the proof given in Fig.~\ref{findzero_term} has a lot of
  21.909 -  detail in order to explain the principles. Using more automation, we
  21.910 -  can also have a short proof:
  21.911 -*}
  21.912 -
  21.913 -lemma findzero_termination_short:
  21.914 -  assumes zero: "x >= n" 
  21.915 -  assumes [simp]: "f x = 0"
  21.916 -  shows "findzero_dom (f, n)"
  21.917 -using zero
  21.918 -by (induct rule:inc_induct) (auto intro: findzero.domintros)
  21.919 -    
  21.920 -text {*
  21.921 -  \noindent It is simple to combine the partial correctness result with the
  21.922 -  termination lemma:
  21.923 -*}
  21.924 -
  21.925 -lemma findzero_total_correctness:
  21.926 -  "f x = 0 \<Longrightarrow> f (findzero f 0) = 0"
  21.927 -by (blast intro: findzero_zero findzero_termination)
  21.928 -
  21.929 -subsection {* Definition of the domain predicate *}
  21.930 -
  21.931 -text {*
  21.932 -  Sometimes it is useful to know what the definition of the domain
  21.933 -  predicate looks like. Actually, @{text findzero_dom} is just an
  21.934 -  abbreviation:
  21.935 -
  21.936 -  @{abbrev[display] findzero_dom}
  21.937 -
  21.938 -  The domain predicate is the \emph{accessible part} of a relation @{const
  21.939 -  findzero_rel}, which was also created internally by the function
  21.940 -  package. @{const findzero_rel} is just a normal
  21.941 -  inductive predicate, so we can inspect its definition by
  21.942 -  looking at the introduction rules @{text findzero_rel.intros}.
  21.943 -  In our case there is just a single rule:
  21.944 -
  21.945 -  @{thm[display] findzero_rel.intros}
  21.946 -
  21.947 -  The predicate @{const findzero_rel}
  21.948 -  describes the \emph{recursion relation} of the function
  21.949 -  definition. The recursion relation is a binary relation on
  21.950 -  the arguments of the function that relates each argument to its
  21.951 -  recursive calls. In general, there is one introduction rule for each
  21.952 -  recursive call.
  21.953 -
  21.954 -  The predicate @{term "accp findzero_rel"} is the accessible part of
  21.955 -  that relation. An argument belongs to the accessible part, if it can
  21.956 -  be reached in a finite number of steps (cf.~its definition in @{text
  21.957 -  "Wellfounded.thy"}).
  21.958 -
  21.959 -  Since the domain predicate is just an abbreviation, you can use
  21.960 -  lemmas for @{const accp} and @{const findzero_rel} directly. Some
  21.961 -  lemmas which are occasionally useful are @{text accpI}, @{text
  21.962 -  accp_downward}, and of course the introduction and elimination rules
  21.963 -  for the recursion relation @{text "findzero.intros"} and @{text "findzero.cases"}.
  21.964 -*}
  21.965 -
  21.966 -section {* Nested recursion *}
  21.967 -
  21.968 -text {*
  21.969 -  Recursive calls which are nested in one another frequently cause
  21.970 -  complications, since their termination proof can depend on a partial
  21.971 -  correctness property of the function itself. 
  21.972 -
  21.973 -  As a small example, we define the \qt{nested zero} function:
  21.974 -*}
  21.975 -
  21.976 -function nz :: "nat \<Rightarrow> nat"
  21.977 -where
  21.978 -  "nz 0 = 0"
  21.979 -| "nz (Suc n) = nz (nz n)"
  21.980 -by pat_completeness auto
  21.981 -
  21.982 -text {*
  21.983 -  If we attempt to prove termination using the identity measure on
  21.984 -  naturals, this fails:
  21.985 -*}
  21.986 -
  21.987 -termination
  21.988 -  apply (relation "measure (\<lambda>n. n)")
  21.989 -  apply auto
  21.990 -
  21.991 -txt {*
  21.992 -  We get stuck with the subgoal
  21.993 -
  21.994 -  @{subgoals[display]}
  21.995 -
  21.996 -  Of course this statement is true, since we know that @{const nz} is
  21.997 -  the zero function. And in fact we have no problem proving this
  21.998 -  property by induction.
  21.999 -*}
 21.1000 -(*<*)oops(*>*)
 21.1001 -lemma nz_is_zero: "nz_dom n \<Longrightarrow> nz n = 0"
 21.1002 -  by (induct rule:nz.pinduct) (auto simp: nz.psimps)
 21.1003 -
 21.1004 -text {*
 21.1005 -  We formulate this as a partial correctness lemma with the condition
 21.1006 -  @{term "nz_dom n"}. This allows us to prove it with the @{text
 21.1007 -  pinduct} rule before we have proved termination. With this lemma,
 21.1008 -  the termination proof works as expected:
 21.1009 -*}
 21.1010 -
 21.1011 -termination
 21.1012 -  by (relation "measure (\<lambda>n. n)") (auto simp: nz_is_zero)
 21.1013 -
 21.1014 -text {*
 21.1015 -  As a general strategy, one should prove the statements needed for
 21.1016 -  termination as a partial property first. Then they can be used to do
 21.1017 -  the termination proof. This also works for less trivial
 21.1018 -  examples. Figure \ref{f91} defines the 91-function, a well-known
 21.1019 -  challenge problem due to John McCarthy, and proves its termination.
 21.1020 -*}
 21.1021 -
 21.1022 -text_raw {*
 21.1023 -\begin{figure}
 21.1024 -\hrule\vspace{6pt}
 21.1025 -\begin{minipage}{0.8\textwidth}
 21.1026 -\isabellestyle{it}
 21.1027 -\isastyle\isamarkuptrue
 21.1028 -*}
 21.1029 -
 21.1030 -function f91 :: "nat \<Rightarrow> nat"
 21.1031 -where
 21.1032 -  "f91 n = (if 100 < n then n - 10 else f91 (f91 (n + 11)))"
 21.1033 -by pat_completeness auto
 21.1034 -
 21.1035 -lemma f91_estimate: 
 21.1036 -  assumes trm: "f91_dom n" 
 21.1037 -  shows "n < f91 n + 11"
 21.1038 -using trm by induct (auto simp: f91.psimps)
 21.1039 -
 21.1040 -termination
 21.1041 -proof
 21.1042 -  let ?R = "measure (\<lambda>x. 101 - x)"
 21.1043 -  show "wf ?R" ..
 21.1044 -
 21.1045 -  fix n :: nat assume "\<not> 100 < n" -- "Assumptions for both calls"
 21.1046 -
 21.1047 -  thus "(n + 11, n) \<in> ?R" by simp -- "Inner call"
 21.1048 -
 21.1049 -  assume inner_trm: "f91_dom (n + 11)" -- "Outer call"
 21.1050 -  with f91_estimate have "n + 11 < f91 (n + 11) + 11" .
 21.1051 -  with `\<not> 100 < n` show "(f91 (n + 11), n) \<in> ?R" by simp
 21.1052 -qed
 21.1053 -
 21.1054 -text_raw {*
 21.1055 -\isamarkupfalse\isabellestyle{tt}
 21.1056 -\end{minipage}
 21.1057 -\vspace{6pt}\hrule
 21.1058 -\caption{McCarthy's 91-function}\label{f91}
 21.1059 -\end{figure}
 21.1060 -*}
 21.1061 -
 21.1062 -
 21.1063 -section {* Higher-Order Recursion *}
 21.1064 -
 21.1065 -text {*
 21.1066 -  Higher-order recursion occurs when recursive calls
 21.1067 -  are passed as arguments to higher-order combinators such as @{const
 21.1068 -  map}, @{term filter} etc.
 21.1069 -  As an example, imagine a datatype of n-ary trees:
 21.1070 -*}
 21.1071 -
 21.1072 -datatype 'a tree = 
 21.1073 -  Leaf 'a 
 21.1074 -| Branch "'a tree list"
 21.1075 -
 21.1076 -
 21.1077 -text {* \noindent We can define a function which swaps the left and right subtrees recursively, using the 
 21.1078 -  list functions @{const rev} and @{const map}: *}
 21.1079 -
 21.1080 -fun mirror :: "'a tree \<Rightarrow> 'a tree"
 21.1081 -where
 21.1082 -  "mirror (Leaf n) = Leaf n"
 21.1083 -| "mirror (Branch l) = Branch (rev (map mirror l))"
 21.1084 -
 21.1085 -text {*
 21.1086 -  Although the definition is accepted without problems, let us look at the termination proof:
 21.1087 -*}
 21.1088 -
 21.1089 -termination proof
 21.1090 -  txt {*
 21.1091 -
 21.1092 -  As usual, we have to give a wellfounded relation, such that the
 21.1093 -  arguments of the recursive calls get smaller. But what exactly are
 21.1094 -  the arguments of the recursive calls when mirror is given as an
 21.1095 -  argument to @{const map}? Isabelle gives us the
 21.1096 -  subgoals
 21.1097 -
 21.1098 -  @{subgoals[display,indent=0]} 
 21.1099 -
 21.1100 -  So the system seems to know that @{const map} only
 21.1101 -  applies the recursive call @{term "mirror"} to elements
 21.1102 -  of @{term "l"}, which is essential for the termination proof.
 21.1103 -
 21.1104 -  This knowledge about @{const map} is encoded in so-called congruence rules,
 21.1105 -  which are special theorems known to the \cmd{function} command. The
 21.1106 -  rule for @{const map} is
 21.1107 -
 21.1108 -  @{thm[display] map_cong}
 21.1109 -
 21.1110 -  You can read this in the following way: Two applications of @{const
 21.1111 -  map} are equal, if the list arguments are equal and the functions
 21.1112 -  coincide on the elements of the list. This means that for the value 
 21.1113 -  @{term "map f l"} we only have to know how @{term f} behaves on
 21.1114 -  the elements of @{term l}.
 21.1115 -
 21.1116 -  Usually, one such congruence rule is
 21.1117 -  needed for each higher-order construct that is used when defining
 21.1118 -  new functions. In fact, even basic functions like @{const
 21.1119 -  If} and @{const Let} are handled by this mechanism. The congruence
 21.1120 -  rule for @{const If} states that the @{text then} branch is only
 21.1121 -  relevant if the condition is true, and the @{text else} branch only if it
 21.1122 -  is false:
 21.1123 -
 21.1124 -  @{thm[display] if_cong}
 21.1125 -  
 21.1126 -  Congruence rules can be added to the
 21.1127 -  function package by giving them the @{term fundef_cong} attribute.
 21.1128 -
 21.1129 -  The constructs that are predefined in Isabelle, usually
 21.1130 -  come with the respective congruence rules.
 21.1131 -  But if you define your own higher-order functions, you may have to
 21.1132 -  state and prove the required congruence rules yourself, if you want to use your
 21.1133 -  functions in recursive definitions. 
 21.1134 -*}
 21.1135 -(*<*)oops(*>*)
 21.1136 -
 21.1137 -subsection {* Congruence Rules and Evaluation Order *}
 21.1138 -
 21.1139 -text {* 
 21.1140 -  Higher order logic differs from functional programming languages in
 21.1141 -  that it has no built-in notion of evaluation order. A program is
 21.1142 -  just a set of equations, and it is not specified how they must be
 21.1143 -  evaluated. 
 21.1144 -
 21.1145 -  However for the purpose of function definition, we must talk about
 21.1146 -  evaluation order implicitly, when we reason about termination.
 21.1147 -  Congruence rules express that a certain evaluation order is
 21.1148 -  consistent with the logical definition. 
 21.1149 -
 21.1150 -  Consider the following function.
 21.1151 -*}
 21.1152 -
 21.1153 -function f :: "nat \<Rightarrow> bool"
 21.1154 -where
 21.1155 -  "f n = (n = 0 \<or> f (n - 1))"
 21.1156 -(*<*)by pat_completeness auto(*>*)
 21.1157 -
 21.1158 -text {*
 21.1159 -  For this definition, the termination proof fails. The default configuration
 21.1160 -  specifies no congruence rule for disjunction. We have to add a
 21.1161 -  congruence rule that specifies left-to-right evaluation order:
 21.1162 -
 21.1163 -  \vspace{1ex}
 21.1164 -  \noindent @{thm disj_cong}\hfill(@{text "disj_cong"})
 21.1165 -  \vspace{1ex}
 21.1166 -
 21.1167 -  Now the definition works without problems. Note how the termination
 21.1168 -  proof depends on the extra condition that we get from the congruence
 21.1169 -  rule.
 21.1170 -
 21.1171 -  However, as evaluation is not a hard-wired concept, we
 21.1172 -  could just turn everything around by declaring a different
 21.1173 -  congruence rule. Then we can make the reverse definition:
 21.1174 -*}
 21.1175 -
 21.1176 -lemma disj_cong2[fundef_cong]: 
 21.1177 -  "(\<not> Q' \<Longrightarrow> P = P') \<Longrightarrow> (Q = Q') \<Longrightarrow> (P \<or> Q) = (P' \<or> Q')"
 21.1178 -  by blast
 21.1179 -
 21.1180 -fun f' :: "nat \<Rightarrow> bool"
 21.1181 -where
 21.1182 -  "f' n = (f' (n - 1) \<or> n = 0)"
 21.1183 -
 21.1184 -text {*
 21.1185 -  \noindent These examples show that, in general, there is no \qt{best} set of
 21.1186 -  congruence rules.
 21.1187 -
 21.1188 -  However, such tweaking should rarely be necessary in
 21.1189 -  practice, as most of the time, the default set of congruence rules
 21.1190 -  works well.
 21.1191 -*}
 21.1192 -
 21.1193 -end
    22.1 --- a/doc-src/Functions/document/build	Tue Aug 28 18:46:15 2012 +0200
    22.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.3 @@ -1,14 +0,0 @@
    22.4 -#!/bin/bash
    22.5 -
    22.6 -set -e
    22.7 -
    22.8 -FORMAT="$1"
    22.9 -VARIANT="$2"
   22.10 -
   22.11 -cp "$ISABELLE_HOME/doc-src/iman.sty" .
   22.12 -cp "$ISABELLE_HOME/doc-src/extra.sty" .
   22.13 -cp "$ISABELLE_HOME/doc-src/isar.sty" .
   22.14 -cp "$ISABELLE_HOME/doc-src/manual.bib" .
   22.15 -
   22.16 -"$ISABELLE_HOME/doc-src/prepare_document" "$FORMAT"
   22.17 -
    23.1 --- a/doc-src/Functions/document/conclusion.tex	Tue Aug 28 18:46:15 2012 +0200
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,7 +0,0 @@
    23.4 -\section{Conclusion}
    23.5 -
    23.6 -\fixme{}
    23.7 -
    23.8 -
    23.9 -
   23.10 -
    24.1 --- a/doc-src/Functions/document/intro.tex	Tue Aug 28 18:46:15 2012 +0200
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,55 +0,0 @@
    24.4 -\section{Introduction}
    24.5 -
    24.6 -Starting from Isabelle 2007, new facilities for recursive
    24.7 -function definitions~\cite{krauss2006} are available. They provide
    24.8 -better support for general recursive definitions than previous
    24.9 -packages.  But despite all tool support, function definitions can
   24.10 -sometimes be a difficult thing. 
   24.11 -
   24.12 -This tutorial is an example-guided introduction to the practical use
   24.13 -of the package and related tools. It should help you get started with
   24.14 -defining functions quickly. For the more difficult definitions we will
   24.15 -discuss what problems can arise, and how they can be solved.
   24.16 -
   24.17 -We assume that you have mastered the fundamentals of Isabelle/HOL
   24.18 -and are able to write basic specifications and proofs. To start out
   24.19 -with Isabelle in general, consult the Isabelle/HOL tutorial
   24.20 -\cite{isa-tutorial}.
   24.21 -
   24.22 -
   24.23 -
   24.24 -\paragraph{Structure of this tutorial.}
   24.25 -Section 2 introduces the syntax and basic operation of the \cmd{fun}
   24.26 -command, which provides full automation with reasonable default
   24.27 -behavior.  The impatient reader can stop after that
   24.28 -section, and consult the remaining sections only when needed.
   24.29 -Section 3 introduces the more verbose \cmd{function} command which
   24.30 -gives fine-grained control. This form should be used
   24.31 -whenever the short form fails.
   24.32 -After that we discuss more specialized issues:
   24.33 -termination, mutual, nested and higher-order recursion, partiality, pattern matching
   24.34 -and others.
   24.35 -
   24.36 -
   24.37 -\paragraph{Some background.}
   24.38 -Following the LCF tradition, the package is realized as a definitional
   24.39 -extension: Recursive definitions are internally transformed into a
   24.40 -non-recursive form, such that the function can be defined using
   24.41 -standard definition facilities. Then the recursive specification is
   24.42 -derived from the primitive definition.  This is a complex task, but it
   24.43 -is fully automated and mostly transparent to the user. Definitional
   24.44 -extensions are valuable because they are conservative by construction:
   24.45 -The \qt{new} concept of general wellfounded recursion is completely reduced
   24.46 -to existing principles.
   24.47 -
   24.48 -
   24.49 -
   24.50 -
   24.51 -The new \cmd{function} command, and its short form \cmd{fun} have mostly
   24.52 -replaced the traditional \cmd{recdef} command \cite{slind-tfl}. They solve
   24.53 -a few of technical issues around \cmd{recdef}, and allow definitions
   24.54 -which were not previously possible.
   24.55 -
   24.56 -
   24.57 -
   24.58 -
    25.1 --- a/doc-src/Functions/document/mathpartir.sty	Tue Aug 28 18:46:15 2012 +0200
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,421 +0,0 @@
    25.4 -%  Mathpartir --- Math Paragraph for Typesetting Inference Rules
    25.5 -%
    25.6 -%  Copyright (C) 2001, 2002, 2003, 2004, 2005 Didier Rémy
    25.7 -%
    25.8 -%  Author         : Didier Remy 
    25.9 -%  Version        : 1.2.0
   25.10 -%  Bug Reports    : to author
   25.11 -%  Web Site       : http://pauillac.inria.fr/~remy/latex/
   25.12 -% 
   25.13 -%  Mathpartir is free software; you can redistribute it and/or modify
   25.14 -%  it under the terms of the GNU General Public License as published by
   25.15 -%  the Free Software Foundation; either version 2, or (at your option)
   25.16 -%  any later version.
   25.17 -%  
   25.18 -%  Mathpartir is distributed in the hope that it will be useful,
   25.19 -%  but WITHOUT ANY WARRANTY; without even the implied warranty of
   25.20 -%  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   25.21 -%  GNU General Public License for more details 
   25.22 -%  (http://pauillac.inria.fr/~remy/license/GPL).
   25.23 -%
   25.24 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   25.25 -%  File mathpartir.sty (LaTeX macros)
   25.26 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
   25.27 -
   25.28 -\NeedsTeXFormat{LaTeX2e}
   25.29 -\ProvidesPackage{mathpartir}
   25.30 -    [2005/12/20 version 1.2.0 Math Paragraph for Typesetting Inference Rules]
   25.31 -
   25.32 -%%
   25.33 -
   25.34 -%% Identification
   25.35 -%% Preliminary declarations
   25.36 -
   25.37 -\RequirePackage {keyval}
   25.38 -
   25.39 -%% Options
   25.40 -%% More declarations
   25.41 -
   25.42 -%% PART I: Typesetting maths in paragraphe mode
   25.43 -
   25.44 -\newdimen \mpr@tmpdim
   25.45 -
   25.46 -% To ensure hevea \hva compatibility, \hva should expands to nothing 
   25.47 -% in mathpar or in inferrule
   25.48 -\let \mpr@hva \empty
   25.49 -
   25.50 -%% normal paragraph parametters, should rather be taken dynamically
   25.51 -\def \mpr@savepar {%
   25.52 -  \edef \MathparNormalpar
   25.53 -     {\noexpand \lineskiplimit \the\lineskiplimit
   25.54 -      \noexpand \lineskip \the\lineskip}%
   25.55 -  }
   25.56 -
   25.57 -\def \mpr@rulelineskip {\lineskiplimit=0.3em\lineskip=0.2em plus 0.1em}
   25.58 -\def \mpr@lesslineskip {\lineskiplimit=0.6em\lineskip=0.5em plus 0.2em}
   25.59 -\def \mpr@lineskip  {\lineskiplimit=1.2em\lineskip=1.2em plus 0.2em}
   25.60 -\let \MathparLineskip \mpr@lineskip
   25.61 -\def \mpr@paroptions {\MathparLineskip}
   25.62 -\let \mpr@prebindings \relax
   25.63 -
   25.64 -\newskip \mpr@andskip \mpr@andskip 2em plus 0.5fil minus 0.5em
   25.65 -
   25.66 -\def \mpr@goodbreakand
   25.67 -   {\hskip -\mpr@andskip  \penalty -1000\hskip \mpr@andskip}
   25.68 -\def \mpr@and {\hskip \mpr@andskip}
   25.69 -\def \mpr@andcr {\penalty 50\mpr@and}
   25.70 -\def \mpr@cr {\penalty -10000\mpr@and}
   25.71 -\def \mpr@eqno #1{\mpr@andcr #1\hskip 0em plus -1fil \penalty 10}
   25.72 -
   25.73 -\def \mpr@bindings {%
   25.74 -  \let \and \mpr@andcr
   25.75 -  \let \par \mpr@andcr
   25.76 -  \let \\\mpr@cr
   25.77 -  \let \eqno \mpr@eqno
   25.78 -  \let \hva \mpr@hva
   25.79 -  } 
   25.80 -\let \MathparBindings \mpr@bindings
   25.81 -
   25.82 -% \@ifundefined {ignorespacesafterend}
   25.83 -%    {\def \ignorespacesafterend {\aftergroup \ignorespaces}
   25.84 -
   25.85 -\newenvironment{mathpar}[1][]
   25.86 -  {$$\mpr@savepar \parskip 0em \hsize \linewidth \centering
   25.87 -     \vbox \bgroup \mpr@prebindings \mpr@paroptions #1\ifmmode $\else
   25.88 -     \noindent $\displaystyle\fi
   25.89 -     \MathparBindings}
   25.90 -  {\unskip \ifmmode $\fi\egroup $$\ignorespacesafterend}
   25.91 -
   25.92 -% \def \math@mathpar #1{\setbox0 \hbox {$\displaystyle #1$}\ifnum
   25.93 -%     \wd0 < \hsize  $$\box0$$\else \bmathpar #1\emathpar \fi}
   25.94 -
   25.95 -%%% HOV BOXES
   25.96 -
   25.97 -\def \mathvbox@ #1{\hbox \bgroup \mpr@normallineskip 
   25.98 -  \vbox \bgroup \tabskip 0em \let \\ \cr
   25.99 -  \halign \bgroup \hfil $##$\hfil\cr #1\crcr \egroup \egroup
  25.100 -  \egroup}
  25.101 -
  25.102 -\def \mathhvbox@ #1{\setbox0 \hbox {\let \\\qquad $#1$}\ifnum \wd0 < \hsize
  25.103 -      \box0\else \mathvbox {#1}\fi}
  25.104 -
  25.105 -
  25.106 -%% Part II -- operations on lists
  25.107 -
  25.108 -\newtoks \mpr@lista
  25.109 -\newtoks \mpr@listb
  25.110 -
  25.111 -\long \def\mpr@cons #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
  25.112 -{#2}\edef #2{\the \mpr@lista \the \mpr@listb}}
  25.113 -
  25.114 -\long \def\mpr@snoc #1\mpr@to#2{\mpr@lista {\\{#1}}\mpr@listb \expandafter
  25.115 -{#2}\edef #2{\the \mpr@listb\the\mpr@lista}}
  25.116 -
  25.117 -\long \def \mpr@concat#1=#2\mpr@to#3{\mpr@lista \expandafter {#2}\mpr@listb
  25.118 -\expandafter {#3}\edef #1{\the \mpr@listb\the\mpr@lista}}
  25.119 -
  25.120 -\def \mpr@head #1\mpr@to #2{\expandafter \mpr@head@ #1\mpr@head@ #1#2}
  25.121 -\long \def \mpr@head@ #1#2\mpr@head@ #3#4{\def #4{#1}\def#3{#2}}
  25.122 -
  25.123 -\def \mpr@flatten #1\mpr@to #2{\expandafter \mpr@flatten@ #1\mpr@flatten@ #1#2}
  25.124 -\long \def \mpr@flatten@ \\#1\\#2\mpr@flatten@ #3#4{\def #4{#1}\def #3{\\#2}}
  25.125 -
  25.126 -\def \mpr@makelist #1\mpr@to #2{\def \mpr@all {#1}%
  25.127 -   \mpr@lista {\\}\mpr@listb \expandafter {\mpr@all}\edef \mpr@all {\the
  25.128 -   \mpr@lista \the \mpr@listb \the \mpr@lista}\let #2\empty 
  25.129 -   \def \mpr@stripof ##1##2\mpr@stripend{\def \mpr@stripped{##2}}\loop
  25.130 -     \mpr@flatten \mpr@all \mpr@to \mpr@one
  25.131 -     \expandafter \mpr@snoc \mpr@one \mpr@to #2\expandafter \mpr@stripof
  25.132 -     \mpr@all \mpr@stripend  
  25.133 -     \ifx \mpr@stripped \empty \let \mpr@isempty 0\else \let \mpr@isempty 1\fi
  25.134 -     \ifx 1\mpr@isempty
  25.135 -   \repeat
  25.136 -}
  25.137 -
  25.138 -\def \mpr@rev #1\mpr@to #2{\let \mpr@tmp \empty
  25.139 -   \def \\##1{\mpr@cons ##1\mpr@to \mpr@tmp}#1\let #2\mpr@tmp}
  25.140 -
  25.141 -%% Part III -- Type inference rules
  25.142 -
  25.143 -\newif \if@premisse
  25.144 -\newbox \mpr@hlist
  25.145 -\newbox \mpr@vlist
  25.146 -\newif \ifmpr@center \mpr@centertrue
  25.147 -\def \mpr@htovlist {%
  25.148 -   \setbox \mpr@hlist
  25.149 -      \hbox {\strut
  25.150 -             \ifmpr@center \hskip -0.5\wd\mpr@hlist\fi
  25.151 -             \unhbox \mpr@hlist}%
  25.152 -   \setbox \mpr@vlist
  25.153 -      \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
  25.154 -             \else \unvbox \mpr@vlist \box \mpr@hlist
  25.155 -             \fi}%
  25.156 -}
  25.157 -% OLD version
  25.158 -% \def \mpr@htovlist {%
  25.159 -%    \setbox \mpr@hlist
  25.160 -%       \hbox {\strut \hskip -0.5\wd\mpr@hlist \unhbox \mpr@hlist}%
  25.161 -%    \setbox \mpr@vlist
  25.162 -%       \vbox {\if@premisse  \box \mpr@hlist \unvbox \mpr@vlist
  25.163 -%              \else \unvbox \mpr@vlist \box \mpr@hlist
  25.164 -%              \fi}%
  25.165 -% }
  25.166 -
  25.167 -\def \mpr@item #1{$\displaystyle #1$}
  25.168 -\def \mpr@sep{2em}
  25.169 -\def \mpr@blank { }
  25.170 -\def \mpr@hovbox #1#2{\hbox
  25.171 -  \bgroup
  25.172 -  \ifx #1T\@premissetrue
  25.173 -  \else \ifx #1B\@premissefalse
  25.174 -  \else
  25.175 -     \PackageError{mathpartir}
  25.176 -       {Premisse orientation should either be T or B}
  25.177 -       {Fatal error in Package}%
  25.178 -  \fi \fi
  25.179 -  \def \@test {#2}\ifx \@test \mpr@blank\else
  25.180 -  \setbox \mpr@hlist \hbox {}%
  25.181 -  \setbox \mpr@vlist \vbox {}%
  25.182 -  \if@premisse \let \snoc \mpr@cons \else \let \snoc \mpr@snoc \fi
  25.183 -  \let \@hvlist \empty \let \@rev \empty
  25.184 -  \mpr@tmpdim 0em
  25.185 -  \expandafter \mpr@makelist #2\mpr@to \mpr@flat
  25.186 -  \if@premisse \mpr@rev \mpr@flat \mpr@to \@rev \else \let \@rev \mpr@flat \fi
  25.187 -  \def \\##1{%
  25.188 -     \def \@test {##1}\ifx \@test \empty
  25.189 -        \mpr@htovlist
  25.190 -        \mpr@tmpdim 0em %%% last bug fix not extensively checked
  25.191 -     \else
  25.192 -      \setbox0 \hbox{\mpr@item {##1}}\relax
  25.193 -      \advance \mpr@tmpdim by \wd0
  25.194 -      %\mpr@tmpdim 1.02\mpr@tmpdim
  25.195 -      \ifnum \mpr@tmpdim < \hsize
  25.196 -         \ifnum \wd\mpr@hlist > 0
  25.197 -           \if@premisse
  25.198 -             \setbox \mpr@hlist 
  25.199 -                \hbox {\unhbox0 \hskip \mpr@sep \unhbox \mpr@hlist}%
  25.200 -           \else
  25.201 -             \setbox \mpr@hlist
  25.202 -                \hbox {\unhbox \mpr@hlist  \hskip \mpr@sep \unhbox0}%
  25.203 -           \fi
  25.204 -         \else 
  25.205 -         \setbox \mpr@hlist \hbox {\unhbox0}%
  25.206 -         \fi
  25.207 -      \else
  25.208 -         \ifnum \wd \mpr@hlist > 0
  25.209 -            \mpr@htovlist 
  25.210 -            \mpr@tmpdim \wd0
  25.211 -         \fi
  25.212 -         \setbox \mpr@hlist \hbox {\unhbox0}%
  25.213 -      \fi
  25.214 -      \advance \mpr@tmpdim by \mpr@sep
  25.215 -   \fi
  25.216 -   }%
  25.217 -   \@rev
  25.218 -   \mpr@htovlist
  25.219 -   \ifmpr@center \hskip \wd\mpr@vlist\fi \box \mpr@vlist
  25.220 -   \fi
  25.221 -   \egroup
  25.222 -}
  25.223 -
  25.224 -%%% INFERENCE RULES
  25.225 -
  25.226 -\@ifundefined{@@over}{%
  25.227 -    \let\@@over\over % fallback if amsmath is not loaded
  25.228 -    \let\@@overwithdelims\overwithdelims
  25.229 -    \let\@@atop\atop \let\@@atopwithdelims\atopwithdelims
  25.230 -    \let\@@above\above \let\@@abovewithdelims\abovewithdelims
  25.231 -  }{}
  25.232 -
  25.233 -%% The default
  25.234 -
  25.235 -\def \mpr@@fraction #1#2{\hbox {\advance \hsize by -0.5em
  25.236 -    $\displaystyle {#1\mpr@over #2}$}}
  25.237 -\let \mpr@fraction \mpr@@fraction
  25.238 -
  25.239 -%% A generic solution to arrow
  25.240 -
  25.241 -\def \mpr@make@fraction #1#2#3#4#5{\hbox {%
  25.242 -     \def \mpr@tail{#1}%
  25.243 -     \def \mpr@body{#2}%
  25.244 -     \def \mpr@head{#3}%
  25.245 -     \setbox1=\hbox{$#4$}\setbox2=\hbox{$#5$}%
  25.246 -     \setbox3=\hbox{$\mkern -3mu\mpr@body\mkern -3mu$}%
  25.247 -     \setbox3=\hbox{$\mkern -3mu \mpr@body\mkern -3mu$}%
  25.248 -     \dimen0=\dp1\advance\dimen0 by \ht3\relax\dp1\dimen0\relax
  25.249 -     \dimen0=\ht2\advance\dimen0 by \dp3\relax\ht2\dimen0\relax
  25.250 -     \setbox0=\hbox {$\box1 \@@atop \box2$}%
  25.251 -     \dimen0=\wd0\box0
  25.252 -     \box0 \hskip -\dimen0\relax
  25.253 -     \hbox to \dimen0 {$%
  25.254 -       \mathrel{\mpr@tail}\joinrel
  25.255 -       \xleaders\hbox{\copy3}\hfil\joinrel\mathrel{\mpr@head}%
  25.256 -     $}}}
  25.257 -
  25.258 -%% Old stuff should be removed in next version
  25.259 -\def \mpr@@reduce #1#2{\hbox
  25.260 -    {$\lower 0.01pt \mpr@@fraction {#1}{#2}\mkern -15mu\rightarrow$}}
  25.261 -\def \mpr@@rewrite #1#2#3{\hbox
  25.262 -    {$\lower 0.01pt \mpr@@fraction {#2}{#3}\mkern -8mu#1$}}
  25.263 -\def \mpr@infercenter #1{\vcenter {\mpr@hovbox{T}{#1}}}
  25.264 -
  25.265 -\def \mpr@empty {}
  25.266 -\def \mpr@inferrule
  25.267 -  {\bgroup
  25.268 -     \ifnum \linewidth<\hsize \hsize \linewidth\fi
  25.269 -     \mpr@rulelineskip
  25.270 -     \let \and \qquad
  25.271 -     \let \hva \mpr@hva
  25.272 -     \let \@rulename \mpr@empty
  25.273 -     \let \@rule@options \mpr@empty
  25.274 -     \let \mpr@over \@@over
  25.275 -     \mpr@inferrule@}
  25.276 -\newcommand {\mpr@inferrule@}[3][]
  25.277 -  {\everymath={\displaystyle}%       
  25.278 -   \def \@test {#2}\ifx \empty \@test
  25.279 -      \setbox0 \hbox {$\vcenter {\mpr@hovbox{B}{#3}}$}%
  25.280 -   \else 
  25.281 -   \def \@test {#3}\ifx \empty \@test
  25.282 -      \setbox0 \hbox {$\vcenter {\mpr@hovbox{T}{#2}}$}%
  25.283 -   \else
  25.284 -   \setbox0 \mpr@fraction {\mpr@hovbox{T}{#2}}{\mpr@hovbox{B}{#3}}%
  25.285 -   \fi \fi
  25.286 -   \def \@test {#1}\ifx \@test\empty \box0
  25.287 -   \else \vbox 
  25.288 -%%% Suggestion de Francois pour les etiquettes longues
  25.289 -%%%   {\hbox to \wd0 {\RefTirName {#1}\hfil}\box0}\fi
  25.290 -      {\hbox {\RefTirName {#1}}\box0}\fi
  25.291 -   \egroup}
  25.292 -
  25.293 -\def \mpr@vdotfil #1{\vbox to #1{\leaders \hbox{$\cdot$} \vfil}}
  25.294 -
  25.295 -% They are two forms
  25.296 -% \inferrule [label]{[premisses}{conclusions}
  25.297 -% or
  25.298 -% \inferrule* [options]{[premisses}{conclusions}
  25.299 -%
  25.300 -% Premisses and conclusions are lists of elements separated by \\
  25.301 -% Each \\ produces a break, attempting horizontal breaks if possible, 
  25.302 -% and  vertical breaks if needed. 
  25.303 -% 
  25.304 -% An empty element obtained by \\\\ produces a vertical break in all cases. 
  25.305 -%
  25.306 -% The former rule is aligned on the fraction bar. 
  25.307 -% The optional label appears on top of the rule
  25.308 -% The second form to be used in a derivation tree is aligned on the last
  25.309 -% line of its conclusion
  25.310 -% 
  25.311 -% The second form can be parameterized, using the key=val interface. The
  25.312 -% folloiwng keys are recognized:
  25.313 -%       
  25.314 -%  width                set the width of the rule to val
  25.315 -%  narrower             set the width of the rule to val\hsize
  25.316 -%  before               execute val at the beginning/left
  25.317 -%  lab                  put a label [Val] on top of the rule
  25.318 -%  lskip                add negative skip on the right
  25.319 -%  left                 put a left label [Val]
  25.320 -%  Left                 put a left label [Val],  ignoring its width 
  25.321 -%  right                put a right label [Val]
  25.322 -%  Right                put a right label [Val], ignoring its width
  25.323 -%  leftskip             skip negative space on the left-hand side
  25.324 -%  rightskip            skip negative space on the right-hand side
  25.325 -%  vdots                lift the rule by val and fill vertical space with dots
  25.326 -%  after                execute val at the end/right
  25.327 -%  
  25.328 -%  Note that most options must come in this order to avoid strange
  25.329 -%  typesetting (in particular  leftskip must preceed left and Left and
  25.330 -%  rightskip must follow Right or right; vdots must come last 
  25.331 -%  or be only followed by rightskip. 
  25.332 -%  
  25.333 -
  25.334 -%% Keys that make sence in all kinds of rules
  25.335 -\def \mprset #1{\setkeys{mprset}{#1}}
  25.336 -\define@key {mprset}{flushleft}[]{\mpr@centerfalse}
  25.337 -\define@key {mprset}{center}[]{\mpr@centertrue}
  25.338 -\define@key {mprset}{rewrite}[]{\let \mpr@fraction \mpr@@rewrite}
  25.339 -\define@key {mprset}{myfraction}[]{\let \mpr@fraction #1}
  25.340 -\define@key {mprset}{fraction}[]{\def \mpr@fraction {\mpr@make@fraction #1}}
  25.341 -
  25.342 -\newbox \mpr@right
  25.343 -\define@key {mpr}{flushleft}[]{\mpr@centerfalse}
  25.344 -\define@key {mpr}{center}[]{\mpr@centertrue}
  25.345 -\define@key {mpr}{rewrite}[]{\let \mpr@fraction \mpr@@rewrite}
  25.346 -\define@key {mpr}{myfraction}[]{\let \mpr@fraction #1}
  25.347 -\define@key {mpr}{fraction}[]{\def \mpr@fraction {\mpr@make@fraction #1}}
  25.348 -\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
  25.349 -     \advance \hsize by -\wd0\box0}
  25.350 -\define@key {mpr}{width}{\hsize #1}
  25.351 -\define@key {mpr}{sep}{\def\mpr@sep{#1}}
  25.352 -\define@key {mpr}{before}{#1}
  25.353 -\define@key {mpr}{lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
  25.354 -\define@key {mpr}{Lab}{\let \RefTirName \TirName \def \mpr@rulename {#1}}
  25.355 -\define@key {mpr}{narrower}{\hsize #1\hsize}
  25.356 -\define@key {mpr}{leftskip}{\hskip -#1}
  25.357 -\define@key {mpr}{reduce}[]{\let \mpr@fraction \mpr@@reduce}
  25.358 -\define@key {mpr}{rightskip}
  25.359 -  {\setbox \mpr@right \hbox {\unhbox \mpr@right \hskip -#1}}
  25.360 -\define@key {mpr}{LEFT}{\setbox0 \hbox {$#1$}\relax
  25.361 -     \advance \hsize by -\wd0\box0}
  25.362 -\define@key {mpr}{left}{\setbox0 \hbox {$\TirName {#1}\;$}\relax
  25.363 -     \advance \hsize by -\wd0\box0}
  25.364 -\define@key {mpr}{Left}{\llap{$\TirName {#1}\;$}}
  25.365 -\define@key {mpr}{right}
  25.366 -  {\setbox0 \hbox {$\;\TirName {#1}$}\relax \advance \hsize by -\wd0
  25.367 -   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
  25.368 -\define@key {mpr}{RIGHT}
  25.369 -  {\setbox0 \hbox {$#1$}\relax \advance \hsize by -\wd0
  25.370 -   \setbox \mpr@right \hbox {\unhbox \mpr@right \unhbox0}}
  25.371 -\define@key {mpr}{Right}
  25.372 -  {\setbox \mpr@right \hbox {\unhbox \mpr@right \rlap {$\;\TirName {#1}$}}}
  25.373 -\define@key {mpr}{vdots}{\def \mpr@vdots {\@@atop \mpr@vdotfil{#1}}}
  25.374 -\define@key {mpr}{after}{\edef \mpr@after {\mpr@after #1}}
  25.375 -
  25.376 -\newdimen \rule@dimen
  25.377 -\newcommand \mpr@inferstar@ [3][]{\setbox0
  25.378 -  \hbox {\let \mpr@rulename \mpr@empty \let \mpr@vdots \relax
  25.379 -         \setbox \mpr@right \hbox{}%
  25.380 -         $\setkeys{mpr}{#1}%
  25.381 -          \ifx \mpr@rulename \mpr@empty \mpr@inferrule {#2}{#3}\else
  25.382 -          \mpr@inferrule [{\mpr@rulename}]{#2}{#3}\fi
  25.383 -          \box \mpr@right \mpr@vdots$}
  25.384 -  \setbox1 \hbox {\strut}
  25.385 -  \rule@dimen \dp0 \advance \rule@dimen by -\dp1
  25.386 -  \raise \rule@dimen \box0}
  25.387 -
  25.388 -\def \mpr@infer {\@ifnextchar *{\mpr@inferstar}{\mpr@inferrule}}
  25.389 -\newcommand \mpr@err@skipargs[3][]{}
  25.390 -\def \mpr@inferstar*{\ifmmode 
  25.391 -    \let \@do \mpr@inferstar@
  25.392 -  \else 
  25.393 -    \let \@do \mpr@err@skipargs
  25.394 -    \PackageError {mathpartir}
  25.395 -      {\string\inferrule* can only be used in math mode}{}%
  25.396 -  \fi \@do}
  25.397 -
  25.398 -
  25.399 -%%% Exports
  25.400 -
  25.401 -% Envirnonment mathpar
  25.402 -
  25.403 -\let \inferrule \mpr@infer
  25.404 -
  25.405 -% make a short name \infer is not already defined
  25.406 -\@ifundefined {infer}{\let \infer \mpr@infer}{}
  25.407 -
  25.408 -\def \TirNameStyle #1{\small \textsc{#1}}
  25.409 -\def \tir@name #1{\hbox {\small \TirNameStyle{#1}}}
  25.410 -\let \TirName \tir@name
  25.411 -\let \DefTirName \TirName
  25.412 -\let \RefTirName \TirName
  25.413 -
  25.414 -%%% Other Exports
  25.415 -
  25.416 -% \let \listcons \mpr@cons
  25.417 -% \let \listsnoc \mpr@snoc
  25.418 -% \let \listhead \mpr@head
  25.419 -% \let \listmake \mpr@makelist
  25.420 -
  25.421 -
  25.422 -
  25.423 -
  25.424 -\endinput
    26.1 --- a/doc-src/Functions/document/root.tex	Tue Aug 28 18:46:15 2012 +0200
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,90 +0,0 @@
    26.4 -
    26.5 -\documentclass[a4paper,fleqn]{article}
    26.6 -
    26.7 -\usepackage{latexsym,graphicx}
    26.8 -\usepackage[refpage]{nomencl}
    26.9 -\usepackage{iman,extra,isar}
   26.10 -\usepackage{isabelle,isabellesym}
   26.11 -\usepackage{style}
   26.12 -\usepackage{mathpartir}
   26.13 -\usepackage{amsthm}
   26.14 -\usepackage{pdfsetup}
   26.15 -
   26.16 -\newcommand{\cmd}[1]{\isacommand{#1}}
   26.17 -
   26.18 -\newcommand{\isasymINFIX}{\cmd{infix}}
   26.19 -\newcommand{\isasymLOCALE}{\cmd{locale}}
   26.20 -\newcommand{\isasymINCLUDES}{\cmd{includes}}
   26.21 -\newcommand{\isasymDATATYPE}{\cmd{datatype}}
   26.22 -\newcommand{\isasymDEFINES}{\cmd{defines}}
   26.23 -\newcommand{\isasymNOTES}{\cmd{notes}}
   26.24 -\newcommand{\isasymCLASS}{\cmd{class}}
   26.25 -\newcommand{\isasymINSTANCE}{\cmd{instance}}
   26.26 -\newcommand{\isasymLEMMA}{\cmd{lemma}}
   26.27 -\newcommand{\isasymPROOF}{\cmd{proof}}
   26.28 -\newcommand{\isasymQED}{\cmd{qed}}
   26.29 -\newcommand{\isasymFIX}{\cmd{fix}}
   26.30 -\newcommand{\isasymASSUME}{\cmd{assume}}
   26.31 -\newcommand{\isasymSHOW}{\cmd{show}}
   26.32 -\newcommand{\isasymNOTE}{\cmd{note}}
   26.33 -\newcommand{\isasymCODEGEN}{\cmd{code\_gen}}
   26.34 -\newcommand{\isasymPRINTCODETHMS}{\cmd{print\_codethms}}
   26.35 -\newcommand{\isasymFUN}{\cmd{fun}}
   26.36 -\newcommand{\isasymFUNCTION}{\cmd{function}}
   26.37 -\newcommand{\isasymPRIMREC}{\cmd{primrec}}
   26.38 -\newcommand{\isasymRECDEF}{\cmd{recdef}}
   26.39 -
   26.40 -\newcommand{\qt}[1]{``#1''}
   26.41 -\newcommand{\qtt}[1]{"{}{#1}"{}}
   26.42 -\newcommand{\qn}[1]{\emph{#1}}
   26.43 -\newcommand{\strong}[1]{{\bfseries #1}}
   26.44 -\newcommand{\fixme}[1][!]{\strong{FIXME: #1}}
   26.45 -
   26.46 -\newtheorem{exercise}{Exercise}{\bf}{\itshape}
   26.47 -%\newtheorem*{thmstar}{Theorem}{\bf}{\itshape}
   26.48 -
   26.49 -\hyphenation{Isabelle}
   26.50 -\hyphenation{Isar}
   26.51 -
   26.52 -\isadroptag{theory}
   26.53 -\title{Defining Recursive Functions in Isabelle/HOL}
   26.54 -\author{Alexander Krauss}
   26.55 -
   26.56 -\isabellestyle{tt}
   26.57 -\renewcommand{\isastyletxt}{\isastyletext}% use same formatting for txt and text
   26.58 -
   26.59 -\begin{document}
   26.60 -
   26.61 -\date{\ \\}
   26.62 -\maketitle
   26.63 -
   26.64 -\begin{abstract}
   26.65 -  This tutorial describes the use of the new \emph{function} package,
   26.66 -	which provides general recursive function definitions for Isabelle/HOL.
   26.67 -	We start with very simple examples and then gradually move on to more
   26.68 -	advanced topics such as manual termination proofs, nested recursion,
   26.69 -	partiality, tail recursion and congruence rules.
   26.70 -\end{abstract}
   26.71 -
   26.72 -%\thispagestyle{empty}\clearpage
   26.73 -
   26.74 -%\pagenumbering{roman}
   26.75 -%\clearfirst
   26.76 -
   26.77 -\input{intro.tex}
   26.78 -\input{Functions.tex}
   26.79 -%\input{conclusion.tex}
   26.80 -
   26.81 -\begingroup
   26.82 -%\tocentry{\bibname}
   26.83 -\bibliographystyle{plain} \small\raggedright\frenchspacing
   26.84 -\bibliography{manual}
   26.85 -\endgroup
   26.86 -
   26.87 -\end{document}
   26.88 -
   26.89 -
   26.90 -%%% Local Variables: 
   26.91 -%%% mode: latex
   26.92 -%%% TeX-master: t
   26.93 -%%% End: 
    27.1 --- a/doc-src/Functions/document/style.sty	Tue Aug 28 18:46:15 2012 +0200
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,46 +0,0 @@
    27.4 -%% toc
    27.5 -\newcommand{\tocentry}[1]{\cleardoublepage\phantomsection\addcontentsline{toc}{chapter}{#1}
    27.6 -\@mkboth{\MakeUppercase{#1}}{\MakeUppercase{#1}}}
    27.7 -
    27.8 -%% references
    27.9 -\newcommand{\secref}[1]{\S\ref{#1}}
   27.10 -\newcommand{\chref}[1]{chapter~\ref{#1}}
   27.11 -\newcommand{\figref}[1]{figure~\ref{#1}}
   27.12 -
   27.13 -%% math
   27.14 -\newcommand{\text}[1]{\mbox{#1}}
   27.15 -\newcommand{\isasymvartheta}{\isamath{\theta}}
   27.16 -\newcommand{\isactrlvec}[1]{\emph{$\overline{#1}$}}
   27.17 -
   27.18 -\setcounter{secnumdepth}{2} \setcounter{tocdepth}{2}
   27.19 -
   27.20 -\pagestyle{headings}
   27.21 -\sloppy
   27.22 -\binperiod
   27.23 -\underscoreon
   27.24 -
   27.25 -\renewcommand{\isadigit}[1]{\isamath{#1}}
   27.26 -
   27.27 -\newenvironment{mldecls}{\par\noindent\begingroup\footnotesize\def\isanewline{\\}\begin{tabular}{l}}{\end{tabular}\smallskip\endgroup}
   27.28 -
   27.29 -\isafoldtag{FIXME}
   27.30 -\isakeeptag{mlref}
   27.31 -\renewcommand{\isatagmlref}{\subsection*{\makebox[0pt][r]{\fbox{\ML}~~}Reference}\begingroup\def\isastyletext{\rm}\small}
   27.32 -\renewcommand{\endisatagmlref}{\endgroup}
   27.33 -
   27.34 -\newcommand{\isasymGUESS}{\isakeyword{guess}}
   27.35 -\newcommand{\isasymOBTAIN}{\isakeyword{obtain}}
   27.36 -\newcommand{\isasymTHEORY}{\isakeyword{theory}}
   27.37 -\newcommand{\isasymUSES}{\isakeyword{uses}}
   27.38 -\newcommand{\isasymEND}{\isakeyword{end}}
   27.39 -\newcommand{\isasymCONSTS}{\isakeyword{consts}}
   27.40 -\newcommand{\isasymDEFS}{\isakeyword{defs}}
   27.41 -\newcommand{\isasymTHEOREM}{\isakeyword{theorem}}
   27.42 -\newcommand{\isasymDEFINITION}{\isakeyword{definition}}
   27.43 -
   27.44 -\isabellestyle{it}
   27.45 -
   27.46 -%%% Local Variables: 
   27.47 -%%% mode: latex
   27.48 -%%% TeX-master: "implementation"
   27.49 -%%% End: 
    28.1 --- a/doc-src/HOL/document/HOL.tex	Tue Aug 28 18:46:15 2012 +0200
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,2089 +0,0 @@
    28.4 -\chapter{Higher-Order Logic}
    28.5 -\index{higher-order logic|(}
    28.6 -\index{HOL system@{\sc hol} system}
    28.7 -
    28.8 -\begin{figure}
    28.9 -\begin{constants}
   28.10 -  \it name      &\it meta-type  & \it description \\
   28.11 -  \cdx{Trueprop}& $bool\To prop$                & coercion to $prop$\\
   28.12 -  \cdx{Not}     & $bool\To bool$                & negation ($\lnot$) \\
   28.13 -  \cdx{True}    & $bool$                        & tautology ($\top$) \\
   28.14 -  \cdx{False}   & $bool$                        & absurdity ($\bot$) \\
   28.15 -  \cdx{If}      & $[bool,\alpha,\alpha]\To\alpha$ & conditional \\
   28.16 -  \cdx{Let}     & $[\alpha,\alpha\To\beta]\To\beta$ & let binder
   28.17 -\end{constants}
   28.18 -\subcaption{Constants}
   28.19 -
   28.20 -\begin{constants}
   28.21 -\index{"@@{\tt\at} symbol}
   28.22 -\index{*"! symbol}\index{*"? symbol}
   28.23 -\index{*"?"! symbol}\index{*"E"X"! symbol}
   28.24 -  \it symbol &\it name     &\it meta-type & \it description \\
   28.25 -  \sdx{SOME} or \tt\at & \cdx{Eps}  & $(\alpha\To bool)\To\alpha$ & 
   28.26 -        Hilbert description ($\varepsilon$) \\
   28.27 -  \sdx{ALL} or {\tt!~} & \cdx{All}  & $(\alpha\To bool)\To bool$ & 
   28.28 -        universal quantifier ($\forall$) \\
   28.29 -  \sdx{EX} or {\tt?~}  & \cdx{Ex}   & $(\alpha\To bool)\To bool$ & 
   28.30 -        existential quantifier ($\exists$) \\
   28.31 -  \texttt{EX!} or {\tt?!} & \cdx{Ex1}  & $(\alpha\To bool)\To bool$ & 
   28.32 -        unique existence ($\exists!$)\\
   28.33 -  \texttt{LEAST}  & \cdx{Least}  & $(\alpha::ord \To bool)\To\alpha$ & 
   28.34 -        least element
   28.35 -\end{constants}
   28.36 -\subcaption{Binders} 
   28.37 -
   28.38 -\begin{constants}
   28.39 -\index{*"= symbol}
   28.40 -\index{&@{\tt\&} symbol}
   28.41 -\index{"!@{\tt\char124} symbol} %\char124 is vertical bar. We use ! because | stopped working
   28.42 -\index{*"-"-"> symbol}
   28.43 -  \it symbol    & \it meta-type & \it priority & \it description \\ 
   28.44 -  \sdx{o}       & $[\beta\To\gamma,\alpha\To\beta]\To (\alpha\To\gamma)$ & 
   28.45 -        Left 55 & composition ($\circ$) \\
   28.46 -  \tt =         & $[\alpha,\alpha]\To bool$ & Left 50 & equality ($=$) \\
   28.47 -  \tt <         & $[\alpha::ord,\alpha]\To bool$ & Left 50 & less than ($<$) \\
   28.48 -  \tt <=        & $[\alpha::ord,\alpha]\To bool$ & Left 50 & 
   28.49 -                less than or equals ($\leq$)\\
   28.50 -  \tt \&        & $[bool,bool]\To bool$ & Right 35 & conjunction ($\conj$) \\
   28.51 -  \tt |         & $[bool,bool]\To bool$ & Right 30 & disjunction ($\disj$) \\
   28.52 -  \tt -->       & $[bool,bool]\To bool$ & Right 25 & implication ($\imp$)
   28.53 -\end{constants}
   28.54 -\subcaption{Infixes}
   28.55 -\caption{Syntax of \texttt{HOL}} \label{hol-constants}
   28.56 -\end{figure}
   28.57 -
   28.58 -
   28.59 -\begin{figure}
   28.60 -\index{*let symbol}
   28.61 -\index{*in symbol}
   28.62 -\dquotes
   28.63 -\[\begin{array}{rclcl}
   28.64 -    term & = & \hbox{expression of class~$term$} \\
   28.65 -         & | & "SOME~" id " . " formula
   28.66 -         & | & "\at~" id " . " formula \\
   28.67 -         & | & 
   28.68 -    \multicolumn{3}{l}{"let"~id~"="~term";"\dots";"~id~"="~term~"in"~term} \\
   28.69 -         & | & 
   28.70 -    \multicolumn{3}{l}{"if"~formula~"then"~term~"else"~term} \\
   28.71 -         & | & "LEAST"~ id " . " formula \\[2ex]
   28.72 - formula & = & \hbox{expression of type~$bool$} \\
   28.73 -         & | & term " = " term \\
   28.74 -         & | & term " \ttilde= " term \\
   28.75 -         & | & term " < " term \\
   28.76 -         & | & term " <= " term \\
   28.77 -         & | & "\ttilde\ " formula \\
   28.78 -         & | & formula " \& " formula \\
   28.79 -         & | & formula " | " formula \\
   28.80 -         & | & formula " --> " formula \\
   28.81 -         & | & "ALL~" id~id^* " . " formula
   28.82 -         & | & "!~~~" id~id^* " . " formula \\
   28.83 -         & | & "EX~~" id~id^* " . " formula 
   28.84 -         & | & "?~~~" id~id^* " . " formula \\
   28.85 -         & | & "EX!~" id~id^* " . " formula
   28.86 -         & | & "?!~~" id~id^* " . " formula \\
   28.87 -  \end{array}
   28.88 -\]
   28.89 -\caption{Full grammar for HOL} \label{hol-grammar}
   28.90 -\end{figure} 
   28.91 -
   28.92 -
   28.93 -\section{Syntax}
   28.94 -
   28.95 -Figure~\ref{hol-constants} lists the constants (including infixes and
   28.96 -binders), while Fig.\ts\ref{hol-grammar} presents the grammar of
   28.97 -higher-order logic.  Note that $a$\verb|~=|$b$ is translated to
   28.98 -$\lnot(a=b)$.
   28.99 -
  28.100 -\begin{warn}
  28.101 -  HOL has no if-and-only-if connective; logical equivalence is expressed using
  28.102 -  equality.  But equality has a high priority, as befitting a relation, while
  28.103 -  if-and-only-if typically has the lowest priority.  Thus, $\lnot\lnot P=P$
  28.104 -  abbreviates $\lnot\lnot (P=P)$ and not $(\lnot\lnot P)=P$.  When using $=$
  28.105 -  to mean logical equivalence, enclose both operands in parentheses.
  28.106 -\end{warn}
  28.107 -
  28.108 -\subsection{Types and overloading}
  28.109 -The universal type class of higher-order terms is called~\cldx{term}.
  28.110 -By default, explicit type variables have class \cldx{term}.  In
  28.111 -particular the equality symbol and quantifiers are polymorphic over
  28.112 -class \texttt{term}.
  28.113 -
  28.114 -The type of formulae, \tydx{bool}, belongs to class \cldx{term}; thus,
  28.115 -formulae are terms.  The built-in type~\tydx{fun}, which constructs
  28.116 -function types, is overloaded with arity {\tt(term,\thinspace
  28.117 -  term)\thinspace term}.  Thus, $\sigma\To\tau$ belongs to class~{\tt
  28.118 -  term} if $\sigma$ and~$\tau$ do, allowing quantification over
  28.119 -functions.
  28.120 -
  28.121 -HOL allows new types to be declared as subsets of existing types,
  28.122 -either using the primitive \texttt{typedef} or the more convenient
  28.123 -\texttt{datatype} (see~{\S}\ref{sec:HOL:datatype}).
  28.124 -
  28.125 -Several syntactic type classes --- \cldx{plus}, \cldx{minus},
  28.126 -\cldx{times} and
  28.127 -\cldx{power} --- permit overloading of the operators {\tt+},\index{*"+
  28.128 -  symbol} {\tt-}\index{*"- symbol}, {\tt*}.\index{*"* symbol} 
  28.129 -and \verb|^|.\index{^@\verb.^. symbol} 
  28.130 -%
  28.131 -They are overloaded to denote the obvious arithmetic operations on types
  28.132 -\tdx{nat}, \tdx{int} and~\tdx{real}. (With the \verb|^| operator, the
  28.133 -exponent always has type~\tdx{nat}.)  Non-arithmetic overloadings are also
  28.134 -done: the operator {\tt-} can denote set difference, while \verb|^| can
  28.135 -denote exponentiation of relations (iterated composition).  Unary minus is
  28.136 -also written as~{\tt-} and is overloaded like its 2-place counterpart; it even
  28.137 -can stand for set complement.
  28.138 -
  28.139 -The constant \cdx{0} is also overloaded.  It serves as the zero element of
  28.140 -several types, of which the most important is \tdx{nat} (the natural
  28.141 -numbers).  The type class \cldx{plus_ac0} comprises all types for which 0
  28.142 -and~+ satisfy the laws $x+y=y+x$, $(x+y)+z = x+(y+z)$ and $0+x = x$.  These
  28.143 -types include the numeric ones \tdx{nat}, \tdx{int} and~\tdx{real} and also
  28.144 -multisets.  The summation operator \cdx{setsum} is available for all types in
  28.145 -this class. 
  28.146 -
  28.147 -Theory \thydx{Ord} defines the syntactic class \cldx{ord} of order
  28.148 -signatures.  The relations $<$ and $\leq$ are polymorphic over this
  28.149 -class, as are the functions \cdx{mono}, \cdx{min} and \cdx{max}, and
  28.150 -the \cdx{LEAST} operator. \thydx{Ord} also defines a subclass
  28.151 -\cldx{order} of \cldx{ord} which axiomatizes the types that are partially
  28.152 -ordered with respect to~$\leq$.  A further subclass \cldx{linorder} of
  28.153 -\cldx{order} axiomatizes linear orderings.
  28.154 -For details, see the file \texttt{Ord.thy}.
  28.155 -                                          
  28.156 -If you state a goal containing overloaded functions, you may need to include
  28.157 -type constraints.  Type inference may otherwise make the goal more
  28.158 -polymorphic than you intended, with confusing results.  For example, the
  28.159 -variables $i$, $j$ and $k$ in the goal $i \leq j \Imp i \leq j+k$ have type
  28.160 -$\alpha::\{ord,plus\}$, although you may have expected them to have some
  28.161 -numeric type, e.g. $nat$.  Instead you should have stated the goal as
  28.162 -$(i::nat) \leq j \Imp i \leq j+k$, which causes all three variables to have
  28.163 -type $nat$.
  28.164 -
  28.165 -\begin{warn}
  28.166 -  If resolution fails for no obvious reason, try setting
  28.167 -  \ttindex{show_types} to \texttt{true}, causing Isabelle to display
  28.168 -  types of terms.  Possibly set \ttindex{show_sorts} to \texttt{true} as
  28.169 -  well, causing Isabelle to display type classes and sorts.
  28.170 -
  28.171 -  \index{unification!incompleteness of}
  28.172 -  Where function types are involved, Isabelle's unification code does not
  28.173 -  guarantee to find instantiations for type variables automatically.  Be
  28.174 -  prepared to use \ttindex{res_inst_tac} instead of \texttt{resolve_tac},
  28.175 -  possibly instantiating type variables.  Setting
  28.176 -  \ttindex{Unify.trace_types} to \texttt{true} causes Isabelle to report
  28.177 -  omitted search paths during unification.\index{tracing!of unification}
  28.178 -\end{warn}
  28.179 -
  28.180 -
  28.181 -\subsection{Binders}
  28.182 -
  28.183 -Hilbert's {\bf description} operator~$\varepsilon x. P[x]$ stands for some~$x$
  28.184 -satisfying~$P$, if such exists.  Since all terms in HOL denote something, a
  28.185 -description is always meaningful, but we do not know its value unless $P$
  28.186 -defines it uniquely.  We may write descriptions as \cdx{Eps}($\lambda x.
  28.187 -P[x]$) or use the syntax \hbox{\tt SOME~$x$.~$P[x]$}.
  28.188 -
  28.189 -Existential quantification is defined by
  28.190 -\[ \exists x. P~x \;\equiv\; P(\varepsilon x. P~x). \]
  28.191 -The unique existence quantifier, $\exists!x. P$, is defined in terms
  28.192 -of~$\exists$ and~$\forall$.  An Isabelle binder, it admits nested
  28.193 -quantifications.  For instance, $\exists!x\,y. P\,x\,y$ abbreviates
  28.194 -$\exists!x. \exists!y. P\,x\,y$; note that this does not mean that there
  28.195 -exists a unique pair $(x,y)$ satisfying~$P\,x\,y$.
  28.196 -
  28.197 -\medskip
  28.198 -
  28.199 -\index{*"! symbol}\index{*"? symbol}\index{HOL system@{\sc hol} system} The
  28.200 -basic Isabelle/HOL binders have two notations.  Apart from the usual
  28.201 -\texttt{ALL} and \texttt{EX} for $\forall$ and $\exists$, Isabelle/HOL also
  28.202 -supports the original notation of Gordon's {\sc hol} system: \texttt{!}\ 
  28.203 -and~\texttt{?}.  In the latter case, the existential quantifier \emph{must} be
  28.204 -followed by a space; thus {\tt?x} is an unknown, while \verb'? x. f x=y' is a
  28.205 -quantification.  Both notations are accepted for input.  The print mode
  28.206 -``\ttindexbold{HOL}'' governs the output notation.  If enabled (e.g.\ by
  28.207 -passing option \texttt{-m HOL} to the \texttt{isabelle} executable),
  28.208 -then~{\tt!}\ and~{\tt?}\ are displayed.
  28.209 -
  28.210 -\medskip
  28.211 -
  28.212 -If $\tau$ is a type of class \cldx{ord}, $P$ a formula and $x$ a
  28.213 -variable of type $\tau$, then the term \cdx{LEAST}~$x. P[x]$ is defined
  28.214 -to be the least (w.r.t.\ $\leq$) $x$ such that $P~x$ holds (see
  28.215 -Fig.~\ref{hol-defs}).  The definition uses Hilbert's $\varepsilon$
  28.216 -choice operator, so \texttt{Least} is always meaningful, but may yield
  28.217 -nothing useful in case there is not a unique least element satisfying
  28.218 -$P$.\footnote{Class $ord$ does not require much of its instances, so
  28.219 -  $\leq$ need not be a well-ordering, not even an order at all!}
  28.220 -
  28.221 -\medskip All these binders have priority 10.
  28.222 -
  28.223 -\begin{warn}
  28.224 -The low priority of binders means that they need to be enclosed in
  28.225 -parenthesis when they occur in the context of other operations.  For example,
  28.226 -instead of $P \land \forall x. Q$ you need to write $P \land (\forall x. Q)$.
  28.227 -\end{warn}
  28.228 -
  28.229 -
  28.230 -\subsection{The let and case constructions}
  28.231 -Local abbreviations can be introduced by a \texttt{let} construct whose
  28.232 -syntax appears in Fig.\ts\ref{hol-grammar}.  Internally it is translated into
  28.233 -the constant~\cdx{Let}.  It can be expanded by rewriting with its
  28.234 -definition, \tdx{Let_def}.
  28.235 -
  28.236 -HOL also defines the basic syntax
  28.237 -\[\dquotes"case"~e~"of"~c@1~"=>"~e@1~"|" \dots "|"~c@n~"=>"~e@n\] 
  28.238 -as a uniform means of expressing \texttt{case} constructs.  Therefore \texttt{case}
  28.239 -and \sdx{of} are reserved words.  Initially, this is mere syntax and has no
  28.240 -logical meaning.  By declaring translations, you can cause instances of the
  28.241 -\texttt{case} construct to denote applications of particular case operators.
  28.242 -This is what happens automatically for each \texttt{datatype} definition
  28.243 -(see~{\S}\ref{sec:HOL:datatype}).
  28.244 -
  28.245 -\begin{warn}
  28.246 -Both \texttt{if} and \texttt{case} constructs have as low a priority as
  28.247 -quantifiers, which requires additional enclosing parentheses in the context
  28.248 -of most other operations.  For example, instead of $f~x = {\tt if\dots
  28.249 -then\dots else}\dots$ you need to write $f~x = ({\tt if\dots then\dots
  28.250 -else\dots})$.
  28.251 -\end{warn}
  28.252 -
  28.253 -\section{Rules of inference}
  28.254 -
  28.255 -\begin{figure}
  28.256 -\begin{ttbox}\makeatother
  28.257 -\tdx{refl}          t = (t::'a)
  28.258 -\tdx{subst}         [| s = t; P s |] ==> P (t::'a)
  28.259 -\tdx{ext}           (!!x::'a. (f x :: 'b) = g x) ==> (\%x. f x) = (\%x. g x)
  28.260 -\tdx{impI}          (P ==> Q) ==> P-->Q
  28.261 -\tdx{mp}            [| P-->Q;  P |] ==> Q
  28.262 -\tdx{iff}           (P-->Q) --> (Q-->P) --> (P=Q)
  28.263 -\tdx{someI}         P(x::'a) ==> P(@x. P x)
  28.264 -\tdx{True_or_False} (P=True) | (P=False)
  28.265 -\end{ttbox}
  28.266 -\caption{The \texttt{HOL} rules} \label{hol-rules}
  28.267 -\end{figure}
  28.268 -
  28.269 -Figure~\ref{hol-rules} shows the primitive inference rules of~HOL, with
  28.270 -their~{\ML} names.  Some of the rules deserve additional comments:
  28.271 -\begin{ttdescription}
  28.272 -\item[\tdx{ext}] expresses extensionality of functions.
  28.273 -\item[\tdx{iff}] asserts that logically equivalent formulae are
  28.274 -  equal.
  28.275 -\item[\tdx{someI}] gives the defining property of the Hilbert
  28.276 -  $\varepsilon$-operator.  It is a form of the Axiom of Choice.  The derived rule
  28.277 -  \tdx{some_equality} (see below) is often easier to use.
  28.278 -\item[\tdx{True_or_False}] makes the logic classical.\footnote{In
  28.279 -    fact, the $\varepsilon$-operator already makes the logic classical, as
  28.280 -    shown by Diaconescu; see Paulson~\cite{paulson-COLOG} for details.}
  28.281 -\end{ttdescription}
  28.282 -
  28.283 -
  28.284 -\begin{figure}\hfuzz=4pt%suppress "Overfull \hbox" message
  28.285 -\begin{ttbox}\makeatother
  28.286 -\tdx{True_def}   True     == ((\%x::bool. x)=(\%x. x))
  28.287 -\tdx{All_def}    All      == (\%P. P = (\%x. True))
  28.288 -\tdx{Ex_def}     Ex       == (\%P. P(@x. P x))
  28.289 -\tdx{False_def}  False    == (!P. P)
  28.290 -\tdx{not_def}    not      == (\%P. P-->False)
  28.291 -\tdx{and_def}    op &     == (\%P Q. !R. (P-->Q-->R) --> R)
  28.292 -\tdx{or_def}     op |     == (\%P Q. !R. (P-->R) --> (Q-->R) --> R)
  28.293 -\tdx{Ex1_def}    Ex1      == (\%P. ? x. P x & (! y. P y --> y=x))
  28.294 -
  28.295 -\tdx{o_def}      op o     == (\%(f::'b=>'c) g x::'a. f(g x))
  28.296 -\tdx{if_def}     If P x y ==
  28.297 -              (\%P x y. @z::'a.(P=True --> z=x) & (P=False --> z=y))
  28.298 -\tdx{Let_def}    Let s f  == f s
  28.299 -\tdx{Least_def}  Least P  == @x. P(x) & (ALL y. P(y) --> x <= y)
  28.300 -\end{ttbox}
  28.301 -\caption{The \texttt{HOL} definitions} \label{hol-defs}
  28.302 -\end{figure}
  28.303 -
  28.304 -
  28.305 -HOL follows standard practice in higher-order logic: only a few connectives
  28.306 -are taken as primitive, with the remainder defined obscurely
  28.307 -(Fig.\ts\ref{hol-defs}).  Gordon's {\sc hol} system expresses the
  28.308 -corresponding definitions \cite[page~270]{mgordon-hol} using
  28.309 -object-equality~({\tt=}), which is possible because equality in higher-order
  28.310 -logic may equate formulae and even functions over formulae.  But theory~HOL,
  28.311 -like all other Isabelle theories, uses meta-equality~({\tt==}) for
  28.312 -definitions.
  28.313 -\begin{warn}
  28.314 -The definitions above should never be expanded and are shown for completeness
  28.315 -only.  Instead users should reason in terms of the derived rules shown below
  28.316 -or, better still, using high-level tactics.
  28.317 -\end{warn}
  28.318 -
  28.319 -Some of the rules mention type variables; for example, \texttt{refl}
  28.320 -mentions the type variable~{\tt'a}.  This allows you to instantiate
  28.321 -type variables explicitly by calling \texttt{res_inst_tac}.
  28.322 -
  28.323 -
  28.324 -\begin{figure}
  28.325 -\begin{ttbox}
  28.326 -\tdx{sym}         s=t ==> t=s
  28.327 -\tdx{trans}       [| r=s; s=t |] ==> r=t
  28.328 -\tdx{ssubst}      [| t=s; P s |] ==> P t
  28.329 -\tdx{box_equals}  [| a=b;  a=c;  b=d |] ==> c=d  
  28.330 -\tdx{arg_cong}    x = y ==> f x = f y
  28.331 -\tdx{fun_cong}    f = g ==> f x = g x
  28.332 -\tdx{cong}        [| f = g; x = y |] ==> f x = g y
  28.333 -\tdx{not_sym}     t ~= s ==> s ~= t
  28.334 -\subcaption{Equality}
  28.335 -
  28.336 -\tdx{TrueI}       True 
  28.337 -\tdx{FalseE}      False ==> P
  28.338 -
  28.339 -\tdx{conjI}       [| P; Q |] ==> P&Q
  28.340 -\tdx{conjunct1}   [| P&Q |] ==> P
  28.341 -\tdx{conjunct2}   [| P&Q |] ==> Q 
  28.342 -\tdx{conjE}       [| P&Q;  [| P; Q |] ==> R |] ==> R
  28.343 -
  28.344 -\tdx{disjI1}      P ==> P|Q
  28.345 -\tdx{disjI2}      Q ==> P|Q
  28.346 -\tdx{disjE}       [| P | Q; P ==> R; Q ==> R |] ==> R
  28.347 -
  28.348 -\tdx{notI}        (P ==> False) ==> ~ P
  28.349 -\tdx{notE}        [| ~ P;  P |] ==> R
  28.350 -\tdx{impE}        [| P-->Q;  P;  Q ==> R |] ==> R
  28.351 -\subcaption{Propositional logic}
  28.352 -
  28.353 -\tdx{iffI}        [| P ==> Q;  Q ==> P |] ==> P=Q
  28.354 -\tdx{iffD1}       [| P=Q; P |] ==> Q
  28.355 -\tdx{iffD2}       [| P=Q; Q |] ==> P
  28.356 -\tdx{iffE}        [| P=Q; [| P --> Q; Q --> P |] ==> R |] ==> R
  28.357 -\subcaption{Logical equivalence}
  28.358 -
  28.359 -\end{ttbox}
  28.360 -\caption{Derived rules for HOL} \label{hol-lemmas1}
  28.361 -\end{figure}
  28.362 -%
  28.363 -%\tdx{eqTrueI}     P ==> P=True 
  28.364 -%\tdx{eqTrueE}     P=True ==> P 
  28.365 -
  28.366 -
  28.367 -\begin{figure}
  28.368 -\begin{ttbox}\makeatother
  28.369 -\tdx{allI}      (!!x. P x) ==> !x. P x
  28.370 -\tdx{spec}      !x. P x ==> P x
  28.371 -\tdx{allE}      [| !x. P x;  P x ==> R |] ==> R
  28.372 -\tdx{all_dupE}  [| !x. P x;  [| P x; !x. P x |] ==> R |] ==> R
  28.373 -
  28.374 -\tdx{exI}       P x ==> ? x. P x
  28.375 -\tdx{exE}       [| ? x. P x; !!x. P x ==> Q |] ==> Q
  28.376 -
  28.377 -\tdx{ex1I}      [| P a;  !!x. P x ==> x=a |] ==> ?! x. P x
  28.378 -\tdx{ex1E}      [| ?! x. P x;  !!x. [| P x;  ! y. P y --> y=x |] ==> R 
  28.379 -          |] ==> R
  28.380 -
  28.381 -\tdx{some_equality}   [| P a;  !!x. P x ==> x=a |] ==> (@x. P x) = a
  28.382 -\subcaption{Quantifiers and descriptions}
  28.383 -
  28.384 -\tdx{ccontr}          (~P ==> False) ==> P
  28.385 -\tdx{classical}       (~P ==> P) ==> P
  28.386 -\tdx{excluded_middle} ~P | P
  28.387 -
  28.388 -\tdx{disjCI}       (~Q ==> P) ==> P|Q
  28.389 -\tdx{exCI}         (! x. ~ P x ==> P a) ==> ? x. P x
  28.390 -\tdx{impCE}        [| P-->Q; ~ P ==> R; Q ==> R |] ==> R
  28.391 -\tdx{iffCE}        [| P=Q;  [| P;Q |] ==> R;  [| ~P; ~Q |] ==> R |] ==> R
  28.392 -\tdx{notnotD}      ~~P ==> P
  28.393 -\tdx{swap}         ~P ==> (~Q ==> P) ==> Q
  28.394 -\subcaption{Classical logic}
  28.395 -
  28.396 -\tdx{if_P}         P ==> (if P then x else y) = x
  28.397 -\tdx{if_not_P}     ~ P ==> (if P then x else y) = y
  28.398 -\tdx{split_if}     P(if Q then x else y) = ((Q --> P x) & (~Q --> P y))
  28.399 -\subcaption{Conditionals}
  28.400 -\end{ttbox}
  28.401 -\caption{More derived rules} \label{hol-lemmas2}
  28.402 -\end{figure}
  28.403 -
  28.404 -Some derived rules are shown in Figures~\ref{hol-lemmas1}
  28.405 -and~\ref{hol-lemmas2}, with their {\ML} names.  These include natural rules
  28.406 -for the logical connectives, as well as sequent-style elimination rules for
  28.407 -conjunctions, implications, and universal quantifiers.  
  28.408 -
  28.409 -Note the equality rules: \tdx{ssubst} performs substitution in
  28.410 -backward proofs, while \tdx{box_equals} supports reasoning by
  28.411 -simplifying both sides of an equation.
  28.412 -
  28.413 -The following simple tactics are occasionally useful:
  28.414 -\begin{ttdescription}
  28.415 -\item[\ttindexbold{strip_tac} $i$] applies \texttt{allI} and \texttt{impI}
  28.416 -  repeatedly to remove all outermost universal quantifiers and implications
  28.417 -  from subgoal $i$.
  28.418 -\item[\ttindexbold{case_tac} {\tt"}$P${\tt"} $i$] performs case distinction on
  28.419 -  $P$ for subgoal $i$: the latter is replaced by two identical subgoals with
  28.420 -  the added assumptions $P$ and $\lnot P$, respectively.
  28.421 -\item[\ttindexbold{smp_tac} $j$ $i$] applies $j$ times \texttt{spec} and then
  28.422 -  \texttt{mp} in subgoal $i$, which is typically useful when forward-chaining 
  28.423 -  from an induction hypothesis. As a generalization of \texttt{mp_tac}, 
  28.424 -  if there are assumptions $\forall \vec{x}. P \vec{x} \imp Q \vec{x}$ and 
  28.425 -  $P \vec{a}$, ($\vec{x}$ being a vector of $j$ variables)
  28.426 -  then it replaces the universally quantified implication by $Q \vec{a}$. 
  28.427 -  It may instantiate unknowns. It fails if it can do nothing.
  28.428 -\end{ttdescription}
  28.429 -
  28.430 -
  28.431 -\begin{figure} 
  28.432 -\begin{center}
  28.433 -\begin{tabular}{rrr}
  28.434 -  \it name      &\it meta-type  & \it description \\ 
  28.435 -\index{{}@\verb'{}' symbol}
  28.436 -  \verb|{}|     & $\alpha\,set$         & the empty set \\
  28.437 -  \cdx{insert}  & $[\alpha,\alpha\,set]\To \alpha\,set$
  28.438 -        & insertion of element \\
  28.439 -  \cdx{Collect} & $(\alpha\To bool)\To\alpha\,set$
  28.440 -        & comprehension \\
  28.441 -  \cdx{INTER} & $[\alpha\,set,\alpha\To\beta\,set]\To\beta\,set$
  28.442 -        & intersection over a set\\
  28.443 -  \cdx{UNION} & $[\alpha\,set,\alpha\To\beta\,set]\To\beta\,set$
  28.444 -        & union over a set\\
  28.445 -  \cdx{Inter} & $(\alpha\,set)set\To\alpha\,set$
  28.446 -        &set of sets intersection \\
  28.447 -  \cdx{Union} & $(\alpha\,set)set\To\alpha\,set$
  28.448 -        &set of sets union \\
  28.449 -  \cdx{Pow}   & $\alpha\,set \To (\alpha\,set)set$
  28.450 -        & powerset \\[1ex]
  28.451 -  \cdx{range}   & $(\alpha\To\beta )\To\beta\,set$
  28.452 -        & range of a function \\[1ex]
  28.453 -  \cdx{Ball}~~\cdx{Bex} & $[\alpha\,set,\alpha\To bool]\To bool$
  28.454 -        & bounded quantifiers
  28.455 -\end{tabular}
  28.456 -\end{center}
  28.457 -\subcaption{Constants}
  28.458 -
  28.459 -\begin{center}
  28.460 -\begin{tabular}{llrrr} 
  28.461 -  \it symbol &\it name     &\it meta-type & \it priority & \it description \\
  28.462 -  \sdx{INT}  & \cdx{INTER1}  & $(\alpha\To\beta\,set)\To\beta\,set$ & 10 & 
  28.463 -        intersection\\
  28.464 -  \sdx{UN}  & \cdx{UNION1}  & $(\alpha\To\beta\,set)\To\beta\,set$ & 10 & 
  28.465 -        union 
  28.466 -\end{tabular}
  28.467 -\end{center}
  28.468 -\subcaption{Binders} 
  28.469 -
  28.470 -\begin{center}
  28.471 -\index{*"`"` symbol}
  28.472 -\index{*": symbol}
  28.473 -\index{*"<"= symbol}
  28.474 -\begin{tabular}{rrrr} 
  28.475 -  \it symbol    & \it meta-type & \it priority & \it description \\ 
  28.476 -  \tt ``        & $[\alpha\To\beta ,\alpha\,set]\To  \beta\,set$
  28.477 -        & Left 90 & image \\
  28.478 -  \sdx{Int}     & $[\alpha\,set,\alpha\,set]\To\alpha\,set$
  28.479 -        & Left 70 & intersection ($\int$) \\
  28.480 -  \sdx{Un}      & $[\alpha\,set,\alpha\,set]\To\alpha\,set$
  28.481 -        & Left 65 & union ($\un$) \\
  28.482 -  \tt:          & $[\alpha ,\alpha\,set]\To bool$       
  28.483 -        & Left 50 & membership ($\in$) \\
  28.484 -  \tt <=        & $[\alpha\,set,\alpha\,set]\To bool$
  28.485 -        & Left 50 & subset ($\subseteq$) 
  28.486 -\end{tabular}
  28.487 -\end{center}
  28.488 -\subcaption{Infixes}
  28.489 -\caption{Syntax of the theory \texttt{Set}} \label{hol-set-syntax}
  28.490 -\end{figure} 
  28.491 -
  28.492 -
  28.493 -\begin{figure} 
  28.494 -\begin{center} \tt\frenchspacing
  28.495 -\index{*"! symbol}
  28.496 -\begin{tabular}{rrr} 
  28.497 -  \it external          & \it internal  & \it description \\ 
  28.498 -  $a$ \ttilde: $b$      & \ttilde($a$ : $b$)    & \rm not in\\
  28.499 -  {\ttlbrace}$a@1$, $\ldots${\ttrbrace}  &  insert $a@1$ $\ldots$ {\ttlbrace}{\ttrbrace} & \rm finite set \\
  28.500 -  {\ttlbrace}$x$. $P[x]${\ttrbrace}        &  Collect($\lambda x. P[x]$) &
  28.501 -        \rm comprehension \\
  28.502 -  \sdx{INT} $x$:$A$. $B[x]$      & INTER $A$ $\lambda x. B[x]$ &
  28.503 -        \rm intersection \\
  28.504 -  \sdx{UN}{\tt\ }  $x$:$A$. $B[x]$      & UNION $A$ $\lambda x. B[x]$ &
  28.505 -        \rm union \\
  28.506 -  \sdx{ALL} $x$:$A$.\ $P[x]$ or \texttt{!} $x$:$A$.\ $P[x]$ &
  28.507 -        Ball $A$ $\lambda x.\ P[x]$ & 
  28.508 -        \rm bounded $\forall$ \\
  28.509 -  \sdx{EX}{\tt\ } $x$:$A$.\ $P[x]$ or \texttt{?} $x$:$A$.\ $P[x]$ & 
  28.510 -        Bex $A$ $\lambda x.\ P[x]$ & \rm bounded $\exists$
  28.511 -\end{tabular}
  28.512 -\end{center}
  28.513 -\subcaption{Translations}
  28.514 -
  28.515 -\dquotes
  28.516 -\[\begin{array}{rclcl}
  28.517 -    term & = & \hbox{other terms\ldots} \\
  28.518 -         & | & "{\ttlbrace}{\ttrbrace}" \\
  28.519 -         & | & "{\ttlbrace} " term\; ("," term)^* " {\ttrbrace}" \\
  28.520 -         & | & "{\ttlbrace} " id " . " formula " {\ttrbrace}" \\
  28.521 -         & | & term " `` " term \\
  28.522 -         & | & term " Int " term \\
  28.523 -         & | & term " Un " term \\
  28.524 -         & | & "INT~~"  id ":" term " . " term \\
  28.525 -         & | & "UN~~~"  id ":" term " . " term \\
  28.526 -         & | & "INT~~"  id~id^* " . " term \\
  28.527 -         & | & "UN~~~"  id~id^* " . " term \\[2ex]
  28.528 - formula & = & \hbox{other formulae\ldots} \\
  28.529 -         & | & term " : " term \\
  28.530 -         & | & term " \ttilde: " term \\
  28.531 -         & | & term " <= " term \\
  28.532 -         & | & "ALL " id ":" term " . " formula
  28.533 -         & | & "!~" id ":" term " . " formula \\
  28.534 -         & | & "EX~~" id ":" term " . " formula
  28.535 -         & | & "?~" id ":" term " . " formula \\
  28.536 -  \end{array}
  28.537 -\]
  28.538 -\subcaption{Full Grammar}
  28.539 -\caption{Syntax of the theory \texttt{Set} (continued)} \label{hol-set-syntax2}
  28.540 -\end{figure} 
  28.541 -
  28.542 -
  28.543 -\section{A formulation of set theory}
  28.544 -Historically, higher-order logic gives a foundation for Russell and
  28.545 -Whitehead's theory of classes.  Let us use modern terminology and call them
  28.546 -{\bf sets}, but note that these sets are distinct from those of ZF set theory,
  28.547 -and behave more like ZF classes.
  28.548 -\begin{itemize}
  28.549 -\item
  28.550 -Sets are given by predicates over some type~$\sigma$.  Types serve to
  28.551 -define universes for sets, but type-checking is still significant.
  28.552 -\item
  28.553 -There is a universal set (for each type).  Thus, sets have complements, and
  28.554 -may be defined by absolute comprehension.
  28.555 -\item
  28.556 -Although sets may contain other sets as elements, the containing set must
  28.557 -have a more complex type.
  28.558 -\end{itemize}
  28.559 -Finite unions and intersections have the same behaviour in HOL as they do
  28.560 -in~ZF.  In HOL the intersection of the empty set is well-defined, denoting the
  28.561 -universal set for the given type.
  28.562 -
  28.563 -\subsection{Syntax of set theory}\index{*set type}
  28.564 -HOL's set theory is called \thydx{Set}.  The type $\alpha\,set$ is essentially
  28.565 -the same as $\alpha\To bool$.  The new type is defined for clarity and to
  28.566 -avoid complications involving function types in unification.  The isomorphisms
  28.567 -between the two types are declared explicitly.  They are very natural:
  28.568 -\texttt{Collect} maps $\alpha\To bool$ to $\alpha\,set$, while \hbox{\tt op :}
  28.569 -maps in the other direction (ignoring argument order).
  28.570 -
  28.571 -Figure~\ref{hol-set-syntax} lists the constants, infixes, and syntax
  28.572 -translations.  Figure~\ref{hol-set-syntax2} presents the grammar of the new
  28.573 -constructs.  Infix operators include union and intersection ($A\un B$
  28.574 -and $A\int B$), the subset and membership relations, and the image
  28.575 -operator~{\tt``}\@.  Note that $a$\verb|~:|$b$ is translated to
  28.576 -$\lnot(a\in b)$.  
  28.577 -
  28.578 -The $\{a@1,\ldots\}$ notation abbreviates finite sets constructed in
  28.579 -the obvious manner using~\texttt{insert} and~$\{\}$:
  28.580 -\begin{eqnarray*}
  28.581 -  \{a, b, c\} & \equiv &
  28.582 -  \texttt{insert} \, a \, ({\tt insert} \, b \, ({\tt insert} \, c \, \{\}))
  28.583 -\end{eqnarray*}
  28.584 -
  28.585 -The set \hbox{\tt{\ttlbrace}$x$.\ $P[x]${\ttrbrace}} consists of all $x$ (of
  28.586 -suitable type) that satisfy~$P[x]$, where $P[x]$ is a formula that may contain
  28.587 -free occurrences of~$x$.  This syntax expands to \cdx{Collect}$(\lambda x.
  28.588 -P[x])$.  It defines sets by absolute comprehension, which is impossible in~ZF;
  28.589 -the type of~$x$ implicitly restricts the comprehension.
  28.590 -
  28.591 -The set theory defines two {\bf bounded quantifiers}:
  28.592 -\begin{eqnarray*}
  28.593 -   \forall x\in A. P[x] &\hbox{abbreviates}& \forall x. x\in A\imp P[x] \\
  28.594 -   \exists x\in A. P[x] &\hbox{abbreviates}& \exists x. x\in A\conj P[x]
  28.595 -\end{eqnarray*}
  28.596 -The constants~\cdx{Ball} and~\cdx{Bex} are defined
  28.597 -accordingly.  Instead of \texttt{Ball $A$ $P$} and \texttt{Bex $A$ $P$} we may
  28.598 -write\index{*"! symbol}\index{*"? symbol}
  28.599 -\index{*ALL symbol}\index{*EX symbol} 
  28.600 -%
  28.601 -\hbox{\tt ALL~$x$:$A$.\ $P[x]$} and \hbox{\tt EX~$x$:$A$.\ $P[x]$}.  The
  28.602 -original notation of Gordon's {\sc hol} system is supported as well:
  28.603 -\texttt{!}\ and \texttt{?}.
  28.604 -
  28.605 -Unions and intersections over sets, namely $\bigcup@{x\in A}B[x]$ and
  28.606 -$\bigcap@{x\in A}B[x]$, are written 
  28.607 -\sdx{UN}~\hbox{\tt$x$:$A$.\ $B[x]$} and
  28.608 -\sdx{INT}~\hbox{\tt$x$:$A$.\ $B[x]$}.  
  28.609 -
  28.610 -Unions and intersections over types, namely $\bigcup@x B[x]$ and $\bigcap@x
  28.611 -B[x]$, are written \sdx{UN}~\hbox{\tt$x$.\ $B[x]$} and
  28.612 -\sdx{INT}~\hbox{\tt$x$.\ $B[x]$}.  They are equivalent to the previous
  28.613 -union and intersection operators when $A$ is the universal set.
  28.614 -
  28.615 -The operators $\bigcup A$ and $\bigcap A$ act upon sets of sets.  They are
  28.616 -not binders, but are equal to $\bigcup@{x\in A}x$ and $\bigcap@{x\in A}x$,
  28.617 -respectively.
  28.618 -
  28.619 -
  28.620 -
  28.621 -\begin{figure} \underscoreon
  28.622 -\begin{ttbox}
  28.623 -\tdx{mem_Collect_eq}    (a : {\ttlbrace}x. P x{\ttrbrace}) = P a
  28.624 -\tdx{Collect_mem_eq}    {\ttlbrace}x. x:A{\ttrbrace} = A
  28.625 -
  28.626 -\tdx{empty_def}         {\ttlbrace}{\ttrbrace}          == {\ttlbrace}x. False{\ttrbrace}
  28.627 -\tdx{insert_def}        insert a B  == {\ttlbrace}x. x=a{\ttrbrace} Un B
  28.628 -\tdx{Ball_def}          Ball A P    == ! x. x:A --> P x
  28.629 -\tdx{Bex_def}           Bex A P     == ? x. x:A & P x
  28.630 -\tdx{subset_def}        A <= B      == ! x:A. x:B
  28.631 -\tdx{Un_def}            A Un B      == {\ttlbrace}x. x:A | x:B{\ttrbrace}
  28.632 -\tdx{Int_def}           A Int B     == {\ttlbrace}x. x:A & x:B{\ttrbrace}
  28.633 -\tdx{set_diff_def}      A - B       == {\ttlbrace}x. x:A & x~:B{\ttrbrace}
  28.634 -\tdx{Compl_def}         -A          == {\ttlbrace}x. ~ x:A{\ttrbrace}
  28.635 -\tdx{INTER_def}         INTER A B   == {\ttlbrace}y. ! x:A. y: B x{\ttrbrace}
  28.636 -\tdx{UNION_def}         UNION A B   == {\ttlbrace}y. ? x:A. y: B x{\ttrbrace}
  28.637 -\tdx{INTER1_def}        INTER1 B    == INTER {\ttlbrace}x. True{\ttrbrace} B 
  28.638 -\tdx{UNION1_def}        UNION1 B    == UNION {\ttlbrace}x. True{\ttrbrace} B 
  28.639 -\tdx{Inter_def}         Inter S     == (INT x:S. x)
  28.640 -\tdx{Union_def}         Union S     == (UN  x:S. x)
  28.641 -\tdx{Pow_def}           Pow A       == {\ttlbrace}B. B <= A{\ttrbrace}
  28.642 -\tdx{image_def}         f``A        == {\ttlbrace}y. ? x:A. y=f x{\ttrbrace}
  28.643 -\tdx{range_def}         range f     == {\ttlbrace}y. ? x. y=f x{\ttrbrace}
  28.644 -\end{ttbox}
  28.645 -\caption{Rules of the theory \texttt{Set}} \label{hol-set-rules}
  28.646 -\end{figure}
  28.647 -
  28.648 -
  28.649 -\begin{figure} \underscoreon
  28.650 -\begin{ttbox}
  28.651 -\tdx{CollectI}        [| P a |] ==> a : {\ttlbrace}x. P x{\ttrbrace}
  28.652 -\tdx{CollectD}        [| a : {\ttlbrace}x. P x{\ttrbrace} |] ==> P a
  28.653 -\tdx{CollectE}        [| a : {\ttlbrace}x. P x{\ttrbrace};  P a ==> W |] ==> W
  28.654 -
  28.655 -\tdx{ballI}           [| !!x. x:A ==> P x |] ==> ! x:A. P x
  28.656 -\tdx{bspec}           [| ! x:A. P x;  x:A |] ==> P x
  28.657 -\tdx{ballE}           [| ! x:A. P x;  P x ==> Q;  ~ x:A ==> Q |] ==> Q
  28.658 -
  28.659 -\tdx{bexI}            [| P x;  x:A |] ==> ? x:A. P x
  28.660 -\tdx{bexCI}           [| ! x:A. ~ P x ==> P a;  a:A |] ==> ? x:A. P x
  28.661 -\tdx{bexE}            [| ? x:A. P x;  !!x. [| x:A; P x |] ==> Q  |] ==> Q
  28.662 -\subcaption{Comprehension and Bounded quantifiers}
  28.663 -
  28.664 -\tdx{subsetI}         (!!x. x:A ==> x:B) ==> A <= B
  28.665 -\tdx{subsetD}         [| A <= B;  c:A |] ==> c:B
  28.666 -\tdx{subsetCE}        [| A <= B;  ~ (c:A) ==> P;  c:B ==> P |] ==> P
  28.667 -
  28.668 -\tdx{subset_refl}     A <= A
  28.669 -\tdx{subset_trans}    [| A<=B;  B<=C |] ==> A<=C
  28.670 -
  28.671 -\tdx{equalityI}       [| A <= B;  B <= A |] ==> A = B
  28.672 -\tdx{equalityD1}      A = B ==> A<=B
  28.673 -\tdx{equalityD2}      A = B ==> B<=A
  28.674 -\tdx{equalityE}       [| A = B;  [| A<=B; B<=A |] ==> P |]  ==>  P
  28.675 -
  28.676 -\tdx{equalityCE}      [| A = B;  [| c:A; c:B |] ==> P;  
  28.677 -                           [| ~ c:A; ~ c:B |] ==> P 
  28.678 -                |]  ==>  P
  28.679 -\subcaption{The subset and equality relations}
  28.680 -\end{ttbox}
  28.681 -\caption{Derived rules for set theory} \label{hol-set1}
  28.682 -\end{figure}
  28.683 -
  28.684 -
  28.685 -\begin{figure} \underscoreon
  28.686 -\begin{ttbox}
  28.687 -\tdx{emptyE}   a : {\ttlbrace}{\ttrbrace} ==> P
  28.688 -
  28.689 -\tdx{insertI1} a : insert a B
  28.690 -\tdx{insertI2} a : B ==> a : insert b B
  28.691 -\tdx{insertE}  [| a : insert b A;  a=b ==> P;  a:A ==> P |] ==> P
  28.692 -
  28.693 -\tdx{ComplI}   [| c:A ==> False |] ==> c : -A
  28.694 -\tdx{ComplD}   [| c : -A |] ==> ~ c:A
  28.695 -
  28.696 -\tdx{UnI1}     c:A ==> c : A Un B
  28.697 -\tdx{UnI2}     c:B ==> c : A Un B
  28.698 -\tdx{UnCI}     (~c:B ==> c:A) ==> c : A Un B
  28.699 -\tdx{UnE}      [| c : A Un B;  c:A ==> P;  c:B ==> P |] ==> P
  28.700 -
  28.701 -\tdx{IntI}     [| c:A;  c:B |] ==> c : A Int B
  28.702 -\tdx{IntD1}    c : A Int B ==> c:A
  28.703 -\tdx{IntD2}    c : A Int B ==> c:B
  28.704 -\tdx{IntE}     [| c : A Int B;  [| c:A; c:B |] ==> P |] ==> P
  28.705 -
  28.706 -\tdx{UN_I}     [| a:A;  b: B a |] ==> b: (UN x:A. B x)
  28.707 -\tdx{UN_E}     [| b: (UN x:A. B x);  !!x.[| x:A;  b:B x |] ==> R |] ==> R
  28.708 -
  28.709 -\tdx{INT_I}    (!!x. x:A ==> b: B x) ==> b : (INT x:A. B x)
  28.710 -\tdx{INT_D}    [| b: (INT x:A. B x);  a:A |] ==> b: B a
  28.711 -\tdx{INT_E}    [| b: (INT x:A. B x);  b: B a ==> R;  ~ a:A ==> R |] ==> R
  28.712 -
  28.713 -\tdx{UnionI}   [| X:C;  A:X |] ==> A : Union C
  28.714 -\tdx{UnionE}   [| A : Union C;  !!X.[| A:X;  X:C |] ==> R |] ==> R
  28.715 -
  28.716 -\tdx{InterI}   [| !!X. X:C ==> A:X |] ==> A : Inter C
  28.717 -\tdx{InterD}   [| A : Inter C;  X:C |] ==> A:X
  28.718 -\tdx{InterE}   [| A : Inter C;  A:X ==> R;  ~ X:C ==> R |] ==> R
  28.719 -
  28.720 -\tdx{PowI}     A<=B ==> A: Pow B
  28.721 -\tdx{PowD}     A: Pow B ==> A<=B
  28.722 -
  28.723 -\tdx{imageI}   [| x:A |] ==> f x : f``A
  28.724 -\tdx{imageE}   [| b : f``A;  !!x.[| b=f x;  x:A |] ==> P |] ==> P
  28.725 -
  28.726 -\tdx{rangeI}   f x : range f
  28.727 -\tdx{rangeE}   [| b : range f;  !!x.[| b=f x |] ==> P |] ==> P
  28.728 -\end{ttbox}
  28.729 -\caption{Further derived rules for set theory} \label{hol-set2}
  28.730 -\end{figure}
  28.731 -
  28.732 -
  28.733 -\subsection{Axioms and rules of set theory}
  28.734 -Figure~\ref{hol-set-rules} presents the rules of theory \thydx{Set}.  The
  28.735 -axioms \tdx{mem_Collect_eq} and \tdx{Collect_mem_eq} assert
  28.736 -that the functions \texttt{Collect} and \hbox{\tt op :} are isomorphisms.  Of
  28.737 -course, \hbox{\tt op :} also serves as the membership relation.
  28.738 -
  28.739 -All the other axioms are definitions.  They include the empty set, bounded
  28.740 -quantifiers, unions, intersections, complements and the subset relation.
  28.741 -They also include straightforward constructions on functions: image~({\tt``})
  28.742 -and \texttt{range}.
  28.743 -
  28.744 -%The predicate \cdx{inj_on} is used for simulating type definitions.
  28.745 -%The statement ${\tt inj_on}~f~A$ asserts that $f$ is injective on the
  28.746 -%set~$A$, which specifies a subset of its domain type.  In a type
  28.747 -%definition, $f$ is the abstraction function and $A$ is the set of valid
  28.748 -%representations; we should not expect $f$ to be injective outside of~$A$.
  28.749 -
  28.750 -%\begin{figure} \underscoreon
  28.751 -%\begin{ttbox}
  28.752 -%\tdx{Inv_f_f}    inj f ==> Inv f (f x) = x
  28.753 -%\tdx{f_Inv_f}    y : range f ==> f(Inv f y) = y
  28.754 -%
  28.755 -%\tdx{Inv_injective}
  28.756 -%    [| Inv f x=Inv f y; x: range f;  y: range f |] ==> x=y
  28.757 -%
  28.758 -%
  28.759 -%\tdx{monoI}      [| !!A B. A <= B ==> f A <= f B |] ==> mono f
  28.760 -%\tdx{monoD}      [| mono f;  A <= B |] ==> f A <= f B
  28.761 -%
  28.762 -%\tdx{injI}       [| !! x y. f x = f y ==> x=y |] ==> inj f
  28.763 -%\tdx{inj_inverseI}              (!!x. g(f x) = x) ==> inj f
  28.764 -%\tdx{injD}       [| inj f; f x = f y |] ==> x=y
  28.765 -%
  28.766 -%\tdx{inj_onI}  (!!x y. [| f x=f y; x:A; y:A |] ==> x=y) ==> inj_on f A
  28.767 -%\tdx{inj_onD}  [| inj_on f A;  f x=f y;  x:A;  y:A |] ==> x=y
  28.768 -%
  28.769 -%\tdx{inj_on_inverseI}
  28.770 -%    (!!x. x:A ==> g(f x) = x) ==> inj_on f A
  28.771 -%\tdx{inj_on_contraD}
  28.772 -%    [| inj_on f A;  x~=y;  x:A;  y:A |] ==> ~ f x=f y
  28.773 -%\end{ttbox}
  28.774 -%\caption{Derived rules involving functions} \label{hol-fun}
  28.775 -%\end{figure}
  28.776 -
  28.777 -
  28.778 -\begin{figure} \underscoreon
  28.779 -\begin{ttbox}
  28.780 -\tdx{Union_upper}     B:A ==> B <= Union A
  28.781 -\tdx{Union_least}     [| !!X. X:A ==> X<=C |] ==> Union A <= C
  28.782 -
  28.783 -\tdx{Inter_lower}     B:A ==> Inter A <= B
  28.784 -\tdx{Inter_greatest}  [| !!X. X:A ==> C<=X |] ==> C <= Inter A
  28.785 -
  28.786 -\tdx{Un_upper1}       A <= A Un B
  28.787 -\tdx{Un_upper2}       B <= A Un B
  28.788 -\tdx{Un_least}        [| A<=C;  B<=C |] ==> A Un B <= C
  28.789 -
  28.790 -\tdx{Int_lower1}      A Int B <= A
  28.791 -\tdx{Int_lower2}      A Int B <= B
  28.792 -\tdx{Int_greatest}    [| C<=A;  C<=B |] ==> C <= A Int B
  28.793 -\end{ttbox}
  28.794 -\caption{Derived rules involving subsets} \label{hol-subset}
  28.795 -\end{figure}
  28.796 -
  28.797 -
  28.798 -\begin{figure} \underscoreon   \hfuzz=4pt%suppress "Overfull \hbox" message
  28.799 -\begin{ttbox}
  28.800 -\tdx{Int_absorb}        A Int A = A
  28.801 -\tdx{Int_commute}       A Int B = B Int A
  28.802 -\tdx{Int_assoc}         (A Int B) Int C  =  A Int (B Int C)
  28.803 -\tdx{Int_Un_distrib}    (A Un B)  Int C  =  (A Int C) Un (B Int C)
  28.804 -
  28.805 -\tdx{Un_absorb}         A Un A = A
  28.806 -\tdx{Un_commute}        A Un B = B Un A
  28.807 -\tdx{Un_assoc}          (A Un B)  Un C  =  A Un (B Un C)
  28.808 -\tdx{Un_Int_distrib}    (A Int B) Un C  =  (A Un C) Int (B Un C)
  28.809 -
  28.810 -\tdx{Compl_disjoint}    A Int (-A) = {\ttlbrace}x. False{\ttrbrace}
  28.811 -\tdx{Compl_partition}   A Un  (-A) = {\ttlbrace}x. True{\ttrbrace}
  28.812 -\tdx{double_complement} -(-A) = A
  28.813 -\tdx{Compl_Un}          -(A Un B)  = (-A) Int (-B)
  28.814 -\tdx{Compl_Int}         -(A Int B) = (-A) Un (-B)
  28.815 -
  28.816 -\tdx{Union_Un_distrib}  Union(A Un B) = (Union A) Un (Union B)
  28.817 -\tdx{Int_Union}         A Int (Union B) = (UN C:B. A Int C)
  28.818 -
  28.819 -\tdx{Inter_Un_distrib}  Inter(A Un B) = (Inter A) Int (Inter B)
  28.820 -\tdx{Un_Inter}          A Un (Inter B) = (INT C:B. A Un C)
  28.821 -
  28.822 -\end{ttbox}
  28.823 -\caption{Set equalities} \label{hol-equalities}
  28.824 -\end{figure}
  28.825 -%\tdx{Un_Union_image}    (UN x:C.(A x) Un (B x)) = Union(A``C) Un Union(B``C)
  28.826 -%\tdx{Int_Inter_image}   (INT x:C.(A x) Int (B x)) = Inter(A``C) Int Inter(B``C)
  28.827 -
  28.828 -Figures~\ref{hol-set1} and~\ref{hol-set2} present derived rules.  Most are
  28.829 -obvious and resemble rules of Isabelle's ZF set theory.  Certain rules, such
  28.830 -as \tdx{subsetCE}, \tdx{bexCI} and \tdx{UnCI}, are designed for classical
  28.831 -reasoning; the rules \tdx{subsetD}, \tdx{bexI}, \tdx{UnI1} and~\tdx{UnI2} are
  28.832 -not strictly necessary but yield more natural proofs.  Similarly,
  28.833 -\tdx{equalityCE} supports classical reasoning about extensionality, after the
  28.834 -fashion of \tdx{iffCE}.  See the file \texttt{HOL/Set.ML} for proofs
  28.835 -pertaining to set theory.
  28.836 -
  28.837 -Figure~\ref{hol-subset} presents lattice properties of the subset relation.
  28.838 -Unions form least upper bounds; non-empty intersections form greatest lower
  28.839 -bounds.  Reasoning directly about subsets often yields clearer proofs than
  28.840 -reasoning about the membership relation.  See the file \texttt{HOL/subset.ML}.
  28.841 -
  28.842 -Figure~\ref{hol-equalities} presents many common set equalities.  They
  28.843 -include commutative, associative and distributive laws involving unions,
  28.844 -intersections and complements.  For a complete listing see the file {\tt
  28.845 -HOL/equalities.ML}.
  28.846 -
  28.847 -\begin{warn}
  28.848 -\texttt{Blast_tac} proves many set-theoretic theorems automatically.
  28.849 -Hence you seldom need to refer to the theorems above.
  28.850 -\end{warn}
  28.851 -
  28.852 -\begin{figure}
  28.853 -\begin{center}
  28.854 -\begin{tabular}{rrr}
  28.855 -  \it name      &\it meta-type  & \it description \\ 
  28.856 -  \cdx{inj}~~\cdx{surj}& $(\alpha\To\beta )\To bool$
  28.857 -        & injective/surjective \\
  28.858 -  \cdx{inj_on}        & $[\alpha\To\beta ,\alpha\,set]\To bool$
  28.859 -        & injective over subset\\
  28.860 -  \cdx{inv} & $(\alpha\To\beta)\To(\beta\To\alpha)$ & inverse function
  28.861 -\end{tabular}
  28.862 -\end{center}
  28.863 -
  28.864 -\underscoreon
  28.865 -\begin{ttbox}
  28.866 -\tdx{inj_def}         inj f      == ! x y. f x=f y --> x=y
  28.867 -\tdx{surj_def}        surj f     == ! y. ? x. y=f x
  28.868 -\tdx{inj_on_def}      inj_on f A == !x:A. !y:A. f x=f y --> x=y
  28.869 -\tdx{inv_def}         inv f      == (\%y. @x. f(x)=y)
  28.870 -\end{ttbox}
  28.871 -\caption{Theory \thydx{Fun}} \label{fig:HOL:Fun}
  28.872 -\end{figure}
  28.873 -
  28.874 -\subsection{Properties of functions}\nopagebreak
  28.875 -Figure~\ref{fig:HOL:Fun} presents a theory of simple properties of functions.
  28.876 -Note that ${\tt inv}~f$ uses Hilbert's $\varepsilon$ to yield an inverse
  28.877 -of~$f$.  See the file \texttt{HOL/Fun.ML} for a complete listing of the derived
  28.878 -rules.  Reasoning about function composition (the operator~\sdx{o}) and the
  28.879 -predicate~\cdx{surj} is done simply by expanding the definitions.
  28.880 -
  28.881 -There is also a large collection of monotonicity theorems for constructions
  28.882 -on sets in the file \texttt{HOL/mono.ML}.
  28.883 -
  28.884 -
  28.885 -\section{Simplification and substitution}
  28.886 -
  28.887 -Simplification tactics such as \texttt{Asm_simp_tac} and \texttt{Full_simp_tac} use the default simpset
  28.888 -(\texttt{simpset()}), which works for most purposes.  A quite minimal
  28.889 -simplification set for higher-order logic is~\ttindexbold{HOL_ss};
  28.890 -even more frugal is \ttindexbold{HOL_basic_ss}.  Equality~($=$), which
  28.891 -also expresses logical equivalence, may be used for rewriting.  See
  28.892 -the file \texttt{HOL/simpdata.ML} for a complete listing of the basic
  28.893 -simplification rules.
  28.894 -
  28.895 -See \iflabelundefined{chap:classical}{the {\em Reference Manual\/}}%
  28.896 -{Chaps.\ts\ref{substitution} and~\ref{simp-chap}} for details of substitution
  28.897 -and simplification.
  28.898 -
  28.899 -\begin{warn}\index{simplification!of conjunctions}%
  28.900 -  Reducing $a=b\conj P(a)$ to $a=b\conj P(b)$ is sometimes advantageous.  The
  28.901 -  left part of a conjunction helps in simplifying the right part.  This effect
  28.902 -  is not available by default: it can be slow.  It can be obtained by
  28.903 -  including \ttindex{conj_cong} in a simpset, \verb$addcongs [conj_cong]$.
  28.904 -\end{warn}
  28.905 -
  28.906 -\begin{warn}\index{simplification!of \texttt{if}}\label{if-simp}%