--- a/src/Benchmarks/Quickcheck_Benchmark/Needham_Schroeder_Base.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Benchmarks/Quickcheck_Benchmark/Needham_Schroeder_Base.thy Sat Jan 05 17:24:33 2019 +0100
@@ -151,15 +151,15 @@
fun of_set compfuns (Type ("fun", [T, _])) =
case body_type (Predicate_Compile_Aux.mk_monadT compfuns T) of
Type ("Quickcheck_Exhaustive.three_valued", _) =>
- Const(@{const_name neg_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
- | Type ("Predicate.pred", _) => Const(@{const_name pred_of_set}, HOLogic.mk_setT T --> Predicate_Compile_Aux.mk_monadT compfuns T)
- | _ => Const(@{const_name pos_cps_of_set}, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ Const(\<^const_name>\<open>neg_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
+ | Type ("Predicate.pred", _) => Const(\<^const_name>\<open>pred_of_set\<close>, HOLogic.mk_setT T --> Predicate_Compile_Aux.mk_monadT compfuns T)
+ | _ => Const(\<^const_name>\<open>pos_cps_of_set\<close>, HOLogic.mk_setT T --> (Predicate_Compile_Aux.mk_monadT compfuns T))
fun member compfuns (U as Type ("fun", [T, _])) =
(absdummy T (absdummy (HOLogic.mk_setT T) (Predicate_Compile_Aux.mk_if compfuns
- (Const (@{const_name "Set.member"}, T --> HOLogic.mk_setT T --> @{typ bool}) $ Bound 1 $ Bound 0))))
+ (Const (\<^const_name>\<open>Set.member\<close>, T --> HOLogic.mk_setT T --> \<^typ>\<open>bool\<close>) $ Bound 1 $ Bound 0))))
in
- Core_Data.force_modes_and_compilations @{const_name Set.member}
+ Core_Data.force_modes_and_compilations \<^const_name>\<open>Set.member\<close>
[(oi, (of_set, false)), (ii, (member, false))]
end
\<close>
@@ -186,7 +186,7 @@
code_pred [generator_cps] synth' unfolding synth'_def by (rule synth.cases) fastforce+
-setup \<open>Predicate_Compile_Data.ignore_consts [@{const_name analz}, @{const_name knows}]\<close>
+setup \<open>Predicate_Compile_Data.ignore_consts [\<^const_name>\<open>analz\<close>, \<^const_name>\<open>knows\<close>]\<close>
declare ListMem_iff[symmetric, code_pred_inline]
declare [[quickcheck_timing]]
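
A note on the pattern applied in every hunk of this changeset: the old "@{...}" antiquotations and the new control-symbol cartouche syntax elaborate to identical ML values, so these rewrites are behaviour-preserving. A minimal sketch (illustration only, not part of the patch):

ML \<open>
  val c1 = @{const_name Set.member};         (* old-style antiquotation *)
  val c2 = \<^const_name>\<open>Set.member\<close>;  (* new control-symbol syntax *)
  val _ = @{assert} (c1 = c2);               (* both yield "Set.member" *)
\<close>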
--- a/src/Benchmarks/Record_Benchmark/Record_Benchmark.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Benchmarks/Record_Benchmark/Record_Benchmark.thy Sat Jan 05 17:24:33 2019 +0100
@@ -356,49 +356,49 @@
lemma "(r\<lparr>A255:=x,A253:=y,A255:=z \<rparr>) = r\<lparr>A253:=y,A255:=z\<rparr>"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.upd_simproc]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.upd_simproc]) 1\<close>)
done
lemma "(\<forall>r. P (A155 r)) \<longrightarrow> (\<forall>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<forall>r. P (A155 r)) \<longrightarrow> (\<forall>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (A155 r)) \<longrightarrow> (\<exists>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply simp
done
lemma "(\<exists>r. P (A155 r)) \<longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply simp
done
lemma "\<And>r. P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.split_simproc (K ~1)]) 1\<close>)
apply auto
done
lemma "\<And>r. P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
lemma "P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
lemma fixes r shows "P (A155 r) \<Longrightarrow> (\<exists>x. P x)"
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
@@ -409,7 +409,7 @@
assume "P (A155 r)"
then have "\<exists>x. P x"
apply -
- apply (tactic \<open>Record.split_simp_tac @{context} [] (K ~1) 1\<close>)
+ apply (tactic \<open>Record.split_simp_tac \<^context> [] (K ~1) 1\<close>)
apply auto
done
end
@@ -417,7 +417,7 @@
lemma "\<exists>r. A155 r = x"
apply (tactic \<open>simp_tac
- (put_simpset HOL_basic_ss @{context} addsimprocs [Record.ex_sel_eq_simproc]) 1\<close>)
+ (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.ex_sel_eq_simproc]) 1\<close>)
done
print_record many_A
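
For orientation, the tactic pattern these benchmark lemmas exercise can be reproduced on a small record; the two-field record below is a hypothetical stand-in for the benchmark's many-field records:

record point =
  A1 :: nat
  A2 :: nat

lemma "(r\<lparr>A1 := x, A1 := y\<rparr>) = r\<lparr>A1 := y\<rparr>"
  apply (tactic \<open>simp_tac
    (put_simpset HOL_basic_ss \<^context> addsimprocs [Record.upd_simproc]) 1\<close>)
  done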
--- a/src/Doc/Classes/Classes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Classes/Classes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -119,7 +119,7 @@
subsection \<open>Class instantiation \label{sec:class_inst}\<close>
text \<open>
- The concrete type @{typ int} is made a @{class semigroup} instance
+ The concrete type \<^typ>\<open>int\<close> is made a \<^class>\<open>semigroup\<close> instance
by providing a suitable definition for the class parameter \<open>(\<otimes>)\<close> and a proof for the specification of @{fact assoc}. This is
accomplished by the @{command instantiation} target:
\<close>
@@ -149,11 +149,10 @@
relevant primitive proof goals; typically it is the first method
applied in an instantiation proof.
- From now on, the type-checker will consider @{typ int} as a @{class
- semigroup} automatically, i.e.\ any general results are immediately
+ From now on, the type-checker will consider \<^typ>\<open>int\<close> as a \<^class>\<open>semigroup\<close> automatically, i.e.\ any general results are immediately
available on concrete instances.
- \<^medskip> Another instance of @{class semigroup} yields the natural
+ \<^medskip> Another instance of \<^class>\<open>semigroup\<close> yields the natural
numbers:
\<close>
@@ -206,7 +205,7 @@
\<^noindent> Associativity of product semigroups is established using
the definition of \<open>(\<otimes>)\<close> on products and the hypothetical
associativity of the type components; these hypotheses are
- legitimate due to the @{class semigroup} constraints imposed on the
+ legitimate due to the \<^class>\<open>semigroup\<close> constraints imposed on the
type components by the @{command instance} proposition. Indeed,
this pattern often occurs with parametric types and type classes.
\<close>
@@ -216,7 +215,7 @@
text \<open>
We define a subclass \<open>monoidl\<close> (a semigroup with a left-hand
- neutral) by extending @{class semigroup} with one additional
+ neutral) by extending \<^class>\<open>semigroup\<close> with one additional
parameter \<open>neutral\<close> together with its characteristic property:
\<close>
@@ -388,7 +387,7 @@
qed
text \<open>
- \<^noindent> Here the \qt{@{keyword "in"} @{class group}} target
+ \<^noindent> Here the \qt{@{keyword "in"} \<^class>\<open>group\<close>} target
specification indicates that the result is recorded within that
context for later use. This local theorem is also lifted to the
global one @{fact "group.left_cancel:"} @{prop [source] "\<And>x y z ::
@@ -433,7 +432,7 @@
functors that have a canonical interpretation as type classes.
There is also the possibility of other interpretations. For
example, \<open>list\<close>s also form a monoid with \<open>append\<close> and
- @{term "[]"} as operations, but it seems inappropriate to apply to
+ \<^term>\<open>[]\<close> as operations, but it seems inappropriate to apply to
lists the same operations as for genuinely algebraic types. In such
a case, we can simply make a particular interpretation of monoids
for lists:
@@ -469,7 +468,7 @@
text \<open>
\<^noindent> This pattern is also helpful to reuse abstract
specifications on the \emph{same} type. For example, think of a
- class \<open>preorder\<close>; for type @{typ nat}, there are at least two
+ class \<open>preorder\<close>; for type \<^typ>\<open>nat\<close>, there are at least two
possible instances: the natural order or the order induced by the
divides relation. But only one of these instances can be used for
@{command instantiation}; using the locale behind the class \<open>preorder\<close>, it is still possible to utilise the same abstract
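
For reference, the complete instantiation this chapter builds up reads as follows; the sketch merely restates the chapter's own \<^typ>\<open>int\<close>/\<^class>\<open>semigroup\<close> example with operation \<open>(\<otimes>)\<close>:

instantiation int :: semigroup
begin

definition mult_int_def: "i \<otimes> j = i + (j::int)"

instance proof
  fix i j k :: int
  show "(i \<otimes> j) \<otimes> k = i \<otimes> (j \<otimes> k)"
    unfolding mult_int_def by simp
qed

end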
--- a/src/Doc/Classes/Setup.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Classes/Setup.thy Sat Jan 05 17:24:33 2019 +0100
@@ -18,18 +18,18 @@
fun alpha_ast_tr [] = Ast.Variable "'a"
| alpha_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
fun alpha_ofsort_ast_tr [ast] =
- Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'a", ast]
+ Ast.Appl [Ast.Constant \<^syntax_const>\<open>_ofsort\<close>, Ast.Variable "'a", ast]
| alpha_ofsort_ast_tr asts = raise Ast.AST ("alpha_ast_tr", asts);
fun beta_ast_tr [] = Ast.Variable "'b"
| beta_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
fun beta_ofsort_ast_tr [ast] =
- Ast.Appl [Ast.Constant @{syntax_const "_ofsort"}, Ast.Variable "'b", ast]
+ Ast.Appl [Ast.Constant \<^syntax_const>\<open>_ofsort\<close>, Ast.Variable "'b", ast]
| beta_ofsort_ast_tr asts = raise Ast.AST ("beta_ast_tr", asts);
in
- [(@{syntax_const "_alpha"}, K alpha_ast_tr),
- (@{syntax_const "_alpha_ofsort"}, K alpha_ofsort_ast_tr),
- (@{syntax_const "_beta"}, K beta_ast_tr),
- (@{syntax_const "_beta_ofsort"}, K beta_ofsort_ast_tr)]
+ [(\<^syntax_const>\<open>_alpha\<close>, K alpha_ast_tr),
+ (\<^syntax_const>\<open>_alpha_ofsort\<close>, K alpha_ofsort_ast_tr),
+ (\<^syntax_const>\<open>_beta\<close>, K beta_ast_tr),
+ (\<^syntax_const>\<open>_beta_ofsort\<close>, K beta_ofsort_ast_tr)]
end
\<close>
--- a/src/Doc/Codegen/Adaptation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Adaptation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -127,10 +127,9 @@
\secref{sec:adaptation_mechanisms})
\item Such parametrisations can involve references to a
- target-specific standard \<open>library\<close> (e.g. using the \<open>Haskell\<close> @{verbatim Maybe} type instead of the \<open>HOL\<close>
- @{type "option"} type); if such are used, the corresponding
- identifiers (in our example, @{verbatim Maybe}, @{verbatim
- Nothing} and @{verbatim Just}) also have to be considered \<open>reserved\<close>.
+ target-specific standard \<open>library\<close> (e.g. using the \<open>Haskell\<close> \<^verbatim>\<open>Maybe\<close> type instead of the \<open>HOL\<close>
+ \<^type>\<open>option\<close> type); if such are used, the corresponding
+ identifiers (in our example, \<^verbatim>\<open>Maybe\<close>, \<^verbatim>\<open>Nothing\<close> and \<^verbatim>\<open>Just\<close>) also have to be considered \<open>reserved\<close>.
\item Even more, the user can enrich the library of the
target-language by providing code snippets (\qt{\<open>includes\<close>}) which are prepended to any generated code (see
@@ -147,7 +146,7 @@
subsection \<open>Common adaptation applications \label{sec:common_adaptation}\<close>
text \<open>
- The @{theory Main} theory of Isabelle/HOL already provides a code
+ The \<^theory>\<open>Main\<close> theory of Isabelle/HOL already provides a code
generator setup which should be suitable for most applications.
Common extensions and modifications are available by certain
theories in \<^dir>\<open>~~/src/HOL/Library\<close>; beside being useful in
@@ -156,22 +155,21 @@
\begin{description}
- \item[@{theory "HOL.Code_Numeral"}] provides additional numeric
- types @{typ integer} and @{typ natural} isomorphic to types
- @{typ int} and @{typ nat} respectively. Type @{typ integer}
- is mapped to target-language built-in integers; @{typ natural}
- is implemented as abstract type over @{typ integer}.
+ \item[\<^theory>\<open>HOL.Code_Numeral\<close>] provides additional numeric
+ types \<^typ>\<open>integer\<close> and \<^typ>\<open>natural\<close> isomorphic to types
+ \<^typ>\<open>int\<close> and \<^typ>\<open>nat\<close> respectively. Type \<^typ>\<open>integer\<close>
+ is mapped to target-language built-in integers; \<^typ>\<open>natural\<close>
+ is implemented as abstract type over \<^typ>\<open>integer\<close>.
Useful for code setups which involve e.g.~indexing
of target-language arrays. Part of \<open>HOL-Main\<close>.
- \item[@{theory "HOL.String"}] provides an additional datatype @{typ
- String.literal} which is isomorphic to lists of 7-bit (ASCII) characters;
- @{typ String.literal}s are mapped to target-language strings.
+ \item[\<^theory>\<open>HOL.String\<close>] provides an additional datatype \<^typ>\<open>String.literal\<close> which is isomorphic to lists of 7-bit (ASCII) characters;
+ \<^typ>\<open>String.literal\<close>s are mapped to target-language strings.
- Literal values of type @{typ String.literal} can be written
+ Literal values of type \<^typ>\<open>String.literal\<close> can be written
as \<open>STR ''\<dots>''\<close> for sequences of printable characters and
\<open>STR 0x\<dots>\<close> for one single ASCII code point given
- as hexadecimal numeral; @{typ String.literal} supports concatenation
+ as hexadecimal numeral; \<^typ>\<open>String.literal\<close> supports concatenation
\<open>\<dots> + \<dots>\<close> for all standard target languages.
Note that the particular notion of \qt{string} is target-language
@@ -181,37 +179,36 @@
like verifying parsing algorithms require a dedicated
target-language specific model.
- Nevertheless @{typ String.literal}s can be analyzed; the core operations
- for this are @{term_type String.asciis_of_literal} and
- @{term_type String.literal_of_asciis} which are implemented
- in a target-language-specific way; particularly @{const String.asciis_of_literal}
+ Nevertheless \<^typ>\<open>String.literal\<close>s can be analyzed; the core operations
+ for this are \<^term_type>\<open>String.asciis_of_literal\<close> and
+ \<^term_type>\<open>String.literal_of_asciis\<close> which are implemented
+ in a target-language-specific way; particularly \<^const>\<open>String.asciis_of_literal\<close>
checks its argument at runtime to make sure that it does
not contain non-ASCII characters, to safeguard consistency.
- On top of these, more abstract conversions like @{term_type
- String.explode} and @{term_type String.implode}
+ On top of these, more abstract conversions like \<^term_type>\<open>String.explode\<close> and \<^term_type>\<open>String.implode\<close>
are implemented.
Part of \<open>HOL-Main\<close>.
- \item[\<open>Code_Target_Int\<close>] implements type @{typ int}
- by @{typ integer} and thus by target-language built-in integers.
+ \item[\<open>Code_Target_Int\<close>] implements type \<^typ>\<open>int\<close>
+ by \<^typ>\<open>integer\<close> and thus by target-language built-in integers.
\item[\<open>Code_Binary_Nat\<close>] implements type
- @{typ nat} using a binary rather than a linear representation,
+ \<^typ>\<open>nat\<close> using a binary rather than a linear representation,
which yields a considerable speedup for computations.
- Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
+ Pattern matching with \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> is eliminated
by a preprocessor.\label{abstract_nat}
- \item[\<open>Code_Target_Nat\<close>] implements type @{typ nat}
- by @{typ integer} and thus by target-language built-in integers.
- Pattern matching with @{term "0::nat"} / @{const "Suc"} is eliminated
+ \item[\<open>Code_Target_Nat\<close>] implements type \<^typ>\<open>nat\<close>
+ by \<^typ>\<open>integer\<close> and thus by target-language built-in integers.
+ Pattern matching with \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> is eliminated
by a preprocessor.
\item[\<open>Code_Target_Numeral\<close>] is a convenience theory
containing both \<open>Code_Target_Nat\<close> and
\<open>Code_Target_Int\<close>.
- \item[@{theory "HOL-Library.IArray"}] provides a type @{typ "'a iarray"}
+ \item[\<^theory>\<open>HOL-Library.IArray\<close>] provides a type \<^typ>\<open>'a iarray\<close>
isomorphic to lists but implemented by (effectively immutable)
arrays \emph{in SML only}.
@@ -245,9 +242,7 @@
distinguished entities which have nothing to do with the SML-built-in
notion of \qt{bool}. This results in less readable code;
additionally, eager evaluation may cause programs to loop or break
- which would perfectly terminate when the existing SML @{verbatim
- "bool"} would be used. To map the HOL @{typ bool} on SML @{verbatim
- "bool"}, we may use \qn{custom serialisations}:
+ which would perfectly terminate when the existing SML \<^verbatim>\<open>bool\<close> would be used. To map the HOL \<^typ>\<open>bool\<close> on SML \<^verbatim>\<open>bool\<close>, we may use \qn{custom serialisations}:
\<close>
code_printing %quotett
@@ -263,7 +258,7 @@
custom serialisation starts with a target language identifier
followed by an expression, which during code serialisation is
inserted whenever the type constructor would occur. Each
- ``@{verbatim "_"}'' in a serialisation expression is treated as a
+ ``\<^verbatim>\<open>_\<close>'' in a serialisation expression is treated as a
placeholder for the constant's or the type constructor's arguments.
\<close>
@@ -300,7 +295,7 @@
text \<open>
\noindent Next, we try to map HOL pairs to SML pairs, using the
- infix ``@{verbatim "*"}'' type constructor and parentheses:
+ infix ``\<^verbatim>\<open>*\<close>'' type constructor and parentheses:
\<close>
(*<*)
code_printing %invisible
@@ -312,11 +307,11 @@
| constant Pair \<rightharpoonup> (SML) "!((_),/ (_))"
text \<open>
- \noindent The initial bang ``@{verbatim "!"}'' tells the serialiser
+ \noindent The initial bang ``\<^verbatim>\<open>!\<close>'' tells the serialiser
never to put parentheses around the whole expression (they are
already present), while the parentheses around argument place
holders tell not to put parentheses around the arguments. The slash
- ``@{verbatim "/"}'' (followed by arbitrary white space) inserts a
+ ``\<^verbatim>\<open>/\<close>'' (followed by arbitrary white space) inserts a
space which may be used as a break if necessary during pretty
printing.
@@ -326,9 +321,9 @@
serialisations are completely axiomatic.
A further noteworthy detail is that any special character in a
- custom serialisation may be quoted using ``@{verbatim "'"}''; thus,
- in ``@{verbatim "fn '_ => _"}'' the first ``@{verbatim "_"}'' is a
- proper underscore while the second ``@{verbatim "_"}'' is a
+ custom serialisation may be quoted using ``\<^verbatim>\<open>'\<close>''; thus,
+ in ``\<^verbatim>\<open>fn '_ => _\<close>'' the first ``\<^verbatim>\<open>_\<close>'' is a
+ proper underscore while the second ``\<^verbatim>\<open>_\<close>'' is a
placeholder.
\<close>
@@ -337,8 +332,8 @@
text \<open>
For convenience, the default \<open>HOL\<close> setup for \<open>Haskell\<close>
- maps the @{class equal} class to its counterpart in \<open>Haskell\<close>,
- giving custom serialisations for the class @{class equal}
+ maps the \<^class>\<open>equal\<close> class to its counterpart in \<open>Haskell\<close>,
+ giving custom serialisations for the class \<^class>\<open>equal\<close>
and its operation @{const [source] HOL.equal}.
\<close>
@@ -348,7 +343,7 @@
text \<open>
\noindent A problem now occurs whenever a type which is an instance
- of @{class equal} in \<open>HOL\<close> is mapped on a \<open>Haskell\<close>-built-in type which is also an instance of \<open>Haskell\<close>
+ of \<^class>\<open>equal\<close> in \<open>HOL\<close> is mapped on a \<open>Haskell\<close>-built-in type which is also an instance of \<open>Haskell\<close>
\<open>Eq\<close>:
\<close>
--- a/src/Doc/Codegen/Computations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Computations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -80,7 +80,7 @@
placeholder for its corresponding type in ML under code generation.
\<^item> Then the corresponding computation is an ML function of type
- @{ML_type "Proof.context -> term -> 'ml"}
+ \<^ML_type>\<open>Proof.context -> term -> 'ml\<close>
partially implementing the morphism \<open>\<Phi> :: \<tau> \<rightarrow> T\<close> for all
\<^emph>\<open>input terms\<close> consisting only of input constants and applications.
@@ -154,7 +154,7 @@
Hence the functional argument accepts the following parameters
- \<^item> A postprocessor function @{ML_type "term -> term"}.
+ \<^item> A postprocessor function \<^ML_type>\<open>term -> term\<close>.
\<^item> The resulting value as optional argument.
@@ -165,7 +165,7 @@
\<close>
ML_val %quotetypewriter \<open>
- comp_nat @{context} @{term "sum_list [Suc 0, Suc (Suc 0)] * Suc (Suc 0)"}
+ comp_nat \<^context> \<^term>\<open>sum_list [Suc 0, Suc (Suc 0)] * Suc (Suc 0)\<close>
\<close>
text \<open>
@@ -193,7 +193,7 @@
(fn post => post o HOLogic.mk_nat o int_of_nat o the);
val comp_nat_list = @{computation "nat list"}
- (fn post => post o HOLogic.mk_list @{typ nat} o
+ (fn post => post o HOLogic.mk_list \<^typ>\<open>nat\<close> o
map (HOLogic.mk_nat o int_of_nat) o the);
end
@@ -272,11 +272,11 @@
ML %quotetypewriter \<open>
local
- fun raw_dvd (b, ct) = Thm.mk_binop @{cterm "Pure.eq :: bool \<Rightarrow> bool \<Rightarrow> prop"}
- ct (if b then @{cterm True} else @{cterm False});
+ fun raw_dvd (b, ct) = Thm.mk_binop \<^cterm>\<open>Pure.eq :: bool \<Rightarrow> bool \<Rightarrow> prop\<close>
+ ct (if b then \<^cterm>\<open>True\<close> else \<^cterm>\<open>False\<close>);
val (_, dvd_oracle) = Context.>>> (Context.map_theory_result
- (Thm.add_oracle (@{binding dvd}, raw_dvd)));
+ (Thm.add_oracle (\<^binding>\<open>dvd\<close>, raw_dvd)));
in
@@ -293,7 +293,7 @@
text \<open>
\<^item> Antiquotation @{ML_antiquotation computation_conv} basically yields
- a conversion of type @{ML_type "Proof.context -> cterm -> thm"}
+ a conversion of type \<^ML_type>\<open>Proof.context -> cterm -> thm\<close>
(see further @{cite "isabelle-implementation"}).
\<^item> The antiquotation expects one functional argument to bridge the
@@ -311,8 +311,8 @@
\<close> (*<*)
(*>*) ML_val %quotetypewriter \<open>
- conv_dvd @{context} @{cterm "7 dvd ( 62437867527846782 :: int)"};
- conv_dvd @{context} @{cterm "7 dvd (-62437867527846783 :: int)"};
+ conv_dvd \<^context> \<^cterm>\<open>7 dvd ( 62437867527846782 :: int)\<close>;
+ conv_dvd \<^context> \<^cterm>\<open>7 dvd (-62437867527846783 :: int)\<close>;
\<close>
text \<open>
@@ -341,7 +341,7 @@
fun integer_of_int (@{code int_of_integer} k) = k
- val cterm_of_int = Thm.cterm_of @{context} o HOLogic.mk_numeral o integer_of_int;
+ val cterm_of_int = Thm.cterm_of \<^context> o HOLogic.mk_numeral o integer_of_int;
val divisor = Thm.dest_arg o Thm.dest_arg;
@@ -366,8 +366,8 @@
\<close>
ML_val %quotetypewriter \<open>
- conv_div @{context}
- @{cterm "46782454343499999992777742432342242323423425 div (7 :: int)"}
+ conv_div \<^context>
+ \<^cterm>\<open>46782454343499999992777742432342242323423425 div (7 :: int)\<close>
\<close>
text \<open>
@@ -388,7 +388,7 @@
text \<open>
The \<open>computation_check\<close> antiquotation is convenient if
only a positive checking of propositions is desired, because then
- the result type is fixed (@{typ prop}) and all the technical
+ the result type is fixed (\<^typ>\<open>prop\<close>) and all the technical
matter concerning postprocessing and oracles is done in the framework
once and for all:
\<close>
@@ -402,17 +402,17 @@
\<close>
text \<open>
- \noindent The HOL judgement @{term Trueprop} embeds an expression
- of type @{typ bool} into @{typ prop}.
+ \noindent The HOL judgement \<^term>\<open>Trueprop\<close> embeds an expression
+ of type \<^typ>\<open>bool\<close> into \<^typ>\<open>prop\<close>.
\<close>
ML_val %quotetypewriter \<open>
- check_nat @{context} @{cprop "less (Suc (Suc 0)) (Suc (Suc (Suc 0)))"}
+ check_nat \<^context> \<^cprop>\<open>less (Suc (Suc 0)) (Suc (Suc (Suc 0)))\<close>
\<close>
text \<open>
\noindent Note that such computations can only \<^emph>\<open>check\<close>
- for @{typ prop}s to hold but not \<^emph>\<open>decide\<close>.
+ for \<^typ>\<open>prop\<close>s to hold but not \<^emph>\<open>decide\<close>.
\<close>
@@ -436,7 +436,7 @@
naively: the compilation pattern for computations fails whenever
target-language literals are involved; since various
common code generator setups (see \secref{sec:common_adaptation})
- implement @{typ nat} and @{typ int} by target-language literals,
+ implement \<^typ>\<open>nat\<close> and \<^typ>\<open>int\<close> by target-language literals,
this problem manifests whenever numeric types are involved.
In practice, this is circumvented with a dedicated preprocessor
setup for literals (see also \secref{sec:input_constants_pitfalls}).
@@ -446,7 +446,7 @@
too much detail:
\<close>
-paragraph \<open>An example for @{typ nat}\<close>
+paragraph \<open>An example for \<^typ>\<open>nat\<close>\<close>
ML %quotetypewriter \<open>
val check_nat = @{computation_check terms:
@@ -456,10 +456,10 @@
\<close>
ML_val %quotetypewriter \<open>
- check_nat @{context} @{cprop "even (Suc 0 + 1 + 2 + 3 + 4 + 5)"}
+ check_nat \<^context> \<^cprop>\<open>even (Suc 0 + 1 + 2 + 3 + 4 + 5)\<close>
\<close>
-paragraph \<open>An example for @{typ int}\<close>
+paragraph \<open>An example for \<^typ>\<open>int\<close>\<close>
ML %quotetypewriter \<open>
val check_int = @{computation_check terms:
@@ -469,10 +469,10 @@
\<close>
ML_val %quotetypewriter \<open>
- check_int @{context} @{cprop "even ((0::int) + 1 + 2 + 3 + -1 + -2 + -3)"}
+ check_int \<^context> \<^cprop>\<open>even ((0::int) + 1 + 2 + 3 + -1 + -2 + -3)\<close>
\<close>
-paragraph \<open>An example for @{typ String.literal}\<close>
+paragraph \<open>An example for \<^typ>\<open>String.literal\<close>\<close>
definition %quote is_cap_letter :: "String.literal \<Rightarrow> bool"
where "is_cap_letter s \<longleftrightarrow> (case String.asciis_of_literal s
@@ -485,7 +485,7 @@
\<close>
ML_val %quotetypewriter \<open>
- check_literal @{context} @{cprop "is_cap_letter (STR ''Q'')"}
+ check_literal \<^context> \<^cprop>\<open>is_cap_letter (STR ''Q'')\<close>
\<close>
@@ -500,7 +500,7 @@
One option is to hardcode using code antiquotations (see \secref{sec:code_antiq}).
Another option is to use pre-existing infrastructure in HOL:
- @{ML "Reification.conv"} and @{ML "Reification.tac"}.
+ \<^ML>\<open>Reification.conv\<close> and \<^ML>\<open>Reification.tac\<close>.
A simplistic example:
\<close>
@@ -518,23 +518,23 @@
| "interp (Neg f) vs \<longleftrightarrow> \<not> interp f vs"
text \<open>
- \noindent The datatype @{type form_ord} represents formulae whose semantics is given by
- @{const interp}. Note that values are represented by variable indices (@{typ nat})
- whose concrete values are given in list @{term vs}.
+ \noindent The datatype \<^type>\<open>form_ord\<close> represents formulae whose semantics is given by
+ \<^const>\<open>interp\<close>. Note that values are represented by variable indices (\<^typ>\<open>nat\<close>)
+ whose concrete values are given in list \<^term>\<open>vs\<close>.
\<close>
ML %quotetypewriter (*<*) \<open>\<close>
lemma "thm": fixes x y z :: "'a::order" shows "x < y \<and> x < z \<equiv> interp (And (Less (Suc 0) (Suc (Suc 0))) (Less (Suc 0) 0)) [z, x, y]"
ML_prf %quotetypewriter
(*>*) \<open>val thm =
- Reification.conv @{context} @{thms interp.simps} @{cterm "x < y \<and> x < z"}\<close> (*<*)
-by (tactic \<open>ALLGOALS (resolve_tac @{context} [thm])\<close>)
+ Reification.conv \<^context> @{thms interp.simps} \<^cterm>\<open>x < y \<and> x < z\<close>\<close> (*<*)
+by (tactic \<open>ALLGOALS (resolve_tac \<^context> [thm])\<close>)
(*>*)
text \<open>
- \noindent By virtue of @{fact interp.simps}, @{ML "Reification.conv"} provides a conversion
+ \noindent By virtue of @{fact interp.simps}, \<^ML>\<open>Reification.conv\<close> provides a conversion
which, for this concrete example, yields @{thm thm [no_vars]}. Note that the argument
- to @{const interp} does not contain any free variables and can thus be evaluated
+ to \<^const>\<open>interp\<close> does not contain any free variables and can thus be evaluated
using evaluation.
A less meager example can be found in the AFP, session \<open>Regular-Sets\<close>,
--- a/src/Doc/Codegen/Evaluation.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Evaluation.thy Sat Jan 05 17:24:33 2019 +0100
@@ -12,8 +12,8 @@
Recalling \secref{sec:principle}, code generation turns a system of
equations into a program with the \emph{same} equational semantics.
As a consequence, this program can be used as a \emph{rewrite
- engine} for terms: rewriting a term @{term "t"} using a program to a
- term @{term "t'"} yields the theorems @{prop "t \<equiv> t'"}. This
+ engine} for terms: rewriting a term \<^term>\<open>t\<close> using a program to a
+ term \<^term>\<open>t'\<close> yields the theorems \<^prop>\<open>t \<equiv> t'\<close>. This
application of code generation in the following is referred to as
\emph{evaluation}.
\<close>
@@ -158,10 +158,10 @@
\begin{tabular}{l||c|c|c}
& \<open>simp\<close> & \<open>nbe\<close> & \<open>code\<close> \tabularnewline \hline \hline
interactive evaluation & @{command value} \<open>[simp]\<close> & @{command value} \<open>[nbe]\<close> & @{command value} \<open>[code]\<close> \tabularnewline
- plain evaluation & & & \ttsize@{ML "Code_Evaluation.dynamic_value"} \tabularnewline \hline
+ plain evaluation & & & \ttsize\<^ML>\<open>Code_Evaluation.dynamic_value\<close> \tabularnewline \hline
evaluation method & @{method code_simp} & @{method normalization} & @{method eval} \tabularnewline
- property conversion & & & \ttsize@{ML "Code_Runtime.dynamic_holds_conv"} \tabularnewline \hline
- conversion & \ttsize@{ML "Code_Simp.dynamic_conv"} & \ttsize@{ML "Nbe.dynamic_conv"}
+ property conversion & & & \ttsize\<^ML>\<open>Code_Runtime.dynamic_holds_conv\<close> \tabularnewline \hline
+ conversion & \ttsize\<^ML>\<open>Code_Simp.dynamic_conv\<close> & \ttsize\<^ML>\<open>Nbe.dynamic_conv\<close>
\end{tabular}
\<close>
@@ -181,8 +181,8 @@
text \<open>
For \<open>simp\<close> and \<open>nbe\<close> static evaluation can be achieved using
- @{ML Code_Simp.static_conv} and @{ML Nbe.static_conv}.
- Note that @{ML Nbe.static_conv} by its very nature
+ \<^ML>\<open>Code_Simp.static_conv\<close> and \<^ML>\<open>Nbe.static_conv\<close>.
+ Note that \<^ML>\<open>Nbe.static_conv\<close> by its very nature
requires an invocation of the ML compiler for every call,
which can produce significant overhead.
\<close>
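
The three interactive evaluation mechanisms from the table above can be compared directly on a toy term (hypothetical example):

value [simp] "rev [1::nat, 2, 3]"
value [nbe] "rev [1::nat, 2, 3]"
value [code] "rev [1::nat, 2, 3]"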
--- a/src/Doc/Codegen/Foundations.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Foundations.thy Sat Jan 05 17:24:33 2019 +0100
@@ -119,7 +119,7 @@
Pre- and postprocessor can be setup to transfer between
expressions suitable for logical reasoning and expressions
suitable for execution. As example, take list membership; logically
- it is expressed as @{term "x \<in> set xs"}. But for execution
+ it is expressed as \<^term>\<open>x \<in> set xs\<close>. But for execution
the intermediate set is not desirable. Hence the following
specification:
\<close>
@@ -144,7 +144,7 @@
\emph{Function transformers} provide a very general
interface, transforming a list of function theorems to another list
of function theorems, provided that neither the heading constant nor
- its type change. The @{term "0::nat"} / @{const Suc} pattern
+ its type change. The \<^term>\<open>0::nat\<close> / \<^const>\<open>Suc\<close> pattern
used in theory \<open>Code_Abstract_Nat\<close> (see \secref{abstract_nat})
uses this interface.
@@ -195,8 +195,8 @@
\<close>
text \<open>
- \noindent You may note that the equality test @{term "xs = []"} has
- been replaced by the predicate @{term "List.null xs"}. This is due
+ \noindent You may note that the equality test \<^term>\<open>xs = []\<close> has
+ been replaced by the predicate \<^term>\<open>List.null xs\<close>. This is due
to the default setup of the \qn{preprocessor}.
This possibility to select arbitrary code equations is the key
@@ -218,8 +218,7 @@
code_thms %quote dequeue
text \<open>
- \noindent This prints a table with the code equations for @{const
- dequeue}, including \emph{all} code equations those equations depend
+ \noindent This prints a table with the code equations for \<^const>\<open>dequeue\<close>, including \emph{all} code equations that those equations depend
on recursively. These dependencies themselves can be visualized using
the @{command_def code_deps} command.
\<close>
@@ -242,7 +241,7 @@
text \<open>
\noindent During preprocessing, the membership test is rewritten,
- resulting in @{const List.member}, which itself performs an explicit
+ resulting in \<^const>\<open>List.member\<close>, which itself performs an explicit
equality check, as can be seen in the corresponding \<open>SML\<close> code:
\<close>
@@ -253,11 +252,10 @@
text \<open>
\noindent Obviously, polymorphic equality is implemented the Haskell
way using a type class. How is this achieved? HOL introduces an
- explicit class @{class equal} with a corresponding operation @{const
- HOL.equal} such that @{thm equal [no_vars]}. The preprocessing
- framework does the rest by propagating the @{class equal} constraints
+ explicit class \<^class>\<open>equal\<close> with a corresponding operation \<^const>\<open>HOL.equal\<close> such that @{thm equal [no_vars]}. The preprocessing
+ framework does the rest by propagating the \<^class>\<open>equal\<close> constraints
through all dependent code equations. For datatypes, instances of
- @{class equal} are implicitly derived when possible. For other types,
+ \<^class>\<open>equal\<close> are implicitly derived when possible. For other types,
you may instantiate \<open>equal\<close> manually like any other type class.
\<close>
@@ -281,7 +279,7 @@
text \<open>
\noindent In the corresponding code, there is no equation
- for the pattern @{term "AQueue [] []"}:
+ for the pattern \<^term>\<open>AQueue [] []\<close>:
\<close>
text %quotetypewriter \<open>
@@ -307,10 +305,9 @@
by (simp_all add: strict_dequeue'_def split: list.splits)
text \<open>
- Observe that on the right hand side of the definition of @{const
- "strict_dequeue'"}, the unspecified constant @{const empty_queue} occurs.
- An attempt to generate code for @{const strict_dequeue'} would
- make the code generator complain that @{const empty_queue} has
+ Observe that on the right hand side of the definition of \<^const>\<open>strict_dequeue'\<close>, the unspecified constant \<^const>\<open>empty_queue\<close> occurs.
+ An attempt to generate code for \<^const>\<open>strict_dequeue'\<close> would
+ make the code generator complain that \<^const>\<open>empty_queue\<close> has
no associated code equations. In most situations unimplemented
constants indeed indicate a broken program; however, such
constants can also be thought of as function definitions which always fail,
@@ -339,7 +336,7 @@
declare %quote [[code abort: undefined]]
text \<open>
- \noindent -- hence @{const undefined} can always be used in such
+ \noindent -- hence \<^const>\<open>undefined\<close> can always be used in such
situations.
\<close>
--- a/src/Doc/Codegen/Further.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Further.thy Sat Jan 05 17:24:33 2019 +0100
@@ -30,7 +30,7 @@
arbitrary ML code as well.
A typical example for @{command code_reflect} can be found in the
- @{theory HOL.Predicate} theory.
+ \<^theory>\<open>HOL.Predicate\<close> theory.
\<close>
@@ -188,7 +188,7 @@
text \<open>
\noindent This amends the interpretation morphisms such that
occurrences of the foundational term @{term [source] "power.powers (\<lambda>n (f :: 'a \<Rightarrow> 'a). f ^^ n)"}
- are folded to a newly defined constant @{const funpows}.
+ are folded to a newly defined constant \<^const>\<open>funpows\<close>.
After this setup procedure, code generation can continue as usual:
\<close>
--- a/src/Doc/Codegen/Inductive_Predicate.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Inductive_Predicate.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,7 +25,7 @@
this compiler are described in detail in
@{cite "Berghofer-Bulwahn-Haftmann:2009:TPHOL"}.
- Consider the simple predicate @{const append} given by these two
+ Consider the simple predicate \<^const>\<open>append\<close> given by these two
introduction rules:
\<close>
@@ -49,7 +49,7 @@
output. Modes are similar to types, but use the notation \<open>i\<close>
for input and \<open>o\<close> for output.
- For @{term "append"}, the compiler can infer the following modes:
+ For \<^term>\<open>append\<close>, the compiler can infer the following modes:
\begin{itemize}
\item \<open>i \<Rightarrow> i \<Rightarrow> i \<Rightarrow> bool\<close>
\item \<open>i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool\<close>
@@ -203,8 +203,7 @@
predicate could be inferred that are not disambiguated by the
pattern of the set comprehension. To disambiguate the modes for the
arguments of a predicate, you can state the modes explicitly in the
- @{command "values"} command. Consider the simple predicate @{term
- "succ"}:
+ @{command "values"} command. Consider the simple predicate \<^term>\<open>succ\<close>:
\<close>
inductive %quote succ :: "nat \<Rightarrow> nat \<Rightarrow> bool" where
@@ -243,14 +242,13 @@
(if append [Suc 0, 2] ys zs then Some ys else None)\<close>}
\item If you know that the execution returns only one value (it is
- deterministic), then you can use the combinator @{term
- "Predicate.the"}, e.g., a functional concatenation of lists is
+ deterministic), then you can use the combinator \<^term>\<open>Predicate.the\<close>, e.g., a functional concatenation of lists is
defined with
@{term [display] "functional_concat xs ys = Predicate.the (append_i_i_o xs ys)"}
Note that if the evaluation does not return a unique value, it
- raises a run-time error @{term "not_unique"}.
+ raises a run-time error \<^term>\<open>not_unique\<close>.
\end{itemize}
\<close>
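
As a compact recapitulation of the chapter's running example, the \<^const>\<open>append\<close> predicate, its compilation, and an enumeration in mode \<open>i \<Rightarrow> i \<Rightarrow> o \<Rightarrow> bool\<close> look as follows (a sketch):

inductive append :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list \<Rightarrow> bool" where
  "append [] ys ys"
| "append xs ys zs \<Longrightarrow> append (x # xs) ys (x # zs)"

code_pred append .

values "{zs. append [(1::nat), 2] [3] zs}"  (* yields {[1, 2, 3]} *)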
--- a/src/Doc/Codegen/Introduction.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Introduction.thy Sat Jan 05 17:24:33 2019 +0100
@@ -186,8 +186,7 @@
\<close>
text \<open>
- \noindent Note the parameters with trailing underscore (@{verbatim
- "A_"}), which are the dictionary parameters.
+ \noindent Note the parameters with trailing underscore (\<^verbatim>\<open>A_\<close>), which are the dictionary parameters.
\<close>
--- a/src/Doc/Codegen/Refinement.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Codegen/Refinement.thy Sat Jan 05 17:24:33 2019 +0100
@@ -56,8 +56,7 @@
by (simp_all add: fib_step_def)
text \<open>
- \noindent What remains is to implement @{const fib} by @{const
- fib_step} as follows:
+ \noindent What remains is to implement \<^const>\<open>fib\<close> by \<^const>\<open>fib_step\<close> as follows:
\<close>
lemma %quote [code]:
@@ -110,7 +109,7 @@
code_datatype %quote AQueue
text \<open>
- \noindent Here we define a \qt{constructor} @{const "AQueue"} which
+ \noindent Here we define a \qt{constructor} \<^const>\<open>AQueue\<close> which
is defined in terms of \<open>Queue\<close> and interprets its arguments
according to what the \emph{content} of an amortised queue is supposed
to be.
@@ -147,7 +146,7 @@
\noindent It is good style, although no absolute requirement, to
provide code equations for the original artefacts of the implemented
type, if possible; in our case, these are the datatype constructor
- @{const Queue} and the case combinator @{const case_queue}:
+ \<^const>\<open>Queue\<close> and the case combinator \<^const>\<open>case_queue\<close>:
\<close>
lemma %quote Queue_AQueue [code]:
@@ -168,10 +167,10 @@
text \<open>
The same techniques can also be applied to types which are not
- specified as datatypes, e.g.~type @{typ int} is originally specified
+ specified as datatypes, e.g.~type \<^typ>\<open>int\<close> is originally specified
as quotient type by means of @{command_def typedef}, but for code
generation constants allowing construction of binary numeral values
- are used as constructors for @{typ int}.
+ are used as constructors for \<^typ>\<open>int\<close>.
This approach however fails if the representation of a type demands
invariants; this issue is discussed in the next section.
@@ -183,20 +182,20 @@
text \<open>
Datatype representations involving invariants require a dedicated
setup for the type and its primitive operations. As a running
- example, we implement a type @{typ "'a dlist"} of lists consisting
+ example, we implement a type \<^typ>\<open>'a dlist\<close> of lists consisting
of distinct elements.
- The specification of @{typ "'a dlist"} itself can be found in theory
- @{theory "HOL-Library.Dlist"}.
+ The specification of \<^typ>\<open>'a dlist\<close> itself can be found in theory
+ \<^theory>\<open>HOL-Library.Dlist\<close>.
The first step is to decide on which representation the abstract
- type (in our example @{typ "'a dlist"}) should be implemented.
- Here we choose @{typ "'a list"}. Then a conversion from the concrete
+ type (in our example \<^typ>\<open>'a dlist\<close>) should be implemented.
+ Here we choose \<^typ>\<open>'a list\<close>. Then a conversion from the concrete
type to the abstract type must be specified, here:
\<close>
text %quote \<open>
- @{term_type Dlist}
+ \<^term_type>\<open>Dlist\<close>
\<close>
text \<open>
@@ -205,7 +204,7 @@
\<close>
text %quote \<open>
- @{term_type list_of_dlist}
+ \<^term_type>\<open>list_of_dlist\<close>
\<close>
text \<open>
@@ -219,19 +218,19 @@
text \<open>
\noindent Note that so far the invariant on representations
- (@{term_type distinct}) has never been mentioned explicitly:
+ (\<^term_type>\<open>distinct\<close>) has never been mentioned explicitly:
the invariant is only referred to implicitly: all values in
- set @{term "{xs. list_of_dlist (Dlist xs) = xs}"} are invariant,
- and in our example this is exactly @{term "{xs. distinct xs}"}.
+ set \<^term>\<open>{xs. list_of_dlist (Dlist xs) = xs}\<close> are invariant,
+ and in our example this is exactly \<^term>\<open>{xs. distinct xs}\<close>.
- The primitive operations on @{typ "'a dlist"} are specified
- indirectly using the projection @{const list_of_dlist}. For
- the empty \<open>dlist\<close>, @{const Dlist.empty}, we finally want
+ The primitive operations on \<^typ>\<open>'a dlist\<close> are specified
+ indirectly using the projection \<^const>\<open>list_of_dlist\<close>. For
+ the empty \<open>dlist\<close>, \<^const>\<open>Dlist.empty\<close>, we finally want
the code equation
\<close>
text %quote \<open>
- @{term "Dlist.empty = Dlist []"}
+ \<^term>\<open>Dlist.empty = Dlist []\<close>
\<close>
text \<open>
@@ -244,7 +243,7 @@
text \<open>
\noindent This equation logically encodes both the desired code
- equation and that the expression @{const Dlist} is applied to obeys
+ equation and that the expression to which \<^const>\<open>Dlist\<close> is applied obeys
the implicit invariant. Equations for insertion and removal are
similar:
\<close>
@@ -270,9 +269,9 @@
for the meta theory of datatype refinement involving invariants.
Typical data structures implemented by representations involving
- invariants are available in the library, theory @{theory "HOL-Library.Mapping"}
- specifies key-value-mappings (type @{typ "('a, 'b) mapping"});
- these can be implemented by red-black-trees (theory @{theory "HOL-Library.RBT"}).
+ invariants are available in the library, theory \<^theory>\<open>HOL-Library.Mapping\<close>
+ specifies key-value-mappings (type \<^typ>\<open>('a, 'b) mapping\<close>);
+ these can be implemented by red-black-trees (theory \<^theory>\<open>HOL-Library.RBT\<close>).
\<close>
end
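
Concretely, the code equations phrased via the projection \<^const>\<open>list_of_dlist\<close> take the shape below; this sketch paraphrases the setup in \<^theory>\<open>HOL-Library.Dlist\<close>, and the proof step is assumed to go through with the default simpset:

lemma [code abstract]:
  "list_of_dlist Dlist.empty = []"
  by (simp add: Dlist.empty_def)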
--- a/src/Doc/Corec/Corec.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Corec/Corec.thy Sat Jan 05 17:24:33 2019 +0100
@@ -34,7 +34,7 @@
primitive corecursion. It describes @{command corec} and related commands:\
@{command corecursive}, @{command friend_of_corec}, and @{command coinduction_upto}.
It also covers the @{method corec_unique} proof method.
-The package is not part of @{theory Main}; it is located in
+The package is not part of \<^theory>\<open>Main\<close>; it is located in
\<^file>\<open>~~/src/HOL/Library/BNF_Corec.thy\<close>.
The @{command corec} command generalizes \keyw{primcorec} in three main
@@ -149,7 +149,7 @@
\noindent
Pointwise sum meets the friendliness criterion. We register it as a friend using
the @{command friend_of_corec} command. The command requires us to give a
-specification of @{const ssum} where a constructor (@{const SCons}) occurs at
+specification of \<^const>\<open>ssum\<close> where a constructor (\<^const>\<open>SCons\<close>) occurs at
the outermost position on the right-hand side. Here, we can simply reuse the
\keyw{primcorec} specification above:
\<close>
@@ -171,7 +171,7 @@
@{thm [source] relator_eq} theorem collection before it invokes
@{method transfer_prover}.
-After registering @{const ssum} as a friend, we can use it in the corecursive
+After registering \<^const>\<open>ssum\<close> as a friend, we can use it in the corecursive
call context, either inside or outside the constructor guard:
\<close>
@@ -204,7 +204,7 @@
The parametricity subgoal is given to \<open>transfer_prover_eq\<close>
(Section~\ref{ssec:transfer-prover-eq}).
-The @{const sprod} and @{const sexp} functions provide shuffle product and
+The \<^const>\<open>sprod\<close> and \<^const>\<open>sexp\<close> functions provide shuffle product and
exponentiation on streams. We can use them to define the stream of factorial
numbers in two different ways:
\<close>
@@ -230,7 +230,7 @@
\noindent
In general, the arguments may be any bounded natural functor (BNF)
@{cite "isabelle-datatypes"}, with the restriction that the target codatatype
-(@{typ "nat stream"}) may occur only in a \emph{live} position of the BNF. For
+(\<^typ>\<open>nat stream\<close>) may occur only in a \emph{live} position of the BNF. For
this reason, the following function, on unbounded sets, cannot be registered as
a friend:
\<close>
@@ -252,7 +252,7 @@
Node (lab: 'a) (sub: "'a tree list")
text \<open>
-We first define the pointwise sum of two trees analogously to @{const ssum}:
+We first define the pointwise sum of two trees analogously to \<^const>\<open>ssum\<close>:
\<close>
corec (friend) tsum :: "('a :: plus) tree \<Rightarrow> 'a tree \<Rightarrow> 'a tree" where
@@ -261,13 +261,13 @@
text \<open>
\noindent
-Here, @{const map} is the standard map function on lists, and @{const zip}
-converts two parallel lists into a list of pairs. The @{const tsum} function is
+Here, \<^const>\<open>map\<close> is the standard map function on lists, and \<^const>\<open>zip\<close>
+converts two parallel lists into a list of pairs. The \<^const>\<open>tsum\<close> function is
primitively corecursive. Instead of @{command corec} \<open>(friend)\<close>, we could
also have used \keyw{primcorec} and @{command friend_of_corec}, as we did for
-@{const ssum}.
+\<^const>\<open>ssum\<close>.
-Once @{const tsum} is registered as friendly, we can use it in the corecursive
+Once \<^const>\<open>tsum\<close> is registered as friendly, we can use it in the corecursive
call context of another function:
\<close>
@@ -280,7 +280,7 @@
@{command corec}, @{command corecursive}, and @{command friend_of_corec}. In
particular, nesting through the function type can be expressed using
\<open>\<lambda>\<close>-abstractions and function applications rather than through composition
-(@{term "(\<circ>)"}, the map function for \<open>\<Rightarrow>\<close>). For example:
+(\<^term>\<open>(\<circ>)\<close>, the map function for \<open>\<Rightarrow>\<close>). For example:
\<close>
codatatype 'a language =
@@ -322,7 +322,7 @@
finite number of unguarded recursive calls perform this calculation before
reaching a guarded corecursive call. Intuitively, the unguarded recursive call
can be unfolded to arbitrary finite depth, ultimately yielding a purely
-corecursive definition. An example is the @{term primes} function from Di
+corecursive definition. An example is the \<^term>\<open>primes\<close> function from Di
Gianantonio and Miculan @{cite "di-gianantonio-miculan-2003"}:
\<close>
@@ -343,15 +343,15 @@
The @{command corecursive} command is a variant of @{command corec} that allows
us to specify a termination argument for any unguarded self-call.
-When called with \<open>m = 1\<close> and \<open>n = 2\<close>, the @{const primes}
+When called with \<open>m = 1\<close> and \<open>n = 2\<close>, the \<^const>\<open>primes\<close>
function computes the stream of prime numbers. The unguarded call in the
-\<open>else\<close> branch increments @{term n} until it is coprime to the first
-argument @{term m} (i.e., the greatest common divisor of @{term m} and
-@{term n} is \<open>1\<close>).
+\<open>else\<close> branch increments \<^term>\<open>n\<close> until it is coprime to the first
+argument \<^term>\<open>m\<close> (i.e., the greatest common divisor of \<^term>\<open>m\<close> and
+\<^term>\<open>n\<close> is \<open>1\<close>).
-For any positive integers @{term m} and @{term n}, the numbers @{term m} and
+For any positive integers \<^term>\<open>m\<close> and \<^term>\<open>n\<close>, the numbers \<^term>\<open>m\<close> and
\<open>m * n + 1\<close> are coprime, yielding an upper bound on the number of times
-@{term n} is increased. Hence, the function will take the \<open>else\<close> branch at
+\<^term>\<open>n\<close> is increased. Hence, the function will take the \<open>else\<close> branch at
most finitely often before taking the \<open>then\<close> branch and producing one constructor.
There is a slight complication when \<open>m = 0 \<and> n > 1\<close>: Without the first
disjunct in the \<open>if\<close> condition, the function could stall. (This corner
@@ -410,7 +410,7 @@
specifications, our package provides the more advanced proof principle of
\emph{coinduction up to congruence}---or simply \emph{coinduction up-to}.
-The structural coinduction principle for @{typ "'a stream"}, called
+The structural coinduction principle for \<^typ>\<open>'a stream\<close>, called
@{thm [source] stream.coinduct}, is as follows:
%
\begin{indentblock}
@@ -421,9 +421,9 @@
providing a relation \<open>R\<close> that relates \<open>l\<close> and \<open>r\<close> (first
premise) and that constitutes a bisimulation (second premise). Streams that are
related by a bisimulation cannot be distinguished by taking observations (via
-the selectors @{const shd} and @{const stl}); hence they must be equal.
+the selectors \<^const>\<open>shd\<close> and \<^const>\<open>stl\<close>); hence they must be equal.
-The coinduction up-to principle after registering @{const sskew} as friendly is
+The coinduction up-to principle after registering \<^const>\<open>sskew\<close> as friendly is
available as @{thm [source] sskew.coinduct} and as one of the components of
the theorem collection @{thm [source] stream.coinduct_upto}:
%
@@ -432,10 +432,10 @@
\end{indentblock}
%
This rule is almost identical to structural coinduction, except that the
-corecursive application of @{term R} is generalized to
-@{term "stream.v5.congclp R"}.
+corecursive application of \<^term>\<open>R\<close> is generalized to
+\<^term>\<open>stream.v5.congclp R\<close>.
-The @{const stream.v5.congclp} predicate is equipped with the following
+The \<^const>\<open>stream.v5.congclp\<close> predicate is equipped with the following
introduction rules:
\begin{indentblock}
@@ -471,9 +471,9 @@
The introduction rules are also available as
@{thm [source] sskew.cong_intros}.
-Notice that there is no introduction rule corresponding to @{const sexp},
-because @{const sexp} has a more restrictive result type than @{const sskew}
-(@{typ "nat stream"} vs. @{typ "('a :: {plus,times}) stream"}.
+Notice that there is no introduction rule corresponding to \<^const>\<open>sexp\<close>,
+because \<^const>\<open>sexp\<close> has a more restrictive result type than \<^const>\<open>sskew\<close>
+(\<^typ>\<open>nat stream\<close> vs. \<^typ>\<open>('a :: {plus,times}) stream\<close>).
The version numbers, here \<open>v5\<close>, distinguish the different congruence
closures generated for a given codatatype as more friends are registered. As
@@ -486,10 +486,10 @@
most situations. For this purpose, the package maintains the collection
@{thm [source] stream.coinduct_upto} of coinduction principles ordered by
increasing generality, which works well with Isabelle's philosophy of applying
-the first rule that matches. For example, after registering @{const ssum} as a
-friend, proving the equality @{term "l = r"} on @{typ "nat stream"} might
-require coinduction principle for @{term "nat stream"}, which is up to
-@{const ssum}.
+the first rule that matches. For example, after registering \<^const>\<open>ssum\<close> as a
+friend, proving the equality \<^term>\<open>l = r\<close> on \<^typ>\<open>nat stream\<close> might
+require the coinduction principle for \<^typ>\<open>nat stream\<close>, which is up to
+\<^const>\<open>ssum\<close>.
The collection @{thm [source] stream.coinduct_upto} is guaranteed to be complete
and up to date with respect to the type instances of definitions considered so
@@ -523,18 +523,18 @@
coinduction principles:
%
\begin{itemize}
-\item @{typ "('a, int) tllist"} up to @{const TNil}, @{const TCons}, and
- @{const square_terminal};
-\item @{typ "(nat, 'b) tllist"} up to @{const TNil}, @{const TCons}, and
- @{const square_elems};
-\item @{typ "('a, 'b) tllist"} up to @{const TNil} and @{const TCons}.
+\item \<^typ>\<open>('a, int) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>, and
+ \<^const>\<open>square_terminal\<close>;
+\item \<^typ>\<open>(nat, 'b) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>, and
+ \<^const>\<open>square_elems\<close>;
+\item \<^typ>\<open>('a, 'b) tllist\<close> up to \<^const>\<open>TNil\<close> and \<^const>\<open>TCons\<close>.
\end{itemize}
%
The following variant is missing:
%
\begin{itemize}
-\item @{typ "(nat, int) tllist"} up to @{const TNil}, @{const TCons},
- @{const square_elems}, and @{const square_terminal}.
+\item \<^typ>\<open>(nat, int) tllist\<close> up to \<^const>\<open>TNil\<close>, \<^const>\<open>TCons\<close>,
+ \<^const>\<open>square_elems\<close>, and \<^const>\<open>square_terminal\<close>.
\end{itemize}
%
To generate it without having to define a new function with @{command corec},
@@ -569,23 +569,23 @@
The @{command corec}, @{command corecursive}, and @{command friend_of_corec}
commands generate a property \<open>f.unique\<close> about the function of interest
-@{term f} that can be used to prove that any function that satisfies
-@{term f}'s corecursive specification must be equal to~@{term f}. For example:
+\<^term>\<open>f\<close> that can be used to prove that any function that satisfies
+\<^term>\<open>f\<close>'s corecursive specification must be equal to~\<^term>\<open>f\<close>. For example:
\[@{thm ssum.unique[no_vars]}\]
The uniqueness principles are not restricted to functions defined using
@{command corec} or @{command corecursive} or registered with
-@{command friend_of_corec}. Suppose @{term "t x"} is an arbitrary term
-depending on @{term x}. The @{method corec_unique} proof method, provided by our
+@{command friend_of_corec}. Suppose \<^term>\<open>t x\<close> is an arbitrary term
+depending on \<^term>\<open>x\<close>. The @{method corec_unique} proof method, provided by our
tool, transforms subgoals of the form
-\[@{term "(\<forall>x. f x = H x f) \<Longrightarrow> f x = t x"}\]
+\[\<^term>\<open>(\<forall>x. f x = H x f) \<Longrightarrow> f x = t x\<close>\]
into
-\[@{term "\<forall>x. t x = H x t"}\]
-The higher-order functional @{term H} must be such that @{term "f x = H x f"}
+\[\<^term>\<open>\<forall>x. t x = H x t\<close>\]
+The higher-order functional \<^term>\<open>H\<close> must be such that \<^term>\<open>f x = H x f\<close>
would be a valid @{command corec} specification, but without nested self-calls
or unguarded (recursive) calls. Thus, @{method corec_unique} proves uniqueness
-of @{term t} with respect to the given corecursive equation regardless of how
-@{term t} was defined. For example:
+of \<^term>\<open>t\<close> with respect to the given corecursive equation regardless of how
+\<^term>\<open>t\<close> was defined. For example:
\<close>
lemma
@@ -625,12 +625,12 @@
@{command_def "corecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
(@@{command corec} | @@{command corecursive}) target? \<newline>
@{syntax cr_options}? fix @'where' prop
;
@{syntax_def cr_options}: '(' ((@{syntax plugins} | 'friend' | 'transfer') + ',') ')'
-\<close>}
+\<close>
\medskip
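
\noindent
For instance, under this syntax a friendly operation in the style of the
running example might be introduced as follows (a sketch, assuming imports of
\<open>HOL-Library.BNF_Corec\<close> and \<open>HOL-Library.Stream\<close>):

    (* Sketch: ssum adds two streams pointwise; the 'friend' option
       registers it as a friend at definition time. *)
    corec (friend) ssum :: "('a :: plus) stream \<Rightarrow> 'a stream \<Rightarrow> 'a stream" where
      "ssum xs ys = SCons (shd xs + shd ys) (ssum (stl xs) (stl ys))"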
@@ -677,12 +677,12 @@
@{command_def "friend_of_corec"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command friend_of_corec} target? \<newline>
@{syntax foc_options}? fix @'where' prop
;
@{syntax_def foc_options}: '(' ((@{syntax plugins} | 'transfer') + ',') ')'
-\<close>}
+\<close>
\medskip
@@ -720,9 +720,9 @@
@{command_def "coinduction_upto"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command coinduction_upto} target? name ':' type
-\<close>}
+\<close>
\medskip
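
\noindent
For instance, the missing variant listed earlier can be generated directly
(a sketch; the chosen name is arbitrary):

    (* Generates an up-to coinduction principle and the corresponding
       congruence introduction rules for (nat, int) tllist, without
       defining any new function. *)
    coinduction_upto nat_int_tllist: "(nat, int) tllist"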
@@ -774,9 +774,9 @@
\label{ssec:corec-and-corecursive-theorems}\<close>
text \<open>
-For a function @{term f} over codatatype \<open>t\<close>, the @{command corec} and
+For a function \<^term>\<open>f\<close> over codatatype \<open>t\<close>, the @{command corec} and
@{command corecursive} commands generate the following properties (listed for
-@{const sexp}, cf. Section~\ref{ssec:simple-corecursion}):
+\<^const>\<open>sexp\<close>, cf. Section~\ref{ssec:simple-corecursion}):
\begin{indentblock}
\begin{description}
@@ -799,7 +799,7 @@
\item[\<open>f.\<close>\hthm{inner_induct}\rm:] ~ \\
This property is only generated for mixed recursive--corecursive definitions.
-For @{const primes} (Section~\ref{ssec:mixed-recursion-corecursion}, it reads as
+For \<^const>\<open>primes\<close> (Section~\ref{ssec:mixed-recursion-corecursion}), it reads as
follows: \\[\jot]
@{thm primes.inner_induct[no_vars]}
@@ -892,7 +892,7 @@
text \<open>
The @{method transfer_prover_eq} proof method replaces the equality relation
-@{term "(=)"} with compound relator expressions according to
+\<^term>\<open>(=)\<close> with compound relator expressions according to
@{thm [source] relator_eq} before calling @{method transfer_prover} on the
current subgoal. It tends to work better than plain @{method transfer_prover} on
the parametricity proof obligations of @{command corecursive} and
@@ -917,7 +917,7 @@
this derivation fails if, in the arguments of a higher-order constant, a type variable
occurs on both sides of the function type constructor. The required naturality
theorem can then be declared with @{attribute friend_of_corec_simps}. See
-@{file "~~/src/HOL/Corec_Examples/Tests/Iterate_GPV.thy"} for an example.
+\<^file>\<open>~~/src/HOL/Corec_Examples/Tests/Iterate_GPV.thy\<close> for an example.
\<close>
--- a/src/Doc/Datatypes/Datatypes.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Datatypes/Datatypes.thy Sat Jan 05 17:24:33 2019 +0100
@@ -77,7 +77,7 @@
finitely many direct subtrees, whereas those of the second and fourth may have
infinite branching.
-The package is part of @{theory Main}. Additional functionality is provided by
+The package is part of \<^theory>\<open>Main\<close>. Additional functionality is provided by
the theory \<^file>\<open>~~/src/HOL/Library/BNF_Axiomatization.thy\<close>.
The package, like its predecessor, fully adheres to the LCF philosophy
@@ -180,10 +180,10 @@
text \<open>
\noindent
-@{const Truue}, @{const Faalse}, and @{const Perhaaps} have the type @{typ trool}.
+\<^const>\<open>Truue\<close>, \<^const>\<open>Faalse\<close>, and \<^const>\<open>Perhaaps\<close> have the type \<^typ>\<open>trool\<close>.
Polymorphic types are possible, such as the following option type, modeled after
-its homologue from the @{theory HOL.Option} theory:
+its homologue from the \<^theory>\<open>HOL.Option\<close> theory:
\<close>
(*<*)
@@ -231,7 +231,7 @@
text \<open>
\noindent
Lists were shown in the introduction. Terminated lists are a variant that
-stores a value of type @{typ 'b} at the very end:
+stores a value of type \<^typ>\<open>'b\<close> at the very end:
\<close>
datatype (*<*)(in early) (*>*)('a, 'b) tlist = TNil 'b | TCons 'a "('a, 'b) tlist"
@@ -269,7 +269,7 @@
text \<open>
\emph{Nested recursion} occurs when recursive occurrences of a type appear under
a type constructor. The introduction showed some examples of trees with nesting
-through lists. A more complex example, that reuses our @{type option} type,
+through lists. A more complex example, which reuses our \<^type>\<open>option\<close> type,
follows:
\<close>
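
\noindent
A sketch of such a definition (names hypothetical) is a binary tree whose
subtrees may be absent:

    datatype 'a btree = BNode 'a "'a btree option" "'a btree option"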
@@ -297,7 +297,7 @@
text \<open>
\noindent
-The following definition of @{typ 'a}-branching trees is legal:
+The following definition of \<^typ>\<open>'a\<close>-branching trees is legal:
\<close>
datatype 'a ftree = FTLeaf 'a | FTNode "'a \<Rightarrow> 'a ftree"
@@ -314,9 +314,9 @@
In general, type constructors \<open>('a\<^sub>1, \<dots>, 'a\<^sub>m) t\<close>
allow recursion on a subset of their type arguments \<open>'a\<^sub>1\<close>, \ldots,
\<open>'a\<^sub>m\<close>. These type arguments are called \emph{live}; the remaining
-type arguments are called \emph{dead}. In @{typ "'a \<Rightarrow> 'b"} and
-@{typ "('a, 'b) fun_copy"}, the type variable @{typ 'a} is dead and
-@{typ 'b} is live.
+type arguments are called \emph{dead}. In \<^typ>\<open>'a \<Rightarrow> 'b\<close> and
+\<^typ>\<open>('a, 'b) fun_copy\<close>, the type variable \<^typ>\<open>'a\<close> is dead and
+\<^typ>\<open>'b\<close> is live.
Type constructors must be registered as BNFs to have live arguments. This is
done automatically for datatypes and codatatypes introduced by the
@@ -403,24 +403,24 @@
\medskip
-The discriminator @{const null} and the selectors @{const hd} and @{const tl}
+The discriminator \<^const>\<open>null\<close> and the selectors \<^const>\<open>hd\<close> and \<^const>\<open>tl\<close>
are characterized by the following conditional equations:
%
\[@{thm list.collapse(1)[of xs, no_vars]}
\qquad @{thm list.collapse(2)[of xs, no_vars]}\]
%
For two-constructor datatypes, a single discriminator constant is sufficient.
-The discriminator associated with @{const Cons} is simply
-@{term "\<lambda>xs. \<not> null xs"}.
+The discriminator associated with \<^const>\<open>Cons\<close> is simply
+\<^term>\<open>\<lambda>xs. \<not> null xs\<close>.
The \keyw{where} clause at the end of the command specifies a default value for
selectors applied to constructors on which they are not a priori specified.
In the example, it is used to ensure that the tail of the empty list is itself
(instead of being left unspecified).
-Because @{const Nil} is nullary, it is also possible to use
-@{term "\<lambda>xs. xs = Nil"} as a discriminator. This is the default behavior
-if we omit the identifier @{const null} and the associated colon. Some users
+Because \<^const>\<open>Nil\<close> is nullary, it is also possible to use
+\<^term>\<open>\<lambda>xs. xs = Nil\<close> as a discriminator. This is the default behavior
+if we omit the identifier \<^const>\<open>null\<close> and the associated colon. Some users
argue against this, because the mixture of constructors and selectors in the
characteristic theorems can lead Isabelle's automation to switch between the
constructor and the destructor view in surprising ways.
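
\noindent
A self-contained sketch of the declaration style discussed here, with primed
names to avoid clashing with the standard list type:

    datatype 'a lst = null': Nil' | Cons' (hd': 'a) (tl': "'a lst")
      where "tl' Nil' = Nil'"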
@@ -469,7 +469,7 @@
@{command_def "datatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command datatype} target? @{syntax dt_options}? @{syntax dt_spec}
;
@{syntax_def dt_options}: '(' ((@{syntax plugins} | 'discs_sels') + ',') ')'
@@ -480,7 +480,7 @@
@{syntax map_rel_pred}? (@'where' (prop + '|'))? + @'and')
;
@{syntax_def map_rel_pred}: @'for' ((('map' | 'rel' | 'pred') ':' name) +)
-\<close>}
+\<close>
\medskip
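
\noindent
A sketch (all names hypothetical) exercising the optional set-function name
and the \<open>map\<close>, \<open>rel\<close>, and \<open>pred\<close> name overrides:

    datatype (elems: 'a) wrap = Wrap (unwrap: 'a)
      for map: wrap_map rel: wrap_rel pred: wrap_pred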
@@ -516,18 +516,18 @@
The left-hand sides of the datatype equations specify the name of the type to
define, its type parameters, and additional information:
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_name}: @{syntax tyargs}? name mixfix?
;
@{syntax_def tyargs}: typefree | '(' (('dead' | name ':')? typefree + ',') ')'
-\<close>}
+\<close>
\medskip
\noindent
The syntactic entity \synt{name} denotes an identifier, \synt{mixfix} denotes
the usual parenthesized mixfix notation, and \synt{typefree} denotes a fixed type
-variable (@{typ 'a}, @{typ 'b}, \ldots) @{cite "isabelle-isar-ref"}.
+variable (\<^typ>\<open>'a\<close>, \<^typ>\<open>'b\<close>, \ldots) @{cite "isabelle-isar-ref"}.
The optional names preceding the type variables make it possible to override the default
names of the set functions (\<open>set\<^sub>1_t\<close>, \ldots, \<open>set\<^sub>m_t\<close>). Type
@@ -541,9 +541,9 @@
Inside a mutually recursive specification, all defined datatypes must
mention exactly the same type variables in the same order.
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_ctor}: (name ':')? name (@{syntax dt_ctor_arg} * ) mixfix?
-\<close>}
+\<close>
\medskip
@@ -555,9 +555,9 @@
\<open>\<lambda>x. x = C\<^sub>j\<close> for nullary constructors and
\<open>t.is_C\<^sub>j\<close> otherwise.
-@{rail \<open>
+\<^rail>\<open>
@{syntax_def dt_ctor_arg}: type | '(' name ':' type ')'
-\<close>}
+\<close>
\medskip
@@ -580,9 +580,9 @@
@{command_def "datatype_compat"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command datatype_compat} (name +)
-\<close>}
+\<close>
\medskip
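
\noindent
For instance, assuming the mutually recursive datatypes \<open>even_nat\<close> and
\<open>odd_nat\<close> of the running example, the old-style infrastructure is
installed by

    datatype_compat even_nat odd_nat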
@@ -595,7 +595,7 @@
text \<open>\blankline\<close>
- ML \<open>Old_Datatype_Data.get_info @{theory} @{type_name even_nat}\<close>
+ ML \<open>Old_Datatype_Data.get_info \<^theory> \<^type_name>\<open>even_nat\<close>\<close>
text \<open>
The syntactic entity \synt{name} denotes an identifier @{cite "isabelle-isar-ref"}.
@@ -712,7 +712,7 @@
text \<open>
The free constructor theorems are partitioned into three subgroups. The first
subgroup of properties is concerned with the constructors. They are listed below
-for @{typ "'a list"}:
+for \<^typ>\<open>'a list\<close>:
\begin{indentblock}
\begin{description}
@@ -803,14 +803,14 @@
@{thm list.collapse(2)[no_vars]} \\
The \<open>[simp]\<close> attribute is exceptionally omitted for datatypes equipped
with a single nullary constructor, because a property of the form
-@{prop "x = C"} is not suitable as a simplification rule.
+\<^prop>\<open>x = C\<close> is not suitable as a simplification rule.
\item[\<open>t.\<close>\hthm{distinct_disc} \<open>[dest]\<close>\rm:] ~ \\
-These properties are missing for @{typ "'a list"} because there is only one
+These properties are missing for \<^typ>\<open>'a list\<close> because there is only one
proper discriminator. If the datatype had been introduced with a second
-discriminator called @{const nonnull}, they would have read as follows: \\[\jot]
-@{prop "null list \<Longrightarrow> \<not> nonnull list"} \\
-@{prop "nonnull list \<Longrightarrow> \<not> null list"}
+discriminator called \<^const>\<open>nonnull\<close>, they would have read as follows: \\[\jot]
+\<^prop>\<open>null list \<Longrightarrow> \<not> nonnull list\<close> \\
+\<^prop>\<open>nonnull list \<Longrightarrow> \<not> null list\<close>
\item[\<open>t.\<close>\hthm{exhaust_disc} \<open>[case_names C\<^sub>1 \<dots> C\<^sub>n]\<close>\rm:] ~ \\
@{thm list.exhaust_disc[no_vars]}
@@ -851,7 +851,7 @@
text \<open>
The functorial theorems are generated for type constructors with at least
-one live type argument (e.g., @{typ "'a list"}). They are partitioned in two
+one live type argument (e.g., \<^typ>\<open>'a list\<close>). They are partitioned into two
subgroups. The first subgroup consists of properties involving the
constructors or the destructors and either a set function, the map function,
the predicator, or the relator:
@@ -867,7 +867,7 @@
%(Section~\ref{ssec:transfer}).
\item[\<open>t.\<close>\hthm{sel_transfer} \<open>[transfer_rule]\<close>\rm:] ~ \\
-This property is missing for @{typ "'a list"} because there is no common
+This property is missing for \<^typ>\<open>'a list\<close> because there is no common
selector to all constructors. \\
The \<open>[transfer_rule]\<close> attribute is set by the \<open>transfer\<close> plugin
(Section~\ref{ssec:transfer}).
@@ -1182,10 +1182,10 @@
induction rule can be obtained by applying the \<open>[unfolded
all_mem_range]\<close> attribute on \<open>t.induct\<close>.
-\item \emph{The @{const size} function has a slightly different definition.}
+\item \emph{The \<^const>\<open>size\<close> function has a slightly different definition.}
The new function returns \<open>1\<close> instead of \<open>0\<close> for some nonrecursive
constructors. This departure from the old behavior made it possible to implement
-@{const size} in terms of the generic function \<open>t.size_t\<close>. Moreover,
+\<^const>\<open>size\<close> in terms of the generic function \<open>t.size_t\<close>. Moreover,
the new function considers nested occurrences of a value in the nested
recursive case. The old behavior can be obtained by disabling the \<open>size\<close>
plugin (Section~\ref{sec:selecting-plugins}) and instantiating the
@@ -1381,7 +1381,7 @@
text \<open>
In a departure from the old datatype package, nested recursion is normally
handled via the map functions of the nesting type constructors. For example,
-recursive calls are lifted to lists using @{const map}:
+recursive calls are lifted to lists using \<^const>\<open>map\<close>:
\<close>
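
\noindent
A sketch of this idiom (names hypothetical), for finitely branching rose
trees:

    datatype 'a rose = Rose 'a "'a rose list"

    primrec rose_map :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a rose \<Rightarrow> 'b rose" where
      "rose_map f (Rose x ts) = Rose (f x) (map (rose_map f) ts)"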
(*<*)
@@ -1397,7 +1397,7 @@
\noindent
The next example features recursion through the \<open>option\<close> type. Although
\<open>option\<close> is not a new-style datatype, it is registered as a BNF with the
-map function @{const map_option}:
+map function \<^const>\<open>map_option\<close>:
\<close>
primrec (*<*)(in early) (*>*)sum_btree :: "('a::{zero,plus}) btree \<Rightarrow> 'a" where
@@ -1435,7 +1435,7 @@
text \<open>
\noindent
For recursion through curried $n$-ary functions, $n$ applications of
-@{term "(\<circ>)"} are necessary. The examples below illustrate the case where
+\<^term>\<open>(\<circ>)\<close> are necessary. The examples below illustrate the case where
$n = 2$:
\<close>
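
\noindent
A sketch for $n = 2$ (names hypothetical): the recursive call is composed
twice, once for each function argument:

    datatype 'a ftree2 = FTLeaf2 'a | FTNode2 "'a \<Rightarrow> 'a \<Rightarrow> 'a ftree2"

    primrec relabel2 :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a ftree2 \<Rightarrow> 'a ftree2" where
      "relabel2 f (FTLeaf2 x) = FTLeaf2 (f x)"
    | "relabel2 f (FTNode2 g) = FTNode2 ((\<circ>) ((\<circ>) (relabel2 f)) g)"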
@@ -1532,7 +1532,7 @@
%
% * higher-order approach, considering nesting as nesting, is more
% compositional -- e.g. we saw how we could reuse an existing polymorphic
-% at or the_default, whereas @{const ats\<^sub>f\<^sub>f} is much more specific
+% at or the_default, whereas \<^const>\<open>ats\<^sub>f\<^sub>f\<close> is much more specific
%
% * but:
% * is perhaps less intuitive, because it requires higher-order thinking
@@ -1540,7 +1540,7 @@
% mutually recursive version might be nicer
% * is somewhat indirect -- must apply a map first, then compute a result
% (cannot mix)
-% * the auxiliary functions like @{const ats\<^sub>f\<^sub>f} are sometimes useful in own right
+% * the auxiliary functions like \<^const>\<open>ats\<^sub>f\<^sub>f\<close> are sometimes useful in own right
%
% * impact on automation unclear
%
@@ -1561,14 +1561,14 @@
@{command_def "primrec"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command primrec} target? @{syntax pr_options}? fixes \<newline>
@'where' (@{syntax pr_equation} + '|')
;
@{syntax_def pr_options}: '(' ((@{syntax plugins} | 'nonexhaustive' | 'transfer') + ',') ')'
;
@{syntax_def pr_equation}: thmdecl? prop
-\<close>}
+\<close>
\medskip
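
\noindent
For instance, the \<open>nonexhaustive\<close> option tolerates missing constructor
cases (a sketch, hypothetical name):

    primrec (nonexhaustive) first' :: "'a list \<Rightarrow> 'a" where
      "first' (x # xs) = x"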
@@ -1617,7 +1617,7 @@
text \<open>
The @{command primrec} command generates the following properties (listed
-for @{const tfold}):
+for \<^const>\<open>tfold\<close>):
\begin{indentblock}
\begin{description}
@@ -1816,8 +1816,8 @@
text \<open>
\noindent
-Notice that the @{const cont} selector is associated with both @{const Skip}
-and @{const Action}.
+Notice that the \<^const>\<open>cont\<close> selector is associated with both \<^const>\<open>Skip\<close>
+and \<^const>\<open>Action\<close>.
\<close>
@@ -1863,9 +1863,9 @@
@{command_def "codatatype"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command codatatype} target? @{syntax dt_options}? @{syntax dt_spec}
-\<close>}
+\<close>
\medskip
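
\noindent
A sketch consistent with the lazy list codatatype used throughout this
section:

    codatatype (lset: 'a) llist = lnull: LNil | LCons (lhd: 'a) (ltl: "'a llist")
      for map: lmap rel: llist_all2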
@@ -1927,7 +1927,7 @@
\label{sssec:coinductive-theorems}\<close>
text \<open>
-The coinductive theorems are listed below for @{typ "'a llist"}:
+The coinductive theorems are listed below for \<^typ>\<open>'a llist\<close>:
\begin{indentblock}
\begin{description}
@@ -2206,7 +2206,7 @@
\label{sssec:primcorec-nested-corecursion}\<close>
text \<open>
-The next pair of examples generalize the @{const literate} and @{const siterate}
+The next pair of examples generalize the \<^const>\<open>literate\<close> and \<^const>\<open>siterate\<close>
functions (Section~\ref{sssec:primcorec-simple-corecursion}) to possibly
infinite trees in which subnodes are organized either as a lazy list (\<open>tree\<^sub>i\<^sub>i\<close>) or as a finite set (\<open>tree\<^sub>i\<^sub>s\<close>). They rely on the map functions of
the nesting type constructors to lift the corecursive calls:
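
\noindent
A sketch of the first of these, consistent with the description below
(selector names hypothetical):

    codatatype 'a tree\<^sub>i\<^sub>i = Node\<^sub>i\<^sub>i (lbl\<^sub>i\<^sub>i: 'a) (sub\<^sub>i\<^sub>i: "'a tree\<^sub>i\<^sub>i llist")

    primcorec iterate\<^sub>i\<^sub>i :: "('a \<Rightarrow> 'a llist) \<Rightarrow> 'a \<Rightarrow> 'a tree\<^sub>i\<^sub>i" where
      "iterate\<^sub>i\<^sub>i g x = Node\<^sub>i\<^sub>i x (lmap (iterate\<^sub>i\<^sub>i g) (g x))"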
@@ -2224,9 +2224,9 @@
\noindent
Both examples follow the usual format for constructor arguments associated
with nested recursive occurrences of the datatype. Consider
-@{const iterate\<^sub>i\<^sub>i}. The term @{term "g x"} constructs an @{typ "'a llist"}
-value, which is turned into an @{typ "'a tree\<^sub>i\<^sub>i llist"} value using
-@{const lmap}.
+\<^const>\<open>iterate\<^sub>i\<^sub>i\<close>. The term \<^term>\<open>g x\<close> constructs an \<^typ>\<open>'a llist\<close>
+value, which is turned into an \<^typ>\<open>'a tree\<^sub>i\<^sub>i llist\<close> value using
+\<^const>\<open>lmap\<close>.
This format may sometimes feel artificial. The following function constructs
a tree with a single, infinite branch from a stream:
@@ -2288,7 +2288,7 @@
text \<open>
\noindent
For corecursion through curried $n$-ary functions, $n$ applications of
-@{term "(\<circ>)"} are necessary. The examples below illustrate the case where
+\<^term>\<open>(\<circ>)\<close> are necessary. The examples below illustrate the case where
$n = 2$:
\<close>
@@ -2361,8 +2361,8 @@
text \<open>
The constructor view is similar to the code view, but there is one separate
conditional equation per constructor rather than a single unconditional
-equation. Examples that rely on a single constructor, such as @{const literate}
-and @{const siterate}, are identical in both styles.
+equation. Examples that rely on a single constructor, such as \<^const>\<open>literate\<close>
+and \<^const>\<open>siterate\<close>, are identical in both styles.
Here is an example where there is a difference:
\<close>
@@ -2374,15 +2374,15 @@
text \<open>
\noindent
-With the constructor view, we must distinguish between the @{const LNil} and
-the @{const LCons} case. The condition for @{const LCons} is
-left implicit, as the negation of that for @{const LNil}.
+With the constructor view, we must distinguish between the \<^const>\<open>LNil\<close> and
+the \<^const>\<open>LCons\<close> case. The condition for \<^const>\<open>LCons\<close> is
+left implicit, as the negation of that for \<^const>\<open>LNil\<close>.
For this example, the constructor view is slightly more involved than the
code equation. Recall the code view version presented in
Section~\ref{sssec:primcorec-simple-corecursion}.
% TODO: \[{thm code_view.lapp.code}\]
-The constructor view requires us to analyze the second argument (@{term ys}).
+The constructor view requires us to analyze the second argument (\<^term>\<open>ys\<close>).
The code equation generated from the constructor view also suffers from this.
% TODO: \[{thm lapp.code}\]
@@ -2407,14 +2407,14 @@
text \<open>
\noindent
-Since there is no sequentiality, we can apply the equation for @{const Choice}
-without having first to discharge @{term "n mod (4::int) \<noteq> 0"},
-@{term "n mod (4::int) \<noteq> 1"}, and
-@{term "n mod (4::int) \<noteq> 2"}.
+Since there is no sequentiality, we can apply the equation for \<^const>\<open>Choice\<close>
+without having first to discharge \<^term>\<open>n mod (4::int) \<noteq> 0\<close>,
+\<^term>\<open>n mod (4::int) \<noteq> 1\<close>, and
+\<^term>\<open>n mod (4::int) \<noteq> 2\<close>.
The price to pay for this elegance is that we must discharge exclusiveness proof
obligations, one for each pair of conditions
-@{term "(n mod (4::int) = i, n mod (4::int) = j)"}
-with @{term "i < j"}. If we prefer not to discharge any obligations, we can
+\<^term>\<open>(n mod (4::int) = i, n mod (4::int) = j)\<close>
+with \<^term>\<open>i < j\<close>. If we prefer not to discharge any obligations, we can
enable the \<open>sequential\<close> option. This pushes the problem to the users of
the generated properties.
%Here are more examples to conclude:
@@ -2455,8 +2455,8 @@
text \<open>
\noindent
-The first formula in the @{const literate} specification indicates which
-constructor to choose. For @{const siterate} and @{const every_snd}, no such
+The first formula in the \<^const>\<open>literate\<close> specification indicates which
+constructor to choose. For \<^const>\<open>siterate\<close> and \<^const>\<open>every_snd\<close>, no such
formula is necessary, since the type has only one constructor. The last two
formulas are equations specifying the value of the result for the relevant
selectors. Corecursive calls appear directly to the right of the equal sign.
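
\noindent
A sketch of \<^const>\<open>literate\<close> in this style:

    primcorec literate :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a llist" where
      "\<not> lnull (literate g x)"
    | "lhd (literate g x) = x"
    | "ltl (literate g x) = literate g (g x)"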
@@ -2514,8 +2514,7 @@
text \<open>
\noindent
-Using the \<open>of\<close> keyword, different equations are specified for @{const
-cont} depending on which constructor is selected.
+Using the \<open>of\<close> keyword, different equations are specified for \<^const>\<open>cont\<close> depending on which constructor is selected.
Here are more examples to conclude:
\<close>
@@ -2550,14 +2549,14 @@
@{command_def "primcorecursive"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
(@@{command primcorec} | @@{command primcorecursive}) target? \<newline>
@{syntax pcr_options}? fixes @'where' (@{syntax pcr_formula} + '|')
;
@{syntax_def pcr_options}: '(' ((@{syntax plugins} | 'sequential' | 'exhaustive' | 'transfer') + ',') ')'
;
@{syntax_def pcr_formula}: thmdecl? prop (@'of' (term * ))?
-\<close>}
+\<close>
\medskip
@@ -2610,7 +2609,7 @@
text \<open>
The @{command primcorec} and @{command primcorecursive} commands generate the
-following properties (listed for @{const literate}):
+following properties (listed for \<^const>\<open>literate\<close>):
\begin{indentblock}
\begin{description}
@@ -2640,12 +2639,12 @@
(Section~\ref{ssec:code-generator}).
\item[\<open>f.\<close>\hthm{exclude}\rm:] ~ \\
-These properties are missing for @{const literate} because no exclusiveness
+These properties are missing for \<^const>\<open>literate\<close> because no exclusiveness
proof obligations arose. In general, the properties correspond to the
discharged proof obligations.
\item[\<open>f.\<close>\hthm{exhaust}\rm:] ~ \\
-This property is missing for @{const literate} because no exhaustiveness
+This property is missing for \<^const>\<open>literate\<close> because no exhaustiveness
proof obligation arose. In general, the property corresponds to the discharged
proof obligation.
@@ -2734,7 +2733,7 @@
An $n$-ary BNF is a type constructor equipped with a map function
(functorial action), $n$ set functions (natural transformations),
and an infinite cardinal bound that satisfy certain properties.
-For example, @{typ "'a llist"} is a unary BNF.
+For example, \<^typ>\<open>'a llist\<close> is a unary BNF.
Its predicator \<open>llist_all ::
('a \<Rightarrow> bool) \<Rightarrow>
'a llist \<Rightarrow> bool\<close>
@@ -2745,7 +2744,7 @@
'a llist \<Rightarrow> 'b llist \<Rightarrow> bool\<close>
extends binary predicates over elements to binary predicates over parallel
lazy lists. The cardinal bound limits the number of elements returned by the
-set function; it may not depend on the cardinality of @{typ 'a}.
+set function; it may not depend on the cardinality of \<^typ>\<open>'a\<close>.
The type constructors introduced by @{command datatype} and
@{command codatatype} are automatically registered as BNFs. In addition, a
@@ -2765,8 +2764,8 @@
command. Some of the proof obligations are best viewed with the theory
\<^file>\<open>~~/src/HOL/Library/Cardinal_Notations.thy\<close> imported.
-The type is simply a copy of the function space @{typ "'d \<Rightarrow> 'a"}, where @{typ 'a}
-is live and @{typ 'd} is dead. We introduce it together with its map function,
+The type is simply a copy of the function space \<^typ>\<open>'d \<Rightarrow> 'a\<close>, where \<^typ>\<open>'a\<close>
+is live and \<^typ>\<open>'d\<close> is dead. We introduce it together with its map function,
set function, predicator, and relator.
\<close>
@@ -2870,7 +2869,7 @@
For many typedefs, lifting the BNF structure from the raw type to the abstract
type can be done uniformly. This is the task of the @{command lift_bnf} command.
-Using @{command lift_bnf}, the above registration of @{typ "('d, 'a) fn"} as a
+Using @{command lift_bnf}, the above registration of \<^typ>\<open>('d, 'a) fn\<close> as a
BNF becomes much shorter:
\<close>
@@ -2885,7 +2884,7 @@
(*>*)
text \<open>
-For type copies (@{command typedef}s with @{term UNIV} as the representing set),
+For type copies (@{command typedef}s with \<^term>\<open>UNIV\<close> as the representing set),
the proof obligations are so simple that they can be
discharged automatically, yielding another command, @{command copy_bnf}, which
does not emit any proof obligations:
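
\noindent
For the \<open>('d, 'a) fn\<close> type copy above, the registration then reduces to a
one-liner (a sketch):

    copy_bnf ('d, 'a) fn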
@@ -2925,7 +2924,7 @@
The @{command lift_bnf} command requires us to prove that the set of nonempty lists
is closed under the map function and the zip function. The latter only
occurs implicitly in the goal, in the form of the variable
-@{term "zs :: ('a \<times> 'b) list"}.
+\<^term>\<open>zs :: ('a \<times> 'b) list\<close>.
\<close>
lift_bnf (*<*)(no_warn_wits) (*>*)'a nonempty_list
@@ -2946,8 +2945,8 @@
reasoning abstractly about an arbitrary BNF. The @{command bnf_axiomatization}
command below introduces a type \<open>('a, 'b, 'c) F\<close>, three set constants,
a map function, a predicator, a relator, and a nonemptiness witness that depends only on
-@{typ 'a}. The type \<open>'a \<Rightarrow> ('a, 'b, 'c) F\<close> of the witness can be read
-as an implication: Given a witness for @{typ 'a}, we can construct a witness for
+\<^typ>\<open>'a\<close>. The type \<open>'a \<Rightarrow> ('a, 'b, 'c) F\<close> of the witness can be read
+as an implication: Given a witness for \<^typ>\<open>'a\<close>, we can construct a witness for
\<open>('a, 'b, 'c) F\<close>. The BNF properties are postulated as axioms.
\<close>
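
\noindent
A sketch matching this description (set-function names hypothetical):

    bnf_axiomatization (setA: 'a, setB: 'b, setC: 'c) F
      [wits: "'a \<Rightarrow> ('a, 'b, 'c) F"]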
@@ -2971,12 +2970,12 @@
@{command_def "bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command bnf} target? (name ':')? type \<newline>
'map:' term ('sets:' (term +))? 'bd:' term \<newline>
('wits:' (term +))? ('rel:' term)? \<newline>
('pred:' term)? @{syntax plugins}?
-\<close>}
+\<close>
\medskip
@@ -3004,7 +3003,7 @@
@{command_def "lift_bnf"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command lift_bnf} target? lb_options? \<newline>
@{syntax tyargs} name wit_terms? \<newline>
('via' thm)? @{syntax map_rel_pred}?
@@ -3012,15 +3011,14 @@
@{syntax_def lb_options}: '(' ((@{syntax plugins} | 'no_warn_wits') + ',') ')'
;
@{syntax_def wit_terms}: '[' 'wits' ':' terms ']'
-\<close>}
+\<close>
\medskip
\noindent
The @{command lift_bnf} command registers as a BNF an existing type (the
\emph{abstract type}) that was defined as a subtype of a BNF (the \emph{raw
type}) using the @{command typedef} command. To achieve this, it lifts the BNF
-structure on the raw type to the abstract type following a @{term
-type_definition} theorem. The theorem is usually inferred from the type, but can
+structure on the raw type to the abstract type following a \<^term>\<open>type_definition\<close> theorem. The theorem is usually inferred from the type, but can
also be explicitly supplied by means of the optional \<open>via\<close> clause. In
addition, custom names for the set functions, the map function, the predicator, and the relator,
as well as nonemptiness witnesses can be specified.
@@ -3040,15 +3038,15 @@
@{command_def "copy_bnf"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command copy_bnf} target? ('(' @{syntax plugins} ')')? \<newline>
@{syntax tyargs} name ('via' thm)? @{syntax map_rel_pred}?
-\<close>}
+\<close>
\medskip
\noindent
The @{command copy_bnf} command performs the same lifting as @{command lift_bnf}
-for type copies (@{command typedef}s with @{term UNIV} as the representing set),
+for type copies (@{command typedef}s with \<^term>\<open>UNIV\<close> as the representing set),
without requiring the user to discharge any proof obligations or provide
nonemptiness witnesses.
\<close>
@@ -3061,13 +3059,13 @@
@{command_def "bnf_axiomatization"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command bnf_axiomatization} target? ('(' @{syntax plugins} ')')? \<newline>
@{syntax tyargs}? name @{syntax wit_types}? \<newline>
mixfix? @{syntax map_rel_pred}?
;
@{syntax_def wit_types}: '[' 'wits' ':' types ']'
-\<close>}
+\<close>
\medskip
@@ -3078,7 +3076,7 @@
The syntactic entity \synt{target} can be used to specify a local context,
\synt{name} denotes an identifier, \synt{typefree} denotes fixed type variable
-(@{typ 'a}, @{typ 'b}, \ldots), \synt{mixfix} denotes the usual parenthesized
+(\<^typ>\<open>'a\<close>, \<^typ>\<open>'b\<close>, \ldots), \synt{mixfix} denotes the usual parenthesized
mixfix notation, and \synt{types} denotes a space-separated list of types
@{cite "isabelle-isar-ref"}.
@@ -3107,9 +3105,9 @@
@{command_def "print_bnfs"} & : & \<open>local_theory \<rightarrow>\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command print_bnfs}
-\<close>}
+\<close>
\<close>
@@ -3147,13 +3145,13 @@
@{command_def "free_constructors"} & : & \<open>local_theory \<rightarrow> proof(prove)\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command free_constructors} target? @{syntax dt_options} \<newline>
name 'for' (@{syntax fc_ctor} + '|') \<newline>
(@'where' (prop + '|'))?
;
@{syntax_def fc_ctor}: (name ':')? term (name * )
-\<close>}
+\<close>
\medskip
@@ -3188,10 +3186,10 @@
@{command_def "simps_of_case"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command simps_of_case} target? (name ':')? \<newline>
(thm + ) (@'splits' ':' (thm + ))?
-\<close>}
+\<close>
\medskip
@@ -3227,10 +3225,10 @@
@{command_def "case_of_simps"} & : & \<open>local_theory \<rightarrow> local_theory\<close>
\end{matharray}
-@{rail \<open>
+\<^rail>\<open>
@@{command case_of_simps} target? (name ':')? \<newline>
(thm + )
-\<close>}
+\<close>
\medskip
@@ -3334,7 +3332,7 @@
For each datatype \<open>t\<close>, the \hthm{size} plugin generates a generic size
function \<open>t.size_t\<close> as well as a specific instance
\<open>size :: t \<Rightarrow> nat\<close> belonging to the \<open>size\<close> type class. The
-\keyw{fun} command relies on @{const size} to prove termination of recursive
+\keyw{fun} command relies on \<^const>\<open>size\<close> to prove termination of recursive
functions on datatypes.
The plugin derives the following properties:
@@ -3356,9 +3354,9 @@
@{thm list.size_gen_o_map[no_vars]}
\item[\<open>t.\<close>\hthm{size_neq}\rm:] ~ \\
-This property is missing for @{typ "'a list"}. If the @{term size} function
+This property is missing for \<^typ>\<open>'a list\<close>. If the \<^term>\<open>size\<close> function
always evaluates to a non-zero value, this theorem has the form
-@{prop "\<not> size x = 0"}.
+\<^prop>\<open>\<not> size x = 0\<close>.
\end{description}
\end{indentblock}
@@ -3371,8 +3369,8 @@
\<open>'a\<^sub>1, \<dots>, 'a\<^sub>m\<close>, by default \<open>u\<close> values are given a size of 0. This
can be improved upon by registering a custom size function of type
\<open>('a\<^sub>1 \<Rightarrow> nat) \<Rightarrow> \<dots> \<Rightarrow> ('a\<^sub>m \<Rightarrow> nat) \<Rightarrow> u \<Rightarrow> nat\<close> using
-the ML function @{ML BNF_LFP_Size.register_size} or
-@{ML BNF_LFP_Size.register_size_global}. See theory
+the ML function \<^ML>\<open>BNF_LFP_Size.register_size\<close> or
+\<^ML>\<open>BNF_LFP_Size.register_size_global\<close>. See theory
\<^file>\<open>~~/src/HOL/Library/Multiset.thy\<close> for an example.
\<close>
--- a/src/Doc/Eisbach/Manual.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Eisbach/Manual.thy Sat Jan 05 17:24:33 2019 +0100
@@ -17,7 +17,7 @@
The syntax diagram below refers to some syntactic categories that are
further defined in @{cite "isabelle-isar-ref"}.
- @{rail \<open>
+ \<^rail>\<open>
@@{command method} name args @'=' method
;
args: term_args? method_args? \<newline> fact_args? decl_args?
@@ -29,7 +29,7 @@
fact_args: @'uses' (name+)
;
decl_args: @'declares' (name+)
- \<close>}
+ \<close>
\<close>
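
  A minimal sketch of this syntax, a method with a named fact parameter and a
  trivial use of it (names hypothetical):

      method uses_test uses my_thms = (rule my_thms)

      lemma assumes a: "A" shows "A"
        by (uses_test my_thms: a)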
@@ -68,12 +68,12 @@
text \<open>
Methods can also abstract over terms using the @{keyword_def "for"} keyword,
optionally providing type constraints. For instance, the following proof
- method \<open>intro_ex\<close> takes a term @{term y} of any type, which it uses to
- instantiate the @{term x}-variable of \<open>exI\<close> (existential introduction)
+ method \<open>intro_ex\<close> takes a term \<^term>\<open>y\<close> of any type, which it uses to
+ instantiate the \<^term>\<open>x\<close>-variable of \<open>exI\<close> (existential introduction)
before applying the result as a rule. The instantiation is performed here by
Isar's @{attribute_ref "where"} attribute. If the current subgoal is to find
- a witness for the given predicate @{term Q}, then this has the effect of
- committing to @{term y}.
+ a witness for the given predicate \<^term>\<open>Q\<close>, then this has the effect of
+ committing to \<^term>\<open>y\<close>.
\<close>
method intro_ex for Q :: "'a \<Rightarrow> bool" and y :: 'a =
@@ -81,7 +81,7 @@
text \<open>
- The term parameters @{term y} and @{term Q} can be used arbitrarily inside
+ The term parameters \<^term>\<open>y\<close> and \<^term>\<open>Q\<close> can be used arbitrarily inside
the method body, as part of attribute applications or arguments to other
methods. The expression is type-checked as far as possible when the method
is defined; however, dynamic type errors can still occur when it is invoked
@@ -224,15 +224,15 @@
text \<open>
The only non-trivial part above is the final alternative \<open>(erule notE ;
solve \<open>prop_solver\<close>)\<close>. Here, in the case that all other alternatives fail,
- the method takes one of the assumptions @{term "\<not> P"} of the current goal
+ the method takes one of the assumptions \<^term>\<open>\<not> P\<close> of the current goal
and eliminates it with the rule \<open>notE\<close>, causing the goal to be proved to
- become @{term P}. The method then recursively invokes itself on the
+ become \<^term>\<open>P\<close>. The method then recursively invokes itself on the
remaining goals. The job of the recursive call is to demonstrate that there
- is a contradiction in the original assumptions (i.e.\ that @{term P} can be
+ is a contradiction in the original assumptions (i.e.\ that \<^term>\<open>P\<close> can be
derived from them). Note this recursive invocation is applied with the
@{method solve} method combinator to ensure that a contradiction will indeed
be shown. In the case where a contradiction cannot be found, backtracking
- will occur and a different assumption @{term "\<not> Q"} will be chosen for
+ will occur and a different assumption \<^term>\<open>\<not> Q\<close> will be chosen for
elimination.
Note that the recursive call to @{method prop_solver} does not have any
@@ -283,7 +283,7 @@
The syntax diagram below refers to some syntactic categories that are
further defined in @{cite "isabelle-isar-ref"}.
- @{rail \<open>
+ \<^rail>\<open>
@@{method match} kind @'in' (pattern '\<Rightarrow>' @{syntax text} + '\<bar>')
;
kind:
@@ -295,7 +295,7 @@
fact_name: @{syntax name} @{syntax attributes}? ':'
;
args: '(' (('multi' | 'cut' nat?) + ',') ')'
- \<close>}
+ \<close>
Matching allows methods to introspect the goal state, and to implement more
explicit control flow. In the basic case, a term or fact \<open>ts\<close> is given to
@@ -313,8 +313,8 @@
text \<open>
In this example we have a structured Isar proof, with the named assumption
- \<open>X\<close> and a conclusion @{term "P"}. With the match method we can find the
- local facts @{term "Q \<longrightarrow> P"} and @{term "Q"}, binding them to separately as
+ \<open>X\<close> and a conclusion \<^term>\<open>P\<close>. With the match method we can find the
+ local facts \<^term>\<open>Q \<longrightarrow> P\<close> and \<^term>\<open>Q\<close>, binding them separately as
\<open>I\<close> and \<open>I'\<close>. We then specialize the modus-ponens rule @{thm mp [of Q P]} to
these facts to solve the goal.
\<close>
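
  A sketch of the kind of proof being described:

      lemma
        assumes X: "Q \<longrightarrow> P" "Q"
        shows P
        by (match X in I: "Q \<longrightarrow> P" and I': "Q" \<Rightarrow> \<open>rule mp [OF I I']\<close>)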
@@ -354,10 +354,9 @@
\<open>match conclusion in A \<Rightarrow> \<open>insert mp [OF I I']\<close>\<close>)
text \<open>
- In this example @{term A} is a match variable which is bound to @{term P}
+ In this example \<^term>\<open>A\<close> is a match variable which is bound to \<^term>\<open>P\<close>
upon a successful match. The inner @{method match} then matches the
- now-bound @{term A} (bound to @{term P}) against the conclusion (also @{term
- P}), finally applying the specialized rule to solve the goal.
+ now-bound \<^term>\<open>A\<close> (bound to \<^term>\<open>P\<close>) against the conclusion (also \<^term>\<open>P\<close>), finally applying the specialized rule to solve the goal.
Schematic terms like \<open>?P\<close> may also be used to specify match variables, but
the result of the match is not bound, and thus cannot be used in the inner
@@ -377,14 +376,14 @@
\<open>rule exI [where P = Q and x = y, OF U]\<close>\<close>)
text \<open>
- The first @{method match} matches the pattern @{term "\<exists>x. Q x"} against the
- current conclusion, binding the term @{term "Q"} in the inner match. Next
+ The first @{method match} matches the pattern \<^term>\<open>\<exists>x. Q x\<close> against the
+ current conclusion, binding the term \<^term>\<open>Q\<close> in the inner match. Next
the pattern \<open>Q y\<close> is matched against all premises of the current subgoal. In
- this case @{term "Q"} is fixed and @{term "y"} may be instantiated. Once a
+ this case \<^term>\<open>Q\<close> is fixed and \<^term>\<open>y\<close> may be instantiated. Once a
match is found, the local fact \<open>U\<close> is bound to the matching premise and the
- variable @{term "y"} is bound to the matching witness. The existential
- introduction rule \<open>exI:\<close>~@{thm exI} is then instantiated with @{term "y"} as
- the witness and @{term "Q"} as the predicate, with its proof obligation
+ variable \<^term>\<open>y\<close> is bound to the matching witness. The existential
+ introduction rule \<open>exI:\<close>~@{thm exI} is then instantiated with \<^term>\<open>y\<close> as
+ the witness and \<^term>\<open>Q\<close> as the predicate, with its proof obligation
solved by the local fact U (using the Isar attribute @{attribute OF}). The
following example is a trivial use of this method.
\<close>
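
  A sketch of the method being described (assuming it is named \<open>solve_ex\<close>),
  together with such a trivial use:

      method solve_ex =
        (match conclusion in "\<exists>x. Q x" for Q \<Rightarrow>
          \<open>match premises in U: "Q y" for y \<Rightarrow>
            \<open>rule exI [where P = Q and x = y, OF U]\<close>\<close>)

      lemma "P a \<Longrightarrow> \<exists>x. P x"
        by solve_ex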
@@ -413,11 +412,10 @@
\<open>erule allE [where x = y]\<close>)
text \<open>
- Here we take a single parameter @{term y} and specialize the universal
+ Here we take a single parameter \<^term>\<open>y\<close> and specialize the universal
elimination rule (@{thm allE}) to it, then attempt to apply this specialized
rule with @{method erule}. The method @{method erule} will attempt to unify
- with a universal quantifier in the premises that matches the type of @{term
- y}. Since @{keyword "premises"} causes a focus, however, there are no
+ with a universal quantifier in the premises that matches the type of \<^term>\<open>y\<close>. Since @{keyword "premises"} causes a focus, however, there are no
subgoal premises to be found and thus @{method my_allE_bad} will always
fail. If focusing instead left the premises in place, using methods like
@{method erule} would lead to unintended behaviour, specifically during
@@ -475,8 +473,8 @@
text \<open>
In this example, the inner @{method match} can find the focused premise
- @{term B}. In contrast, the @{method assumption} method would fail here due
- to @{term B} not being logically accessible.
+ \<^term>\<open>B\<close>. In contrast, the @{method assumption} method would fail here due
+ to \<^term>\<open>B\<close> not being logically accessible.
\<close>
lemma "A \<Longrightarrow> A \<and> (B \<longrightarrow> B)"
@@ -485,10 +483,8 @@
\<bar> H': B \<Rightarrow> \<open>rule H'\<close>\<close>)
text \<open>
- In this example, the only premise that exists in the first focus is @{term
- "A"}. Prior to the inner match, the rule \<open>impI\<close> changes the goal @{term "B \<longrightarrow>
- B"} into @{term "B \<Longrightarrow> B"}. A standard premise match would also include @{term
- A} as an original premise of the outer match. The \<open>local\<close> argument limits
+ In this example, the only premise that exists in the first focus is \<^term>\<open>A\<close>. Prior to the inner match, the rule \<open>impI\<close> changes the goal \<^term>\<open>B \<longrightarrow>
+ B\<close> into \<^term>\<open>B \<Longrightarrow> B\<close>. A standard premise match would also include \<^term>\<open>A\<close> as an original premise of the outer match. The \<open>local\<close> argument limits
the match to newly focused premises.
\<close>
@@ -558,8 +554,7 @@
text \<open>
In this example, the order of schematics in \<open>asm\<close> is actually \<open>?y ?x\<close>, but
we instantiate our matched rule in the opposite order. This is because the
- effective rule @{term I} was bound from the match, which declared the @{typ
- 'a} slot first and the @{typ 'b} slot second.
+ effective rule \<^term>\<open>I\<close> was bound from the match, which declared the \<^typ>\<open>'a\<close> slot first and the \<^typ>\<open>'b\<close> slot second.
To get the dynamic behaviour of @{attribute of} we can choose to invoke it
\<^emph>\<open>unchecked\<close>. This avoids trying to do any type inference for the provided
@@ -586,8 +581,8 @@
text \<open>
In this example, the pattern \<open>\<And>x :: 'a. ?P x \<Longrightarrow> ?Q x\<close> matches against the
- only premise, giving an appropriately typed slot for @{term y}. After the
- match, the resulting rule is instantiated to @{term y} and then declared as
+ only premise, giving an appropriately typed slot for \<^term>\<open>y\<close>. After the
+ match, the resulting rule is instantiated to \<^term>\<open>y\<close> and then declared as
an @{attribute intros} rule. This is then picked up by @{method prop_solver}
to solve the goal.
\<close>
@@ -611,7 +606,7 @@
done
text \<open>
- In the first @{method match}, without the \<open>(multi)\<close> argument, @{term I} is
+ In the first @{method match}, without the \<open>(multi)\<close> argument, \<^term>\<open>I\<close> is
only ever bound to one of the members of \<open>asms\<close>. This backtracks over
both possibilities (see next section); however, neither assumption in
isolation is sufficient to solve the goal. The use of the @{method solves}
@@ -623,7 +618,7 @@
Using for-fixed variables in patterns imposes additional constraints on the
results. In all previous examples, the choice of using \<open>?P\<close> or a for-fixed
- @{term P} only depended on whether or not @{term P} was mentioned in another
+ \<^term>\<open>P\<close> only depended on whether or not \<^term>\<open>P\<close> was mentioned in another
pattern or the inner method. When using a multi-match, however, all
for-fixed terms must agree in the results.
\<close>
@@ -653,10 +648,9 @@
text \<open>
Dummy patterns may be given as placeholders for unique schematics in
patterns. They implicitly receive all currently bound variables as
- arguments, and are coerced into the @{typ prop} type whenever possible. For
+ arguments, and are coerced into the \<^typ>\<open>prop\<close> type whenever possible. For
example, the trivial dummy pattern \<open>_\<close> will match any proposition. In
- contrast, by default the pattern \<open>?P\<close> is considered to have type @{typ
- bool}. It will not bind anything with meta-logical connectives (e.g. \<open>_ \<Longrightarrow> _\<close>
+ contrast, by default the pattern \<open>?P\<close> is considered to have type \<^typ>\<open>bool\<close>. It will not bind anything with meta-logical connectives (e.g. \<open>_ \<Longrightarrow> _\<close>
or \<open>_ &&& _\<close>).
\<close>
@@ -718,8 +712,8 @@
\<open>rule mp [OF I' I [THEN conjunct1]]\<close>)
text \<open>
- In this example, once a conjunction is found (@{term "P \<and> Q"}), all possible
- implications of @{term "P"} in the premises are considered, evaluating the
+ In this example, once a conjunction is found (\<^term>\<open>P \<and> Q\<close>), all possible
+ implications of \<^term>\<open>P\<close> in the premises are considered, evaluating the
inner @{method rule} with each consequent. No other conjunctions will be
considered, with method failure occurring once all implications of the form
\<open>P \<longrightarrow> ?U\<close> have been explored. Here the left-right processing of individual
@@ -735,8 +729,8 @@
text \<open>
In this example, the first lemma is solved by \<open>foo\<^sub>2\<close>, by first picking
- @{term "A \<longrightarrow> D"} for \<open>I'\<close>, then backtracking and ultimately succeeding after
- picking @{term "A \<longrightarrow> C"}. In the second lemma, however, @{term "C \<and> D"} is
+ \<^term>\<open>A \<longrightarrow> D\<close> for \<open>I'\<close>, then backtracking and ultimately succeeding after
+ picking \<^term>\<open>A \<longrightarrow> C\<close>. In the second lemma, however, \<^term>\<open>C \<and> D\<close> is
matched first, the second pattern in the match cannot be found and so the
method fails, falling through to @{method prop_solver}.
@@ -768,14 +762,14 @@
text \<open>
Intuitively it seems like this proof should fail to check. The first match
- result, which binds @{term I} to the first two members of \<open>asms\<close>, fails the
- second inner match due to binding @{term P} to @{term A}. Backtracking then
- attempts to bind @{term I} to the third member of \<open>asms\<close>. This passes all
+ result, which binds \<^term>\<open>I\<close> to the first two members of \<open>asms\<close>, fails the
+ second inner match due to binding \<^term>\<open>P\<close> to \<^term>\<open>A\<close>. Backtracking then
+ attempts to bind \<^term>\<open>I\<close> to the third member of \<open>asms\<close>. This passes all
inner matches, but fails when @{method rule} cannot successfully apply this
to the current goal. After this, a valid match that is produced by the
- unifier is one which binds @{term P} to simply \<open>\<lambda>a. A ?x\<close>. The first inner
- match succeeds because \<open>\<lambda>a. A ?x\<close> does not match @{term A}. The next inner
- match succeeds because @{term I} has only been bound to the first member of
+ unifier is one which binds \<^term>\<open>P\<close> to simply \<open>\<lambda>a. A ?x\<close>. The first inner
+ match succeeds because \<open>\<lambda>a. A ?x\<close> does not match \<^term>\<open>A\<close>. The next inner
+ match succeeds because \<^term>\<open>I\<close> has only been bound to the first member of
\<open>asms\<close>. This is due to @{method match} considering \<open>\<lambda>a. A ?x\<close> and \<open>\<lambda>a. A ?y\<close>
as distinct terms.
@@ -808,7 +802,7 @@
text \<open>
For the first member of \<open>asms\<close> the dummy pattern successfully matches
- against @{term "B \<Longrightarrow> C"} and so the proof is successful.
+ against \<^term>\<open>B \<Longrightarrow> C\<close> and so the proof is successful.
\<close>
lemma
@@ -820,16 +814,16 @@
text \<open>
This proof will fail to solve the goal. Our match pattern will only match
- rules which have a single premise, and conclusion @{term C}, so the first
+ rules which have a single premise, and conclusion \<^term>\<open>C\<close>, so the first
member of \<open>asms\<close> is not bound and thus the proof fails. Matching a pattern
- of the form @{term "P \<Longrightarrow> Q"} against this fact will bind @{term "P"} to
- @{term "A"} and @{term Q} to @{term "B \<Longrightarrow> C"}. Our pattern, with a concrete
- @{term "C"} in the conclusion, will fail to match this fact.
+ of the form \<^term>\<open>P \<Longrightarrow> Q\<close> against this fact will bind \<^term>\<open>P\<close> to
+ \<^term>\<open>A\<close> and \<^term>\<open>Q\<close> to \<^term>\<open>B \<Longrightarrow> C\<close>. Our pattern, with a concrete
+ \<^term>\<open>C\<close> in the conclusion, will fail to match this fact.
To express our desired match, we may \<^emph>\<open>uncurry\<close> our rules before matching
against them. This forms a meta-conjunction of all premises in a fact, so
that only one implication remains. For example the uncurried version of
- @{term "A \<Longrightarrow> B \<Longrightarrow> C"} is @{term "A &&& B \<Longrightarrow> C"}. This will now match our
+ \<^term>\<open>A \<Longrightarrow> B \<Longrightarrow> C\<close> is \<^term>\<open>A &&& B \<Longrightarrow> C\<close>. This will now match our
desired pattern \<open>_ \<Longrightarrow> C\<close>, and can be \<^emph>\<open>curried\<close> after the match to put it
back into normal form.
\<close>
@@ -861,8 +855,7 @@
In the first @{method match} we attempt to find a member of \<open>asms\<close> which
matches our goal precisely. This fails due to no such member existing. The
second match reverses the role of the fact in the match, by first giving a
- general pattern @{term P}. This bound pattern is then matched against @{term
- "A y"}. In this case, @{term P} is bound to \<open>A ?x\<close> and so it successfully
+ general pattern \<^term>\<open>P\<close>. This bound pattern is then matched against \<^term>\<open>A y\<close>. In this case, \<^term>\<open>P\<close> is bound to \<open>A ?x\<close> and so it successfully
matches.
\<close>
@@ -885,7 +878,7 @@
text \<open>
In this example the type \<open>'b\<close> is matched to \<open>'a\<close>, however statically they
are formally distinct types. The first match binds \<open>'b\<close> while the inner
- match serves to coerce @{term y} into having the type \<open>'b\<close>. This allows the
+ match serves to coerce \<^term>\<open>y\<close> into having the type \<open>'b\<close>. This allows the
rule instantiation to successfully apply.
\<close>
@@ -895,8 +888,7 @@
section \<open>Tracing methods\<close>
text \<open>
- Method tracing is supported by auxiliary print methods provided by @{theory
- "HOL-Eisbach.Eisbach_Tools"}. These include @{method print_fact}, @{method
+ Method tracing is supported by auxiliary print methods provided by \<^theory>\<open>HOL-Eisbach.Eisbach_Tools\<close>. These include @{method print_fact}, @{method
print_term} and @{method print_type}. Whenever a print method is evaluated
it leaves the goal unchanged and writes its argument as tracing output.
@@ -957,7 +949,7 @@
text \<open>
Here the new @{method splits} method transforms the goal to use only logical
- connectives: @{term "L = [] \<longrightarrow> False \<and> (\<forall>x y. L = x # y \<longrightarrow> True)"}. This goal
+ connectives: \<^term>\<open>L = [] \<longrightarrow> False \<and> (\<forall>x y. L = x # y \<longrightarrow> True)\<close>. This goal
is then in a form solvable by @{method prop_solver} when given the universal
quantifier introduction rule \<open>allI\<close>.
\<close>
--- a/src/Doc/Eisbach/Preface.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Eisbach/Preface.thy Sat Jan 05 17:24:33 2019 +0100
@@ -32,9 +32,8 @@
well as the @{method match} method, and discusses their integration
with existing Isar concepts such as @{command named_theorems}.
- These commands are provided by theory @{theory "HOL-Eisbach.Eisbach"}: it
- needs to be imported by all Eisbach applications. Theory theory @{theory
- "HOL-Eisbach.Eisbach_Tools"} provides additional proof methods and
+ These commands are provided by theory \<^theory>\<open>HOL-Eisbach.Eisbach\<close>: it
+ needs to be imported by all Eisbach applications. Theory \<^theory>\<open>HOL-Eisbach.Eisbach_Tools\<close> provides additional proof methods and
attributes that are occasionally useful.
\<close>
--- a/src/Doc/Functions/Functions.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Functions/Functions.thy Sat Jan 05 17:24:33 2019 +0100
@@ -25,8 +25,7 @@
giving its name, its type,
and a set of defining recursive equations.
If we leave out the type, the most general type will be
- inferred, which can sometimes lead to surprises: Since both @{term
- "1::nat"} and \<open>+\<close> are overloaded, we would end up
+ inferred, which can sometimes lead to surprises: Since both \<^term>\<open>1::nat\<close> and \<open>+\<close> are overloaded, we would end up
with \<open>fib :: nat \<Rightarrow> 'a::{one,plus}\<close>.
\<close>
@@ -88,13 +87,13 @@
Isabelle provides customized induction rules for recursive
functions. These rules follow the recursive structure of the
definition. Here is the rule @{thm [source] sep.induct} arising from the
- above definition of @{const sep}:
+ above definition of \<^const>\<open>sep\<close>:
@{thm [display] sep.induct}
We have a step case for lists with at least two elements, and two
base cases for the zero- and the one-element list. Here is a simple
- proof about @{const sep} and @{const map}
+ proof about \<^const>\<open>sep\<close> and \<^const>\<open>map\<close>:
\<close>
lemma "map f (sep x ys) = sep (f x) (map f ys)"
@@ -219,7 +218,7 @@
implicitly refers to the last function definition.
The \<open>relation\<close> method takes a relation of
- type @{typ "('a \<times> 'a) set"}, where @{typ "'a"} is the argument type of
+ type \<^typ>\<open>('a \<times> 'a) set\<close>, where \<^typ>\<open>'a\<close> is the argument type of
the function. If the function has multiple curried arguments, then
these are packed together into a tuple, as happened in the above
example.
@@ -259,8 +258,7 @@
This corresponds to a nested
loop where one index counts up and the other down. Termination can
be proved using a lexicographic combination of two measures, namely
- the value of \<open>N\<close> and the above difference. The @{const
- "measures"} combinator generalizes \<open>measure\<close> by taking a
+ the value of \<open>N\<close> and the above difference. The \<^const>\<open>measures\<close> combinator generalizes \<open>measure\<close> by taking a
list of measure functions.
\<close>
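
  A sketch of such a nested-loop function and its lexicographic termination
  argument (name hypothetical):

      function two_loops :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
        "two_loops i N = (if i > N
           then (if N = 0 then 0 else two_loops 0 (N - 1))
           else i + two_loops (Suc i) N)"
        by pat_completeness auto

      termination
        by (relation "measures [\<lambda>(i, N). N, \<lambda>(i, N). N + 1 - i]") auto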
@@ -368,7 +366,7 @@
text \<open>
To eliminate the mutual dependencies, Isabelle internally
creates a single function operating on the sum
- type @{typ "nat + nat"}. Then, @{const even} and @{const odd} are
+ type \<^typ>\<open>nat + nat\<close>. Then, \<^const>\<open>even\<close> and \<^const>\<open>odd\<close> are
defined as projections. Consequently, termination has to be proved
simultaneously for both functions, by specifying a measure on the
sum type:
@@ -390,7 +388,7 @@
generally requires simultaneous induction. The induction rule @{thm [source] "even_odd.induct"}
generated from the above definition reflects this.
- Let us prove something about @{const even} and @{const odd}:
+ Let us prove something about \<^const>\<open>even\<close> and \<^const>\<open>odd\<close>:
\<close>
lemma even_odd_mod2:
@@ -405,7 +403,7 @@
text \<open>
We get four subgoals, which correspond to the clauses in the
- definition of @{const even} and @{const odd}:
+ definition of \<^const>\<open>even\<close> and \<^const>\<open>odd\<close>:
@{subgoals[display,indent=0]}
Simplification solves the first two goals, leaving us with two
statements about the \<open>mod\<close> operation to prove:
@@ -428,7 +426,7 @@
In proofs like this, the simultaneous induction is really essential:
Even if we are just interested in one of the results, the other
one is necessary to strengthen the induction hypothesis. If we leave
- out the statement about @{const odd} and just write @{term True} instead,
+ out the statement about \<^const>\<open>odd\<close> and just write \<^term>\<open>True\<close> instead,
the same proof fails:
\<close>
@@ -471,7 +469,7 @@
@{thm[display] list_to_option.elims}
\noindent
- This lets us eliminate an assumption of the form @{prop "list_to_option xs = y"} and replace it
+ This lets us eliminate an assumption of the form \<^prop>\<open>list_to_option xs = y\<close> and replace it
with the two cases, e.g.:
\<close>
@@ -488,7 +486,7 @@
text \<open>
Sometimes it is convenient to derive specialized versions of the \<open>elim\<close> rules above and
keep them around as facts explicitly. For example, it is natural to show that if
- @{prop "list_to_option xs = Some y"}, then @{term xs} must be a singleton. The command
+ \<^prop>\<open>list_to_option xs = Some y\<close>, then \<^term>\<open>xs\<close> must be a singleton. The command
\cmd{fun\_cases} derives such facts automatically, by instantiating and simplifying the general
elimination rules given some pattern:
\<close>
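(A sketch of such a derived fact, assuming the \<open>list_to_option\<close> function
discussed above; not part of this changeset:)

fun_cases list_to_option_SomeE[elim]: "list_to_option xs = Some y"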
@@ -511,15 +509,15 @@
Up to now, we used pattern matching only on datatypes, and the
patterns were always disjoint and complete, and if they weren't,
they were made disjoint automatically like in the definition of
- @{const "sep"} in \S\ref{patmatch}.
+ \<^const>\<open>sep\<close> in \S\ref{patmatch}.
This automatic splitting can significantly increase the number of
equations involved, and this is not always desirable. The following
example shows the problem:
Suppose we are modeling incomplete knowledge about the world by a
- three-valued datatype, which has values @{term "T"}, @{term "F"}
- and @{term "X"} for true, false and uncertain propositions, respectively.
+ three-valued datatype, which has values \<^term>\<open>T\<close>, \<^term>\<open>F\<close>
+ and \<^term>\<open>X\<close> for true, false and uncertain propositions, respectively.
\<close>
datatype P3 = T | F | X
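(For orientation, the definition discussed below looks approximately as
follows; a sketch, not part of this changeset:)

fun And :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
where
  "And T p = p"
| "And p T = p"
| "And p F = F"
| "And F p = F"
| "And X X = X"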
@@ -538,7 +536,7 @@
text \<open>
This definition is useful, because the equations can directly be used
as simplification rules. But the patterns overlap: For example,
- the expression @{term "And T T"} is matched by both the first and
+ the expression \<^term>\<open>And T T\<close> is matched by both the first and
the second equation. By default, Isabelle makes the patterns disjoint by
splitting them up, producing instances:
\<close>
@@ -553,14 +551,14 @@
\begin{enumerate}
\item If the datatype has many constructors, there can be an
- explosion of equations. For @{const "And"}, we get seven instead of
+ explosion of equations. For \<^const>\<open>And\<close>, we get seven instead of
five equations, which can be tolerated, but this is just a small
example.
\item Since splitting makes the equations \qt{less general}, they
- do not always match in rewriting. While the term @{term "And x F"}
- can be simplified to @{term "F"} with the original equations, a
- (manual) case split on @{term "x"} is now necessary.
+ do not always match in rewriting. While the term \<^term>\<open>And x F\<close>
+ can be simplified to \<^term>\<open>F\<close> with the original equations, a
+ (manual) case split on \<^term>\<open>x\<close> is now necessary.
\item The splitting also concerns the induction rule @{thm [source]
"And.induct"}. Instead of five premises it now has seven, which
@@ -573,8 +571,8 @@
If we do not want the automatic splitting, we can switch it off by
leaving out the \cmd{sequential} option. However, we will have to
prove that our pattern matching is consistent\footnote{This prevents
- us from defining something like @{term "f x = True"} and @{term "f x
- = False"} simultaneously.}:
+ us from defining something like \<^term>\<open>f x = True\<close> and \<^term>\<open>f x
+ = False\<close> simultaneously.}:
\<close>
function And2 :: "P3 \<Rightarrow> P3 \<Rightarrow> P3"
@@ -592,11 +590,11 @@
@{subgoals[display,indent=0]}\vspace{-1.2em}\hspace{3cm}\vdots\vspace{1.2em}
The first subgoal expresses the completeness of the patterns. It has
- the form of an elimination rule and states that every @{term x} of
+ the form of an elimination rule and states that every \<^term>\<open>x\<close> of
the function's input type must match at least one of the patterns\footnote{Completeness could
be equivalently stated as a disjunction of existential statements:
-@{term "(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
- (\<exists>p. x = (F, p)) \<or> (x = (X, X))"}, and you can use the method \<open>atomize_elim\<close> to get that form instead.}. If the patterns just involve
+\<^term>\<open>(\<exists>p. x = (T, p)) \<or> (\<exists>p. x = (p, T)) \<or> (\<exists>p. x = (p, F)) \<or>
+ (\<exists>p. x = (F, p)) \<or> (x = (X, X))\<close>, and you can use the method \<open>atomize_elim\<close> to get that form instead.}. If the patterns just involve
datatypes, we can solve it with the \<open>pat_completeness\<close>
method:
\<close>
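(A sketch of discharging both obligations, followed by the trivial termination
proof for the non-recursive \<open>And2\<close>; not part of this changeset:)

by pat_completeness auto

termination by (relation "{}") simp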
@@ -640,8 +638,8 @@
This kind of matching is again justified by the proof of pattern
completeness and compatibility.
The proof obligation for pattern completeness states that every natural number is
- either @{term "0::nat"}, @{term "1::nat"} or @{term "n +
- (2::nat)"}:
+ either \<^term>\<open>0::nat\<close>, \<^term>\<open>1::nat\<close> or \<^term>\<open>n +
+ (2::nat)\<close>:
@{subgoals[display,indent=0,goals_limit=1]}
@@ -746,8 +744,8 @@
section \<open>Partiality\<close>
text \<open>
- In HOL, all functions are total. A function @{term "f"} applied to
- @{term "x"} always has the value @{term "f x"}, and there is no notion
+ In HOL, all functions are total. A function \<^term>\<open>f\<close> applied to
+ \<^term>\<open>x\<close> always has the value \<^term>\<open>f x\<close>, and there is no notion
of undefinedness.
This is why we have to do termination
proofs when defining functions: The proof justifies that the
@@ -772,8 +770,8 @@
subsection \<open>Domain predicates\<close>
text \<open>
- The trick is that Isabelle has not only defined the function @{const findzero}, but also
- a predicate @{term "findzero_dom"} that characterizes the values where the function
+ The trick is that Isabelle has not only defined the function \<^const>\<open>findzero\<close>, but also
+ a predicate \<^term>\<open>findzero_dom\<close> that characterizes the values where the function
terminates: the \emph{domain} of the function. If we treat a
partial function just as a total function with an additional domain
predicate, we can derive simplification and
@@ -793,14 +791,14 @@
text \<open>
Remember that all we
are doing here is use some tricks to make a total function appear
- as if it was partial. We can still write the term @{term "findzero
- (\<lambda>x. 1) 0"} and like any other term of type @{typ nat} it is equal
+ as if it were partial. We can still write the term \<^term>\<open>findzero
+ (\<lambda>x. 1) 0\<close> and like any other term of type \<^typ>\<open>nat\<close> it is equal
to some natural number, although we might not be able to find out
which one. The function is \emph{underdefined}.
But it is defined enough to prove something interesting about it. We
- can prove that if @{term "findzero f n"}
- terminates, it indeed returns a zero of @{term f}:
+ can prove that if \<^term>\<open>findzero f n\<close>
+ terminates, it indeed returns a zero of \<^term>\<open>f\<close>:
\<close>
lemma findzero_zero: "findzero_dom (f, n) \<Longrightarrow> f (findzero f n) = 0"
@@ -815,9 +813,8 @@
@{subgoals[display,indent=0]}
\noindent The hypothesis in our lemma was used to satisfy the first premise in
- the induction rule. However, we also get @{term
- "findzero_dom (f, n)"} as a local assumption in the induction step. This
- allows unfolding @{term "findzero f n"} using the \<open>psimps\<close>
+ the induction rule. However, we also get \<^term>\<open>findzero_dom (f, n)\<close> as a local assumption in the induction step. This
+ allows unfolding \<^term>\<open>findzero f n\<close> using the \<open>psimps\<close>
rule, and the rest is trivial.
\<close>
apply (simp add: findzero.psimps)
@@ -829,7 +826,7 @@
complicated proof written in Isar. It is verbose enough to show how
partiality comes into play: From the partial induction, we get an
additional domain condition hypothesis. Observe how this condition
- is applied when calls to @{term findzero} are unfolded.
+ is applied when calls to \<^term>\<open>findzero\<close> are unfolded.
\<close>
text_raw \<open>
@@ -876,7 +873,7 @@
Now that we have proved some interesting properties about our
function, we should turn to the domain predicate and see if it is
actually true for some values. Otherwise we would have just proved
- lemmas with @{term False} as a premise.
+ lemmas with \<^term>\<open>False\<close> as a premise.
Essentially, we need some introduction rules for \<open>findzero_dom\<close>. The function package can prove such domain
introduction rules automatically. But since they are not used very
@@ -912,7 +909,7 @@
Figure \ref{findzero_term} gives a detailed Isar proof of the fact
that \<open>findzero\<close> terminates if there is a zero which is greater
- or equal to @{term n}. First we derive two useful rules which will
+ or equal to \<^term>\<open>n\<close>. First we derive two useful rules which will
solve the base case and the step case of the induction. The
induction is then straightforward, except for the unusual induction
principle.
@@ -983,28 +980,27 @@
@{abbrev[display] findzero_dom}
- The domain predicate is the \emph{accessible part} of a relation @{const
- findzero_rel}, which was also created internally by the function
- package. @{const findzero_rel} is just a normal
+ The domain predicate is the \emph{accessible part} of a relation \<^const>\<open>findzero_rel\<close>, which was also created internally by the function
+ package. \<^const>\<open>findzero_rel\<close> is just a normal
inductive predicate, so we can inspect its definition by
looking at the introduction rules @{thm [source] findzero_rel.intros}.
In our case there is just a single rule:
@{thm[display] findzero_rel.intros}
- The predicate @{const findzero_rel}
+ The predicate \<^const>\<open>findzero_rel\<close>
describes the \emph{recursion relation} of the function
definition. The recursion relation is a binary relation on
the arguments of the function that relates each argument to its
recursive calls. In general, there is one introduction rule for each
recursive call.
- The predicate @{term "Wellfounded.accp findzero_rel"} is the accessible part of
+ The predicate \<^term>\<open>Wellfounded.accp findzero_rel\<close> is the accessible part of
that relation. An argument belongs to the accessible part if it can
be reached in a finite number of steps (cf.~its definition in \<open>Wellfounded.thy\<close>).
Since the domain predicate is just an abbreviation, you can use
- lemmas for @{const Wellfounded.accp} and @{const findzero_rel} directly. Some
+ lemmas for \<^const>\<open>Wellfounded.accp\<close> and \<^const>\<open>findzero_rel\<close> directly. Some
lemmas which are occasionally useful are @{thm [source] accpI}, @{thm [source]
accp_downward}, and of course the introduction and elimination rules
for the recursion relation @{thm [source] "findzero_rel.intros"} and @{thm
@@ -1041,7 +1037,7 @@
@{subgoals[display]}
- Of course this statement is true, since we know that @{const nz} is
+ Of course this statement is true, since we know that \<^const>\<open>nz\<close> is
the zero function. And in fact we have no problem proving this
property by induction.
\<close>
@@ -1051,7 +1047,7 @@
text \<open>
We formulate this as a partial correctness lemma with the condition
- @{term "nz_dom n"}. This allows us to prove it with the \<open>pinduct\<close> rule before we have proved termination. With this lemma,
+ \<^term>\<open>nz_dom n\<close>. This allows us to prove it with the \<open>pinduct\<close> rule before we have proved termination. With this lemma,
the termination proof works as expected:
\<close>
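(A sketch under the assumption that \<open>nz\<close> is the usual nested-zero function;
not part of this changeset:)

lemma nz_is_zero: "nz_dom n \<Longrightarrow> nz n = 0"
  by (induct rule: nz.pinduct) (auto simp: nz.psimps)

termination nz
  by (relation "measure (\<lambda>n. n)") (simp_all add: nz_is_zero)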
@@ -1111,8 +1107,7 @@
text \<open>
Higher-order recursion occurs when recursive calls
- are passed as arguments to higher-order combinators such as @{const
- map}, @{term filter} etc.
+ are passed as arguments to higher-order combinators such as \<^const>\<open>map\<close>, \<^term>\<open>filter\<close> etc.
As an example, imagine a datatype of n-ary trees:
\<close>
@@ -1122,7 +1117,7 @@
text \<open>\noindent We can define a function which swaps the left and right subtrees recursively, using the
- list functions @{const rev} and @{const map}:\<close>
+ list functions \<^const>\<open>rev\<close> and \<^const>\<open>map\<close>:\<close>
fun mirror :: "'a tree \<Rightarrow> 'a tree"
where
@@ -1139,39 +1134,37 @@
As usual, we have to give a wellfounded relation, such that the
arguments of the recursive calls get smaller. But what exactly are
the arguments of the recursive calls when mirror is given as an
- argument to @{const map}? Isabelle gives us the
+ argument to \<^const>\<open>map\<close>? Isabelle gives us the
subgoals
@{subgoals[display,indent=0]}
- So the system seems to know that @{const map} only
- applies the recursive call @{term "mirror"} to elements
- of @{term "l"}, which is essential for the termination proof.
+ So the system seems to know that \<^const>\<open>map\<close> only
+ applies the recursive call \<^term>\<open>mirror\<close> to elements
+ of \<^term>\<open>l\<close>, which is essential for the termination proof.
- This knowledge about @{const map} is encoded in so-called congruence rules,
+ This knowledge about \<^const>\<open>map\<close> is encoded in so-called congruence rules,
which are special theorems known to the \cmd{function} command. The
- rule for @{const map} is
+ rule for \<^const>\<open>map\<close> is
@{thm[display] map_cong}
- You can read this in the following way: Two applications of @{const
- map} are equal, if the list arguments are equal and the functions
+ You can read this in the following way: Two applications of \<^const>\<open>map\<close> are equal if the list arguments are equal and the functions
coincide on the elements of the list. This means that for the value
- @{term "map f l"} we only have to know how @{term f} behaves on
- the elements of @{term l}.
+ \<^term>\<open>map f l\<close> we only have to know how \<^term>\<open>f\<close> behaves on
+ the elements of \<^term>\<open>l\<close>.
Usually, one such congruence rule is
needed for each higher-order construct that is used when defining
- new functions. In fact, even basic functions like @{const
- If} and @{const Let} are handled by this mechanism. The congruence
- rule for @{const If} states that the \<open>then\<close> branch is only
+ new functions. In fact, even basic functions like \<^const>\<open>If\<close> and \<^const>\<open>Let\<close> are handled by this mechanism. The congruence
+ rule for \<^const>\<open>If\<close> states that the \<open>then\<close> branch is only
relevant if the condition is true, and the \<open>else\<close> branch only if it
is false:
@{thm[display] if_cong}
Congruence rules can be added to the
- function package by giving them the @{term fundef_cong} attribute.
+ function package by giving them the \<^term>\<open>fundef_cong\<close> attribute.
The constructs that are predefined in Isabelle usually
come with the respective congruence rules.
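(E.g.\ the rule for \<^const>\<open>map\<close> can be (re-)registered as follows; a sketch,
not part of this changeset:)

declare map_cong [fundef_cong]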
--- a/src/Doc/How_to_Prove_it/How_to_Prove_it.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/How_to_Prove_it/How_to_Prove_it.thy Sat Jan 05 17:24:33 2019 +0100
@@ -4,7 +4,7 @@
begin
(*>*)
text\<open>
-\chapter{@{theory Main}}
+\chapter{\<^theory>\<open>Main\<close>}
\section{Natural numbers}
@@ -19,7 +19,7 @@
\begin{quote}
(\<open>induction n rule: less_induct\<close>)
\end{quote}
-In fact, it is not restricted to @{typ nat} but works for any wellfounded
+In fact, it is not restricted to \<^typ>\<open>nat\<close> but works for any wellfounded
order \<open><\<close>.
There are many more special induction rules. You can find all of them
@@ -29,7 +29,7 @@
\end{quote}
-\paragraph{How to convert numerals into @{const Suc} terms}~\\
+\paragraph{How to convert numerals into \<^const>\<open>Suc\<close> terms}~\\
Solution: simplify with the lemma @{thm[source] numeral_eq_Suc}.
\noindent
@@ -40,7 +40,7 @@
by (simp add: numeral_eq_Suc)
text\<open>This is a typical situation: function ``\<open>^\<close>'' is defined
-by pattern matching on @{const Suc} but is applied to a numeral.
+by pattern matching on \<^const>\<open>Suc\<close> but is applied to a numeral.
Note: simplification with @{thm[source] numeral_eq_Suc} will convert all numerals.
One can be more specific with the lemmas @{thm [source] numeral_2_eq_2}
@@ -73,10 +73,10 @@
%Tobias Nipkow
\section{Algebraic simplification}
-On the numeric types @{typ nat}, @{typ int} and @{typ real},
+On the numeric types \<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close> and \<^typ>\<open>real\<close>,
proof method \<open>simp\<close> and friends can deal with a limited amount of linear
arithmetic (no multiplication except by numerals) and method \<open>arith\<close> can
-handle full linear arithmetic (on @{typ nat}, @{typ int} including quantifiers).
+handle full linear arithmetic (on \<^typ>\<open>nat\<close>, \<^typ>\<open>int\<close> including quantifiers).
But what to do when proper multiplication is involved?
At this point it can be helpful to simplify with the lemma list
@{thm [source] algebra_simps}. Examples:
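(A typical instance, in the spirit of the elided examples; a sketch:)

lemma fixes x y :: int shows "(x - y) * (x + y) = x*x - y*y"
  by (simp add: algebra_simps)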
@@ -95,10 +95,10 @@
terms are rewritten into a normal form by multiplying out,
rearranging sums and products into some canonical order.
In the above lemma the normal form will be something like
-@{term"x*y + y*y - x*z - y*z"}.
-This works for concrete types like @{typ int} as well as for classes like
-@{class comm_ring} (commutative rings). For some classes (e.g.\ @{class ring}
-and @{class comm_ring}) this yields a decision procedure for equality.
+\<^term>\<open>x*y + y*y - x*z - y*z\<close>.
+This works for concrete types like \<^typ>\<open>int\<close> as well as for classes like
+\<^class>\<open>comm_ring\<close> (commutative rings). For some classes (e.g.\ \<^class>\<open>ring\<close>
+and \<^class>\<open>comm_ring\<close>) this yields a decision procedure for equality.
Additional function and predicate symbols are not a problem either:
\<close>
@@ -107,8 +107,8 @@
by(simp add: algebra_simps)
text\<open>Here @{thm[source]algebra_simps} merely has the effect of rewriting
-@{term"y*x"} to @{term"x*y"} (or the other way around). This yields
-a problem of the form @{prop"2*t - t < t + (1::int)"} and we are back in the
+\<^term>\<open>y*x\<close> to \<^term>\<open>x*y\<close> (or the other way around). This yields
+a problem of the form \<^prop>\<open>2*t - t < t + (1::int)\<close> and we are back in the
realm of linear arithmetic.
Because @{thm[source]algebra_simps} multiplies out, terms can explode.
--- a/src/Doc/Implementation/Eq.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Eq.thy Sat Jan 05 17:24:33 2019 +0100
@@ -43,12 +43,12 @@
same reasoning schemes as theorems that can be composed like object-level
rules as explained in \secref{sec:obj-rules}.
- For example, @{ML Thm.symmetric} as Pure inference is an ML function that
+ For example, \<^ML>\<open>Thm.symmetric\<close> as Pure inference is an ML function that
maps a theorem \<open>th\<close> stating \<open>t \<equiv> u\<close> to one stating \<open>u \<equiv> t\<close>. In contrast,
@{thm [source] Pure.symmetric} as Pure theorem expresses the same reasoning
in declarative form. If used like \<open>th [THEN Pure.symmetric]\<close> in Isar source
notation, it achieves a similar effect as the ML inference function,
- although the rule attribute @{attribute THEN} or ML operator @{ML "op RS"}
+ although the rule attribute @{attribute THEN} or ML operator \<^ML>\<open>op RS\<close>
involves the full machinery of higher-order unification (modulo
\<open>\<beta>\<eta>\<close>-conversion) and lifting of \<open>\<And>/\<Longrightarrow>\<close> contexts.
\<close>
@@ -99,21 +99,21 @@
@{index_ML fold_goals_tac: "Proof.context -> thm list -> tactic"} \\
\end{mldecls}
- \<^descr> @{ML rewrite_rule}~\<open>ctxt rules thm\<close> rewrites the whole theorem by the
+ \<^descr> \<^ML>\<open>rewrite_rule\<close>~\<open>ctxt rules thm\<close> rewrites the whole theorem by the
given rules.
- \<^descr> @{ML rewrite_goals_rule}~\<open>ctxt rules thm\<close> rewrites the outer premises of
+ \<^descr> \<^ML>\<open>rewrite_goals_rule\<close>~\<open>ctxt rules thm\<close> rewrites the outer premises of
the given theorem. Interpreted as a goal state
(\secref{sec:tactical-goals}), this means rewriting all subgoals (in the same
- manner as @{ML rewrite_goals_tac}).
+ manner as \<^ML>\<open>rewrite_goals_tac\<close>).
- \<^descr> @{ML rewrite_goal_tac}~\<open>ctxt rules i\<close> rewrites subgoal \<open>i\<close> by the given
+ \<^descr> \<^ML>\<open>rewrite_goal_tac\<close>~\<open>ctxt rules i\<close> rewrites subgoal \<open>i\<close> by the given
rewrite rules.
- \<^descr> @{ML rewrite_goals_tac}~\<open>ctxt rules\<close> rewrites all subgoals by the given
+ \<^descr> \<^ML>\<open>rewrite_goals_tac\<close>~\<open>ctxt rules\<close> rewrites all subgoals by the given
rewrite rules.
- \<^descr> @{ML fold_goals_tac}~\<open>ctxt rules\<close> essentially uses @{ML rewrite_goals_tac}
+ \<^descr> \<^ML>\<open>fold_goals_tac\<close>~\<open>ctxt rules\<close> essentially uses \<^ML>\<open>rewrite_goals_tac\<close>
with the symmetric form of each member of \<open>rules\<close>, re-ordered to fold longer
expressions first. This supports the idea of folding primitive definitions that
appear in expanded form in the proof state.
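(A usage sketch with a hypothetical definition \<open>d_def\<close>; not part of this
changeset:)

apply (tactic \<open>rewrite_goals_tac \<^context> @{thms d_def}\<close>)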
--- a/src/Doc/Implementation/Integration.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Integration.thy Sat Jan 05 17:24:33 2019 +0100
@@ -44,20 +44,19 @@
@{index_ML Toplevel.proof_of: "Toplevel.state -> Proof.state"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Toplevel.state} represents Isar toplevel states, which are
+ \<^descr> Type \<^ML_type>\<open>Toplevel.state\<close> represents Isar toplevel states, which are
normally manipulated through the concept of toplevel transitions only
(\secref{sec:toplevel-transition}).
- \<^descr> @{ML Toplevel.UNDEF} is raised for undefined toplevel operations. Many
- operations work only partially for certain cases, since @{ML_type
- Toplevel.state} is a sum type.
+ \<^descr> \<^ML>\<open>Toplevel.UNDEF\<close> is raised for undefined toplevel operations. Many
+ operations work only partially for certain cases, since \<^ML_type>\<open>Toplevel.state\<close> is a sum type.
+
+ \<^descr> \<^ML>\<open>Toplevel.is_toplevel\<close>~\<open>state\<close> checks for an empty toplevel state.
- \<^descr> @{ML Toplevel.is_toplevel}~\<open>state\<close> checks for an empty toplevel state.
+ \<^descr> \<^ML>\<open>Toplevel.theory_of\<close>~\<open>state\<close> selects the background theory of \<open>state\<close>;
+ it raises \<^ML>\<open>Toplevel.UNDEF\<close> for an empty toplevel state.
- \<^descr> @{ML Toplevel.theory_of}~\<open>state\<close> selects the background theory of \<open>state\<close>,
- it raises @{ML Toplevel.UNDEF} for an empty toplevel state.
-
- \<^descr> @{ML Toplevel.proof_of}~\<open>state\<close> selects the Isar proof state if available,
+ \<^descr> \<^ML>\<open>Toplevel.proof_of\<close>~\<open>state\<close> selects the Isar proof state if available,
otherwise it raises an error.
\<close>
@@ -110,23 +109,23 @@
Toplevel.transition -> Toplevel.transition"} \\
\end{mldecls}
- \<^descr> @{ML Toplevel.keep}~\<open>tr\<close> adjoins a diagnostic function.
+ \<^descr> \<^ML>\<open>Toplevel.keep\<close>~\<open>tr\<close> adjoins a diagnostic function.
- \<^descr> @{ML Toplevel.theory}~\<open>tr\<close> adjoins a theory transformer.
+ \<^descr> \<^ML>\<open>Toplevel.theory\<close>~\<open>tr\<close> adjoins a theory transformer.
- \<^descr> @{ML Toplevel.theory_to_proof}~\<open>tr\<close> adjoins a global goal function, which
+ \<^descr> \<^ML>\<open>Toplevel.theory_to_proof\<close>~\<open>tr\<close> adjoins a global goal function, which
turns a theory into a proof state. The theory may be changed before entering
the proof; the generic Isar goal setup includes an \<^verbatim>\<open>after_qed\<close> argument
that specifies how to apply the proven result to the enclosing context, when
the proof is finished.
- \<^descr> @{ML Toplevel.proof}~\<open>tr\<close> adjoins a deterministic proof command, with a
+ \<^descr> \<^ML>\<open>Toplevel.proof\<close>~\<open>tr\<close> adjoins a deterministic proof command, with a
singleton result.
- \<^descr> @{ML Toplevel.proofs}~\<open>tr\<close> adjoins a general proof command, with zero or
+ \<^descr> \<^ML>\<open>Toplevel.proofs\<close>~\<open>tr\<close> adjoins a general proof command, with zero or
more result states (represented as a lazy list).
- \<^descr> @{ML Toplevel.end_proof}~\<open>tr\<close> adjoins a concluding proof command, that
+ \<^descr> \<^ML>\<open>Toplevel.end_proof\<close>~\<open>tr\<close> adjoins a concluding proof command, that
returns the resulting theory, after applying the resulting facts to the
target context.
\<close>
@@ -157,17 +156,17 @@
@{index_ML Thy_Info.register_thy: "theory -> unit"} \\
\end{mldecls}
- \<^descr> @{ML use_thy}~\<open>A\<close> ensures that theory \<open>A\<close> is fully up-to-date wrt.\ the
+ \<^descr> \<^ML>\<open>use_thy\<close>~\<open>A\<close> ensures that theory \<open>A\<close> is fully up-to-date wrt.\ the
external file store; outdated ancestors are reloaded on demand.
- \<^descr> @{ML Thy_Info.get_theory}~\<open>A\<close> retrieves the theory value presently
+ \<^descr> \<^ML>\<open>Thy_Info.get_theory\<close>~\<open>A\<close> retrieves the theory value presently
associated with name \<open>A\<close>. Note that the result might be outdated wrt.\ the
file-system content.
- \<^descr> @{ML Thy_Info.remove_thy}~\<open>A\<close> deletes theory \<open>A\<close> and all descendants from
+ \<^descr> \<^ML>\<open>Thy_Info.remove_thy\<close>~\<open>A\<close> deletes theory \<open>A\<close> and all descendants from
the theory database.
- \<^descr> @{ML Thy_Info.register_thy}~\<open>text thy\<close> registers an existing theory value
+ \<^descr> \<^ML>\<open>Thy_Info.register_thy\<close>~\<open>text thy\<close> registers an existing theory value
with the theory loader database and updates source version information
according to the file store.
\<close>
--- a/src/Doc/Implementation/Isar.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Isar.thy Sat Jan 05 17:24:33 2019 +0100
@@ -74,7 +74,7 @@
(term * term list) list list -> Proof.context -> Proof.state"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.state} represents Isar proof states. This is a
+ \<^descr> Type \<^ML_type>\<open>Proof.state\<close> represents Isar proof states. This is a
block-structured configuration with proof context, linguistic mode, and
optional goal. The latter consists of goal context, goal facts
(``\<open>using\<close>''), and tactical goal state (see \secref{sec:tactical-goals}).
@@ -83,8 +83,7 @@
some parts of the tactical goal --- how exactly is defined by the proof
method that is applied in that situation.
- \<^descr> @{ML Proof.assert_forward}, @{ML Proof.assert_chain}, @{ML
- Proof.assert_backward} are partial identity functions that fail unless a
+ \<^descr> \<^ML>\<open>Proof.assert_forward\<close>, \<^ML>\<open>Proof.assert_chain\<close>, \<^ML>\<open>Proof.assert_backward\<close> are partial identity functions that fail unless a
certain linguistic mode is active, namely ``\<open>proof(state)\<close>'',
``\<open>proof(chain)\<close>'', ``\<open>proof(prove)\<close>'', respectively (using the terminology
of @{cite "isabelle-isar-ref"}).
@@ -92,22 +91,21 @@
It is advisable to study the implementations of existing proof commands for
suitable modes to be asserted.
- \<^descr> @{ML Proof.simple_goal}~\<open>state\<close> returns the structured Isar goal (if
+ \<^descr> \<^ML>\<open>Proof.simple_goal\<close>~\<open>state\<close> returns the structured Isar goal (if
available) in the form seen by ``simple'' methods (like @{method simp} or
@{method blast}). The Isar goal facts are already inserted as premises into
- the subgoals, which are presented individually as in @{ML Proof.goal}.
+ the subgoals, which are presented individually as in \<^ML>\<open>Proof.goal\<close>.
- \<^descr> @{ML Proof.goal}~\<open>state\<close> returns the structured Isar goal (if available)
+ \<^descr> \<^ML>\<open>Proof.goal\<close>~\<open>state\<close> returns the structured Isar goal (if available)
in the form seen by regular methods (like @{method rule}). The auxiliary
internal encoding of Pure conjunctions is split into individual subgoals as
usual.
- \<^descr> @{ML Proof.raw_goal}~\<open>state\<close> returns the structured Isar goal (if
+ \<^descr> \<^ML>\<open>Proof.raw_goal\<close>~\<open>state\<close> returns the structured Isar goal (if
available) in the raw internal form seen by ``raw'' methods (like @{method
- induct}). This form is rarely appropriate for diagnostic tools; @{ML
- Proof.simple_goal} or @{ML Proof.goal} should be used in most situations.
+ induct}). This form is rarely appropriate for diagnostic tools; \<^ML>\<open>Proof.simple_goal\<close> or \<^ML>\<open>Proof.goal\<close> should be used in most situations.
- \<^descr> @{ML Proof.theorem}~\<open>before_qed after_qed statement ctxt\<close> initializes a
+ \<^descr> \<^ML>\<open>Proof.theorem\<close>~\<open>before_qed after_qed statement ctxt\<close> initializes a
toplevel Isar proof state within a given context.
The optional \<open>before_qed\<close> method is applied at the end of the proof, just
@@ -115,9 +113,8 @@
The \<open>after_qed\<close> continuation receives the extracted result in order to apply
it to the final context in a suitable way (e.g.\ storing named facts). Note
- that at this generic level the target context is specified as @{ML_type
- Proof.context}, but the usual wrapping of toplevel proofs into command
- transactions will provide a @{ML_type local_theory} here
+ that at this generic level the target context is specified as \<^ML_type>\<open>Proof.context\<close>, but the usual wrapping of toplevel proofs into command
+ transactions will provide a \<^ML_type>\<open>local_theory\<close> here
(\chref{ch:local-theory}). This affects how results are stored.
The \<open>statement\<close> is given as a nested list of terms, each associated with
@@ -148,7 +145,7 @@
have A and B and C
ML_val
\<open>val n = Thm.nprems_of (#goal @{Isar.goal});
- @{assert} (n = 3);\<close>
+ \<^assert> (n = 3);\<close>
sorry
end
@@ -285,30 +282,28 @@
string -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.method} represents proof methods as abstract type.
+ \<^descr> Type \<^ML_type>\<open>Proof.method\<close> represents proof methods as abstract type.
- \<^descr> @{ML CONTEXT_METHOD}~\<open>(fn facts => context_tactic)\<close> wraps \<open>context_tactic\<close>
+ \<^descr> \<^ML>\<open>CONTEXT_METHOD\<close>~\<open>(fn facts => context_tactic)\<close> wraps \<open>context_tactic\<close>
depending on goal facts as a general proof method that may change the proof
- context dynamically. A typical operation is @{ML
- Proof_Context.update_cases}, which is wrapped up as combinator @{index_ML
+ context dynamically. A typical operation is \<^ML>\<open>Proof_Context.update_cases\<close>, which is wrapped up as combinator @{index_ML
CONTEXT_CASES} for convenience.
- \<^descr> @{ML METHOD}~\<open>(fn facts => tactic)\<close> wraps \<open>tactic\<close> depending on goal facts
+ \<^descr> \<^ML>\<open>METHOD\<close>~\<open>(fn facts => tactic)\<close> wraps \<open>tactic\<close> depending on goal facts
as regular proof method; the goal context is passed via method syntax.
- \<^descr> @{ML SIMPLE_METHOD}~\<open>tactic\<close> wraps a tactic that addresses all subgoals
+ \<^descr> \<^ML>\<open>SIMPLE_METHOD\<close>~\<open>tactic\<close> wraps a tactic that addresses all subgoals
uniformly as simple proof method. Goal facts are already inserted into all
subgoals before \<open>tactic\<close> is applied.
- \<^descr> @{ML SIMPLE_METHOD'}~\<open>tactic\<close> wraps a tactic that addresses a specific
+ \<^descr> \<^ML>\<open>SIMPLE_METHOD'\<close>~\<open>tactic\<close> wraps a tactic that addresses a specific
subgoal as simple proof method that operates on subgoal 1. Goal facts are
inserted into the subgoal before the \<open>tactic\<close> is applied.
- \<^descr> @{ML Method.insert_tac}~\<open>ctxt facts i\<close> inserts \<open>facts\<close> into subgoal \<open>i\<close>.
- This is convenient to reproduce part of the @{ML SIMPLE_METHOD} or @{ML
- SIMPLE_METHOD'} wrapping within regular @{ML METHOD}, for example.
+ \<^descr> \<^ML>\<open>Method.insert_tac\<close>~\<open>ctxt facts i\<close> inserts \<open>facts\<close> into subgoal \<open>i\<close>.
+ This is convenient to reproduce part of the \<^ML>\<open>SIMPLE_METHOD\<close> or \<^ML>\<open>SIMPLE_METHOD'\<close> wrapping within regular \<^ML>\<open>METHOD\<close>, for example.
- \<^descr> @{ML Method.setup}~\<open>name parser description\<close> provides the functionality of
+ \<^descr> \<^ML>\<open>Method.setup\<close>~\<open>name parser description\<close> provides the functionality of
the Isar command @{command method_setup} as ML function.
\<close>
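(A minimal sketch of \<^ML>\<open>Method.setup\<close> in action, with a hypothetical
method name; not part of this changeset:)

setup \<open>
  Method.setup \<^binding>\<open>my_assm\<close>
    (Scan.succeed (fn ctxt => SIMPLE_METHOD' (assume_tac ctxt)))
    "proof by assumption (sketch)"
\<close>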
@@ -319,8 +314,8 @@
\<^medskip>
The following toy examples illustrate how the goal facts and state are
passed to proof methods. The predefined proof method called ``@{method
- tactic}'' wraps ML source of type @{ML_type tactic} (abstracted over
- @{ML_text facts}). This allows immediate experimentation without parsing of
+ tactic}'' wraps ML source of type \<^ML_type>\<open>tactic\<close> (abstracted over
+ \<^ML_text>\<open>facts\<close>). This allows immediate experimentation without parsing of
concrete syntax.
\<close>
@@ -330,16 +325,16 @@
assume a: A and b: B
have "A \<and> B"
- apply (tactic \<open>resolve_tac @{context} @{thms conjI} 1\<close>)
- using a apply (tactic \<open>resolve_tac @{context} facts 1\<close>)
- using b apply (tactic \<open>resolve_tac @{context} facts 1\<close>)
+ apply (tactic \<open>resolve_tac \<^context> @{thms conjI} 1\<close>)
+ using a apply (tactic \<open>resolve_tac \<^context> facts 1\<close>)
+ using b apply (tactic \<open>resolve_tac \<^context> facts 1\<close>)
done
have "A \<and> B"
using a and b
ML_val \<open>@{Isar.goal}\<close>
- apply (tactic \<open>Method.insert_tac @{context} facts 1\<close>)
- apply (tactic \<open>(resolve_tac @{context} @{thms conjI} THEN_ALL_NEW assume_tac @{context}) 1\<close>)
+ apply (tactic \<open>Method.insert_tac \<^context> facts 1\<close>)
+ apply (tactic \<open>(resolve_tac \<^context> @{thms conjI} THEN_ALL_NEW assume_tac \<^context>) 1\<close>)
done
end
@@ -361,14 +356,14 @@
passes through the proof context at the end of parsing, but it is not used
in this example.
- The @{ML Attrib.thms} parser produces a list of theorems from the usual Isar
+ The \<^ML>\<open>Attrib.thms\<close> parser produces a list of theorems from the usual Isar
syntax involving attribute expressions etc.\ (syntax category @{syntax
- thms}) @{cite "isabelle-isar-ref"}. The resulting @{ML_text thms} are
- added to @{ML HOL_basic_ss} which already contains the basic Simplifier
+ thms}) @{cite "isabelle-isar-ref"}. The resulting \<^ML_text>\<open>thms\<close> are
+ added to \<^ML>\<open>HOL_basic_ss\<close> which already contains the basic Simplifier
setup for HOL.
- The tactic @{ML asm_full_simp_tac} is the one that is also used in method
- @{method simp} by default. The extra wrapping by the @{ML CHANGED} tactical
+ The tactic \<^ML>\<open>asm_full_simp_tac\<close> is the one that is also used in method
+ @{method simp} by default. The extra wrapping by the \<^ML>\<open>CHANGED\<close> tactical
ensures progress of simplification: identical goal states are filtered out
explicitly to make the raw tactic conform to standard Isar method behaviour.
@@ -422,7 +417,7 @@
method_setup my_simp' =
\<open>Attrib.thms >> (fn thms => fn ctxt =>
let
- val my_simps = Named_Theorems.get ctxt @{named_theorems my_simp}
+ val my_simps = Named_Theorems.get ctxt \<^named_theorems>\<open>my_simp\<close>
in
SIMPLE_METHOD' (fn i =>
CHANGED (asm_full_simp_tac
@@ -447,8 +442,7 @@
text \<open>
\<^medskip>
The @{method my_simp} variants defined above are ``simple'' methods, i.e.\
- the goal facts are merely inserted as goal premises by the @{ML
- SIMPLE_METHOD'} or @{ML SIMPLE_METHOD} wrapper. For proof methods that are
+ the goal facts are merely inserted as goal premises by the \<^ML>\<open>SIMPLE_METHOD'\<close> or \<^ML>\<open>SIMPLE_METHOD\<close> wrapper. For proof methods that are
similar to the standard collection of @{method simp}, @{method blast},
@{method fast}, @{method auto} there is little more that can be done.
@@ -461,7 +455,7 @@
\<^medskip>
The technical treatment of rules from the context requires further
- attention. Above we rebuild a fresh @{ML_type simpset} from the arguments
+ attention. Above we rebuild a fresh \<^ML_type>\<open>simpset\<close> from the arguments
and \<^emph>\<open>all\<close> rules retrieved from the context on every invocation of the
method. This does not scale to really large collections of rules, which
easily emerge in the context of a big theory library, for example.
@@ -471,7 +465,7 @@
retrieval. More realistic applications require efficient index-structures
that organize theorems in a customized manner, such as a discrimination net
that is indexed by the left-hand sides of rewrite rules. For variations on
- the Simplifier, re-use of the existing type @{ML_type simpset} is adequate,
+ the Simplifier, re-use of the existing type \<^ML_type>\<open>simpset\<close> is adequate,
but scalability would require it be maintained statically within the context
data, not dynamically on each tool invocation.
\<close>
@@ -510,23 +504,23 @@
string -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type attribute} represents attributes as concrete type alias.
+ \<^descr> Type \<^ML_type>\<open>attribute\<close> represents attributes as concrete type alias.
- \<^descr> @{ML Thm.rule_attribute}~\<open>thms (fn context => rule)\<close> wraps a
- context-dependent rule (mapping on @{ML_type thm}) as attribute.
+ \<^descr> \<^ML>\<open>Thm.rule_attribute\<close>~\<open>thms (fn context => rule)\<close> wraps a
+ context-dependent rule (mapping on \<^ML_type>\<open>thm\<close>) as attribute.
The \<open>thms\<close> are additional parameters: when forming an abstract closure, the
system may provide dummy facts that are propagated according to strict
evaluation discipline. In that case, \<open>rule\<close> is bypassed.
- \<^descr> @{ML Thm.declaration_attribute}~\<open>(fn thm => decl)\<close> wraps a
- theorem-dependent declaration (mapping on @{ML_type Context.generic}) as
+ \<^descr> \<^ML>\<open>Thm.declaration_attribute\<close>~\<open>(fn thm => decl)\<close> wraps a
+ theorem-dependent declaration (mapping on \<^ML_type>\<open>Context.generic\<close>) as
attribute.
When forming an abstract closure, the system may provide a dummy fact as
\<open>thm\<close>. In that case, \<open>decl\<close> is bypassed.
- \<^descr> @{ML Attrib.setup}~\<open>name parser description\<close> provides the functionality of
+ \<^descr> \<^ML>\<open>Attrib.setup\<close>~\<open>name parser description\<close> provides the functionality of
the Isar command @{command attribute_setup} as ML function.
\<close>
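(A minimal sketch of \<^ML>\<open>Attrib.setup\<close>, wrapping \<^ML>\<open>Thm.symmetric\<close> as an
attribute; the name \<open>my_sym\<close> is hypothetical:)

setup \<open>
  Attrib.setup \<^binding>\<open>my_sym\<close>
    (Scan.succeed (Thm.rule_attribute [] (K Thm.symmetric)))
    "applying symmetry (sketch)"
\<close>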
@@ -535,13 +529,12 @@
@{ML_antiquotation_def attributes} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation attributes} attributes
- \<close>}
+ \<close>
\<^descr> \<open>@{attributes [\<dots>]}\<close> embeds attribute source representation into the ML
- text, which is particularly useful with declarations like @{ML
- Local_Theory.note}. Attribute names are internalized at compile time, but
+ text, which is particularly useful with declarations like \<^ML>\<open>Local_Theory.note\<close>. Attribute names are internalized at compile time, but
the source is unevaluated. This means attributes with formal arguments
(types, terms, theorems) may be subject to odd effects of dynamic scoping!
\<close>
--- a/src/Doc/Implementation/Local_Theory.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Local_Theory.thy Sat Jan 05 17:24:33 2019 +0100
@@ -98,21 +98,20 @@
local_theory -> (string * thm list) * local_theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type local_theory} represents local theories. Although this is
- merely an alias for @{ML_type Proof.context}, it is semantically a subtype
- of the same: a @{ML_type local_theory} holds target information as special
- context data. Subtyping means that any value \<open>lthy:\<close>~@{ML_type local_theory}
- can be also used with operations on expecting a regular \<open>ctxt:\<close>~@{ML_type
- Proof.context}.
+ \<^descr> Type \<^ML_type>\<open>local_theory\<close> represents local theories. Although this is
+ merely an alias for \<^ML_type>\<open>Proof.context\<close>, it is semantically a subtype
+ of the same: a \<^ML_type>\<open>local_theory\<close> holds target information as special
+ context data. Subtyping means that any value \<open>lthy:\<close>~\<^ML_type>\<open>local_theory\<close>
+ can also be used with operations expecting a regular \<open>ctxt:\<close>~\<^ML_type>\<open>Proof.context\<close>.
- \<^descr> @{ML Named_Target.init}~\<open>before_exit name thy\<close> initializes a local theory
+ \<^descr> \<^ML>\<open>Named_Target.init\<close>~\<open>before_exit name thy\<close> initializes a local theory
derived from the given background theory. An empty name refers to a \<^emph>\<open>global
theory\<close> context, and a non-empty name refers to a @{command locale} or
@{command class} context (a fully-qualified internal name is expected here).
This is useful for experimentation --- normally the Isar toplevel already
takes care to initialize the local theory context.
- \<^descr> @{ML Local_Theory.define}~\<open>((b, mx), (a, rhs)) lthy\<close> defines a local
+ \<^descr> \<^ML>\<open>Local_Theory.define\<close>~\<open>((b, mx), (a, rhs)) lthy\<close> defines a local
entity according to the specification that is given relatively to the
current \<open>lthy\<close> context. In particular the term of the RHS may refer to
earlier local entities from the auxiliary context, or hypothetical
@@ -130,9 +129,8 @@
plain declarations such as @{attribute simp}, while non-trivial rules like
@{attribute simplified} are better avoided.
- \<^descr> @{ML Local_Theory.note}~\<open>(a, ths) lthy\<close> is analogous to @{ML
- Local_Theory.define}, but defines facts instead of terms. There is also a
- slightly more general variant @{ML Local_Theory.notes} that defines several
+ \<^descr> \<^ML>\<open>Local_Theory.note\<close>~\<open>(a, ths) lthy\<close> is analogous to \<^ML>\<open>Local_Theory.define\<close>, but defines facts instead of terms. There is also a
+ slightly more general variant \<^ML>\<open>Local_Theory.notes\<close> that defines several
facts (with attribute expressions) simultaneously.
This is essentially the internal version of the @{command lemmas} command,
--- a/src/Doc/Implementation/Logic.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Logic.thy Sat Jan 05 17:24:33 2019 +0100
@@ -120,43 +120,43 @@
@{index_ML Sign.primitive_arity: "arity -> theory -> theory"} \\
\end{mldecls}
- \<^descr> Type @{ML_type class} represents type classes.
+ \<^descr> Type \<^ML_type>\<open>class\<close> represents type classes.
- \<^descr> Type @{ML_type sort} represents sorts, i.e.\ finite intersections of
- classes. The empty list @{ML "[]: sort"} refers to the empty class
+ \<^descr> Type \<^ML_type>\<open>sort\<close> represents sorts, i.e.\ finite intersections of
+ classes. The empty list \<^ML>\<open>[]: sort\<close> refers to the empty class
intersection, i.e.\ the ``full sort''.
- \<^descr> Type @{ML_type arity} represents type arities. A triple \<open>(\<kappa>, \<^vec>s, s)
+ \<^descr> Type \<^ML_type>\<open>arity\<close> represents type arities. A triple \<open>(\<kappa>, \<^vec>s, s)
: arity\<close> represents \<open>\<kappa> :: (\<^vec>s)s\<close> as described above.
- \<^descr> Type @{ML_type typ} represents types; this is a datatype with constructors
- @{ML TFree}, @{ML TVar}, @{ML Type}.
+ \<^descr> Type \<^ML_type>\<open>typ\<close> represents types; this is a datatype with constructors
+ \<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>, \<^ML>\<open>Type\<close>.
- \<^descr> @{ML Term.map_atyps}~\<open>f \<tau>\<close> applies the mapping \<open>f\<close> to all atomic types
- (@{ML TFree}, @{ML TVar}) occurring in \<open>\<tau>\<close>.
+ \<^descr> \<^ML>\<open>Term.map_atyps\<close>~\<open>f \<tau>\<close> applies the mapping \<open>f\<close> to all atomic types
+ (\<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>) occurring in \<open>\<tau>\<close>.
- \<^descr> @{ML Term.fold_atyps}~\<open>f \<tau>\<close> iterates the operation \<open>f\<close> over all
- occurrences of atomic types (@{ML TFree}, @{ML TVar}) in \<open>\<tau>\<close>; the type
+ \<^descr> \<^ML>\<open>Term.fold_atyps\<close>~\<open>f \<tau>\<close> iterates the operation \<open>f\<close> over all
+ occurrences of atomic types (\<^ML>\<open>TFree\<close>, \<^ML>\<open>TVar\<close>) in \<open>\<tau>\<close>; the type
structure is traversed from left to right.
- \<^descr> @{ML Sign.subsort}~\<open>thy (s\<^sub>1, s\<^sub>2)\<close> tests the subsort relation \<open>s\<^sub>1 \<subseteq>
+ \<^descr> \<^ML>\<open>Sign.subsort\<close>~\<open>thy (s\<^sub>1, s\<^sub>2)\<close> tests the subsort relation \<open>s\<^sub>1 \<subseteq>
s\<^sub>2\<close>.
- \<^descr> @{ML Sign.of_sort}~\<open>thy (\<tau>, s)\<close> tests whether type \<open>\<tau>\<close> is of sort \<open>s\<close>.
+ \<^descr> \<^ML>\<open>Sign.of_sort\<close>~\<open>thy (\<tau>, s)\<close> tests whether type \<open>\<tau>\<close> is of sort \<open>s\<close>.
- \<^descr> @{ML Sign.add_type}~\<open>ctxt (\<kappa>, k, mx)\<close> declares a new type constructors \<open>\<kappa>\<close>
+ \<^descr> \<^ML>\<open>Sign.add_type\<close>~\<open>ctxt (\<kappa>, k, mx)\<close> declares a new type constructor \<open>\<kappa>\<close>
with \<open>k\<close> arguments and optional mixfix syntax.
- \<^descr> @{ML Sign.add_type_abbrev}~\<open>ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)\<close> defines a new type
+ \<^descr> \<^ML>\<open>Sign.add_type_abbrev\<close>~\<open>ctxt (\<kappa>, \<^vec>\<alpha>, \<tau>)\<close> defines a new type
abbreviation \<open>(\<^vec>\<alpha>)\<kappa> = \<tau>\<close>.
- \<^descr> @{ML Sign.primitive_class}~\<open>(c, [c\<^sub>1, \<dots>, c\<^sub>n])\<close> declares a new class \<open>c\<close>,
+ \<^descr> \<^ML>\<open>Sign.primitive_class\<close>~\<open>(c, [c\<^sub>1, \<dots>, c\<^sub>n])\<close> declares a new class \<open>c\<close>,
together with class relations \<open>c \<subseteq> c\<^sub>i\<close>, for \<open>i = 1, \<dots>, n\<close>.
- \<^descr> @{ML Sign.primitive_classrel}~\<open>(c\<^sub>1, c\<^sub>2)\<close> declares the class relation
+ \<^descr> \<^ML>\<open>Sign.primitive_classrel\<close>~\<open>(c\<^sub>1, c\<^sub>2)\<close> declares the class relation
\<open>c\<^sub>1 \<subseteq> c\<^sub>2\<close>.
- \<^descr> @{ML Sign.primitive_arity}~\<open>(\<kappa>, \<^vec>s, s)\<close> declares the arity \<open>\<kappa> ::
+ \<^descr> \<^ML>\<open>Sign.primitive_arity\<close>~\<open>(\<kappa>, \<^vec>s, s)\<close> declares the arity \<open>\<kappa> ::
(\<^vec>s)s\<close>.
\<close>
@@ -170,7 +170,7 @@
@{ML_antiquotation_def "typ"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation class} embedded
;
@@{ML_antiquotation sort} sort
@@ -180,25 +180,25 @@
@@{ML_antiquotation nonterminal}) embedded
;
@@{ML_antiquotation typ} type
- \<close>}
+ \<close>
- \<^descr> \<open>@{class c}\<close> inlines the internalized class \<open>c\<close> --- as @{ML_type string}
+ \<^descr> \<open>@{class c}\<close> inlines the internalized class \<open>c\<close> --- as \<^ML_type>\<open>string\<close>
literal.
- \<^descr> \<open>@{sort s}\<close> inlines the internalized sort \<open>s\<close> --- as @{ML_type "string
- list"} literal.
+ \<^descr> \<open>@{sort s}\<close> inlines the internalized sort \<open>s\<close> --- as \<^ML_type>\<open>string
+ list\<close> literal.
\<^descr> \<open>@{type_name c}\<close> inlines the internalized type constructor \<open>c\<close> --- as
- @{ML_type string} literal.
+ \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{type_abbrev c}\<close> inlines the internalized type abbreviation \<open>c\<close> --- as
- @{ML_type string} literal.
+ \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{nonterminal c}\<close> inlines the internalized syntactic type~/ grammar
- nonterminal \<open>c\<close> --- as @{ML_type string} literal.
+ nonterminal \<open>c\<close> --- as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{typ \<tau>}\<close> inlines the internalized type \<open>\<tau>\<close> --- as constructor term for
- datatype @{ML_type typ}.
+ datatype \<^ML_type>\<open>typ\<close>.
\<close>
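(A sketch of a check that confirms the constructor-term reading of \<open>@{typ \<tau>}\<close>;
not part of this changeset:)

ML_val \<open>\<^assert> (\<^typ>\<open>nat\<close> = Type ("Nat.nat", []))\<close>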
@@ -333,50 +333,49 @@
@{index_ML Sign.const_instance: "theory -> string * typ list -> typ"} \\
\end{mldecls}
- \<^descr> Type @{ML_type term} represents de-Bruijn terms, with comments in
+ \<^descr> Type \<^ML_type>\<open>term\<close> represents de-Bruijn terms, with comments in
abstractions, and explicitly named free variables and constants; this is a
datatype with constructors @{index_ML Bound}, @{index_ML Free}, @{index_ML
Var}, @{index_ML Const}, @{index_ML Abs}, @{index_ML_op "$"}.
- \<^descr> \<open>t\<close>~@{ML_text aconv}~\<open>u\<close> checks \<open>\<alpha>\<close>-equivalence of two terms. This is the
- basic equality relation on type @{ML_type term}; raw datatype equality
+ \<^descr> \<open>t\<close>~\<^ML_text>\<open>aconv\<close>~\<open>u\<close> checks \<open>\<alpha>\<close>-equivalence of two terms. This is the
+ basic equality relation on type \<^ML_type>\<open>term\<close>; raw datatype equality
should only be used for operations related to parsing or printing!
- \<^descr> @{ML Term.map_types}~\<open>f t\<close> applies the mapping \<open>f\<close> to all types occurring
+ \<^descr> \<^ML>\<open>Term.map_types\<close>~\<open>f t\<close> applies the mapping \<open>f\<close> to all types occurring
in \<open>t\<close>.
- \<^descr> @{ML Term.fold_types}~\<open>f t\<close> iterates the operation \<open>f\<close> over all
+ \<^descr> \<^ML>\<open>Term.fold_types\<close>~\<open>f t\<close> iterates the operation \<open>f\<close> over all
occurrences of types in \<open>t\<close>; the term structure is traversed from left to
right.
- \<^descr> @{ML Term.map_aterms}~\<open>f t\<close> applies the mapping \<open>f\<close> to all atomic terms
- (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML Const}) occurring in \<open>t\<close>.
+ \<^descr> \<^ML>\<open>Term.map_aterms\<close>~\<open>f t\<close> applies the mapping \<open>f\<close> to all atomic terms
+ (\<^ML>\<open>Bound\<close>, \<^ML>\<open>Free\<close>, \<^ML>\<open>Var\<close>, \<^ML>\<open>Const\<close>) occurring in \<open>t\<close>.
- \<^descr> @{ML Term.fold_aterms}~\<open>f t\<close> iterates the operation \<open>f\<close> over all
- occurrences of atomic terms (@{ML Bound}, @{ML Free}, @{ML Var}, @{ML
- Const}) in \<open>t\<close>; the term structure is traversed from left to right.
+ \<^descr> \<^ML>\<open>Term.fold_aterms\<close>~\<open>f t\<close> iterates the operation \<open>f\<close> over all
+ occurrences of atomic terms (\<^ML>\<open>Bound\<close>, \<^ML>\<open>Free\<close>, \<^ML>\<open>Var\<close>, \<^ML>\<open>Const\<close>) in \<open>t\<close>; the term structure is traversed from left to right.
- \<^descr> @{ML fastype_of}~\<open>t\<close> determines the type of a well-typed term. This
+ \<^descr> \<^ML>\<open>fastype_of\<close>~\<open>t\<close> determines the type of a well-typed term. This
operation is relatively slow, despite the omission of any sanity checks.
- \<^descr> @{ML lambda}~\<open>a b\<close> produces an abstraction \<open>\<lambda>a. b\<close>, where occurrences of
+ \<^descr> \<^ML>\<open>lambda\<close>~\<open>a b\<close> produces an abstraction \<open>\<lambda>a. b\<close>, where occurrences of
the atomic term \<open>a\<close> in the body \<open>b\<close> are replaced by bound variables.
- \<^descr> @{ML betapply}~\<open>(t, u)\<close> produces an application \<open>t u\<close>, with topmost
+ \<^descr> \<^ML>\<open>betapply\<close>~\<open>(t, u)\<close> produces an application \<open>t u\<close>, with topmost
\<open>\<beta>\<close>-conversion if \<open>t\<close> is an abstraction.
- \<^descr> @{ML incr_boundvars}~\<open>j\<close> increments a term's dangling bound variables by
+ \<^descr> \<^ML>\<open>incr_boundvars\<close>~\<open>j\<close> increments a term's dangling bound variables by
the offset \<open>j\<close>. This is required when moving a subterm into a context where
it is enclosed by a different number of abstractions. Bound variables with a
matching abstraction are unaffected.
- \<^descr> @{ML Sign.declare_const}~\<open>ctxt ((c, \<sigma>), mx)\<close> declares a new constant \<open>c ::
+ \<^descr> \<^ML>\<open>Sign.declare_const\<close>~\<open>ctxt ((c, \<sigma>), mx)\<close> declares a new constant \<open>c ::
\<sigma>\<close> with optional mixfix syntax.
- \<^descr> @{ML Sign.add_abbrev}~\<open>print_mode (c, t)\<close> introduces a new term
+ \<^descr> \<^ML>\<open>Sign.add_abbrev\<close>~\<open>print_mode (c, t)\<close> introduces a new term
abbreviation \<open>c \<equiv> t\<close>.
- \<^descr> @{ML Sign.const_typargs}~\<open>thy (c, \<tau>)\<close> and @{ML Sign.const_instance}~\<open>thy
+ \<^descr> \<^ML>\<open>Sign.const_typargs\<close>~\<open>thy (c, \<tau>)\<close> and \<^ML>\<open>Sign.const_instance\<close>~\<open>thy
(c, [\<tau>\<^sub>1, \<dots>, \<tau>\<^sub>n])\<close> convert between two representations of polymorphic
constants: full type instance vs.\ compact type arguments form.
\<close>
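(Two checks in this spirit; a sketch, not part of this changeset:)

ML_val \<open>
  \<^assert> (fastype_of \<^term>\<open>\<lambda>x::nat. x\<close> = \<^typ>\<open>nat \<Rightarrow> nat\<close>);
  \<^assert> (betapply (\<^term>\<open>\<lambda>x::nat. x\<close>, \<^term>\<open>0::nat\<close>) aconv \<^term>\<open>0::nat\<close>)
\<close>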
@@ -390,7 +389,7 @@
@{ML_antiquotation_def "prop"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
(@@{ML_antiquotation const_name} |
@@{ML_antiquotation const_abbrev}) embedded
;
@@ -399,23 +398,22 @@
@@{ML_antiquotation term} term
;
@@{ML_antiquotation prop} prop
- \<close>}
+ \<close>
\<^descr> \<open>@{const_name c}\<close> inlines the internalized logical constant name \<open>c\<close> ---
- as @{ML_type string} literal.
+ as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{const_abbrev c}\<close> inlines the internalized abbreviated constant name \<open>c\<close>
- --- as @{ML_type string} literal.
+ --- as \<^ML_type>\<open>string\<close> literal.
\<^descr> \<open>@{const c(\<^vec>\<tau>)}\<close> inlines the internalized constant \<open>c\<close> with precise
- type instantiation in the sense of @{ML Sign.const_instance} --- as @{ML
- Const} constructor term for datatype @{ML_type term}.
+ type instantiation in the sense of \<^ML>\<open>Sign.const_instance\<close> --- as \<^ML>\<open>Const\<close> constructor term for datatype \<^ML_type>\<open>term\<close>.
\<^descr> \<open>@{term t}\<close> inlines the internalized term \<open>t\<close> --- as constructor term for
- datatype @{ML_type term}.
+ datatype \<^ML_type>\<open>term\<close>.
\<^descr> \<open>@{prop \<phi>}\<close> inlines the internalized proposition \<open>\<phi>\<close> --- as constructor
- term for datatype @{ML_type term}.
+ term for datatype \<^ML_type>\<open>term\<close>.
\<close>
@@ -601,84 +599,82 @@
Defs.entry -> Defs.entry list -> theory -> theory"} \\
\end{mldecls}
- \<^descr> @{ML Thm.peek_status}~\<open>thm\<close> informs about the current status of the
+ \<^descr> \<^ML>\<open>Thm.peek_status\<close>~\<open>thm\<close> informs about the current status of the
derivation object behind the given theorem. This is a snapshot of a
potentially ongoing (parallel) evaluation of proofs. The three Boolean
values indicate the following: \<^verbatim>\<open>oracle\<close> if the finished part contains some
oracle invocation; \<^verbatim>\<open>unfinished\<close> if some future proofs are still pending;
\<^verbatim>\<open>failed\<close> if some future proof has failed, rendering the theorem invalid!
- \<^descr> @{ML Logic.all}~\<open>a B\<close> produces a Pure quantification \<open>\<And>a. B\<close>, where
+ \<^descr> \<^ML>\<open>Logic.all\<close>~\<open>a B\<close> produces a Pure quantification \<open>\<And>a. B\<close>, where
occurrences of the atomic term \<open>a\<close> in the body proposition \<open>B\<close> are replaced
- by bound variables. (See also @{ML lambda} on terms.)
+ by bound variables. (See also \<^ML>\<open>lambda\<close> on terms.)
- \<^descr> @{ML Logic.mk_implies}~\<open>(A, B)\<close> produces a Pure implication \<open>A \<Longrightarrow> B\<close>.
+ \<^descr> \<^ML>\<open>Logic.mk_implies\<close>~\<open>(A, B)\<close> produces a Pure implication \<open>A \<Longrightarrow> B\<close>.
- \<^descr> Types @{ML_type ctyp} and @{ML_type cterm} represent certified types and
+ \<^descr> Types \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close> represent certified types and
terms, respectively. These are abstract datatypes that guarantee that their
values have passed the full well-formedness (and well-typedness) checks,
relative to the declarations of type constructors, constants etc.\ in the
- background theory. The abstract types @{ML_type ctyp} and @{ML_type cterm}
+ background theory. The abstract types \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close>
are part of the same inference kernel that is mainly responsible for
- @{ML_type thm}. Thus syntactic operations on @{ML_type ctyp} and @{ML_type
- cterm} are located in the @{ML_structure Thm} module, even though theorems
+ \<^ML_type>\<open>thm\<close>. Thus syntactic operations on \<^ML_type>\<open>ctyp\<close> and \<^ML_type>\<open>cterm\<close> are located in the \<^ML_structure>\<open>Thm\<close> module, even though theorems
are not yet involved at that stage.
- \<^descr> @{ML Thm.ctyp_of}~\<open>ctxt \<tau>\<close> and @{ML Thm.cterm_of}~\<open>ctxt t\<close> explicitly
+ \<^descr> \<^ML>\<open>Thm.ctyp_of\<close>~\<open>ctxt \<tau>\<close> and \<^ML>\<open>Thm.cterm_of\<close>~\<open>ctxt t\<close> explicitly
check types and terms, respectively. This also involves some basic
normalizations, such as expansion of type and term abbreviations from the
underlying theory context. Full re-certification is relatively slow and
should be avoided in tight reasoning loops.
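  For illustration, a small hypothetical certification step:
  @{verbatim [display]
\<open> val ct = Thm.cterm_of \<^context> \<^term>\<open>x + y :: nat\<close>;
 val cT = Thm.ctyp_of \<^context> \<^typ>\<open>nat \<Rightarrow> bool\<close>;\<close>}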
- \<^descr> @{ML Thm.apply}, @{ML Thm.lambda}, @{ML Thm.all}, @{ML Drule.mk_implies}
+ \<^descr> \<^ML>\<open>Thm.apply\<close>, \<^ML>\<open>Thm.lambda\<close>, \<^ML>\<open>Thm.all\<close>, \<^ML>\<open>Drule.mk_implies\<close>
etc.\ compose certified terms (or propositions) incrementally. This is
- equivalent to @{ML Thm.cterm_of} after unchecked @{ML_op "$"}, @{ML lambda},
- @{ML Logic.all}, @{ML Logic.mk_implies} etc., but there can be a big
+ equivalent to \<^ML>\<open>Thm.cterm_of\<close> after unchecked \<^ML_op>\<open>$\<close>, \<^ML>\<open>lambda\<close>,
+ \<^ML>\<open>Logic.all\<close>, \<^ML>\<open>Logic.mk_implies\<close> etc., but there can be a big
difference in performance when large existing entities are composed by a few
extra constructions on top. There are separate operations to decompose
certified terms and theorems to produce certified terms again.
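  For example, a sketch that composes an application from parts that are
  already certified, without re-checking them:
  @{verbatim [display]
\<open> val cf = Thm.cterm_of \<^context> \<^term>\<open>Suc\<close>;
 val cx = Thm.cterm_of \<^context> \<^term>\<open>0 :: nat\<close>;
 val cfx = Thm.apply cf cx; (*certified application Suc 0*)\<close>}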
- \<^descr> Type @{ML_type thm} represents proven propositions. This is an abstract
+ \<^descr> Type \<^ML_type>\<open>thm\<close> represents proven propositions. This is an abstract
datatype that guarantees that its values have been constructed by basic
- principles of the @{ML_structure Thm} module. Every @{ML_type thm} value
+ principles of the \<^ML_structure>\<open>Thm\<close> module. Every \<^ML_type>\<open>thm\<close> value
refers to its background theory, cf.\ \secref{sec:context-theory}.
- \<^descr> @{ML Thm.transfer}~\<open>thy thm\<close> transfers the given theorem to a \<^emph>\<open>larger\<close>
+ \<^descr> \<^ML>\<open>Thm.transfer\<close>~\<open>thy thm\<close> transfers the given theorem to a \<^emph>\<open>larger\<close>
theory, see also \secref{sec:context}. This formal adjustment of the
background context has no logical significance, but is occasionally required
for formal reasons, e.g.\ when theorems that are imported from more basic
theories are used in the current situation.
- \<^descr> @{ML Thm.assume}, @{ML Thm.forall_intr}, @{ML Thm.forall_elim}, @{ML
- Thm.implies_intr}, and @{ML Thm.implies_elim} correspond to the primitive
+ \<^descr> \<^ML>\<open>Thm.assume\<close>, \<^ML>\<open>Thm.forall_intr\<close>, \<^ML>\<open>Thm.forall_elim\<close>, \<^ML>\<open>Thm.implies_intr\<close>, and \<^ML>\<open>Thm.implies_elim\<close> correspond to the primitive
inferences of \figref{fig:prim-rules}.
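  A minimal sketch that derives the trivial theorem \<open>A \<Longrightarrow> A\<close> from these
  primitives:
  @{verbatim [display]
\<open> val A = Thm.cterm_of \<^context> \<^prop>\<open>A\<close>;
 val th = Thm.implies_intr A (Thm.assume A); (*A ==> A*)\<close>}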
- \<^descr> @{ML Thm.generalize}~\<open>(\<^vec>\<alpha>, \<^vec>x)\<close> corresponds to the
+ \<^descr> \<^ML>\<open>Thm.generalize\<close>~\<open>(\<^vec>\<alpha>, \<^vec>x)\<close> corresponds to the
\<open>generalize\<close> rules of \figref{fig:subst-rules}. Here collections of type and
term variables are generalized simultaneously, specified by the given basic
names.
- \<^descr> @{ML Thm.instantiate}~\<open>(\<^vec>\<alpha>\<^sub>s, \<^vec>x\<^sub>\<tau>)\<close> corresponds to the
+ \<^descr> \<^ML>\<open>Thm.instantiate\<close>~\<open>(\<^vec>\<alpha>\<^sub>s, \<^vec>x\<^sub>\<tau>)\<close> corresponds to the
\<open>instantiate\<close> rules of \figref{fig:subst-rules}. Type variables are
substituted before term variables. Note that the types in \<open>\<^vec>x\<^sub>\<tau>\<close> refer
to the instantiated versions.
- \<^descr> @{ML Thm.add_axiom}~\<open>ctxt (name, A)\<close> declares an arbitrary proposition as
+ \<^descr> \<^ML>\<open>Thm.add_axiom\<close>~\<open>ctxt (name, A)\<close> declares an arbitrary proposition as
axiom, and retrieves it as a theorem from the resulting theory, cf.\ \<open>axiom\<close>
in \figref{fig:prim-rules}. Note that the low-level representation in the
axiom table may differ slightly from the returned theorem.
- \<^descr> @{ML Thm.add_oracle}~\<open>(binding, oracle)\<close> produces a named oracle rule,
+ \<^descr> \<^ML>\<open>Thm.add_oracle\<close>~\<open>(binding, oracle)\<close> produces a named oracle rule,
essentially generating arbitrary axioms on the fly, cf.\ \<open>axiom\<close> in
\figref{fig:prim-rules}.
- \<^descr> @{ML Thm.add_def}~\<open>ctxt unchecked overloaded (name, c \<^vec>x \<equiv> t)\<close>
+ \<^descr> \<^ML>\<open>Thm.add_def\<close>~\<open>ctxt unchecked overloaded (name, c \<^vec>x \<equiv> t)\<close>
states a definitional axiom for an existing constant \<open>c\<close>. Dependencies are
- recorded via @{ML Theory.add_deps}, unless the \<open>unchecked\<close> option is set.
+ recorded via \<^ML>\<open>Theory.add_deps\<close>, unless the \<open>unchecked\<close> option is set.
Note that the low-level representation in the axiom table may differ
slightly from the returned theorem.
- \<^descr> @{ML Theory.add_deps}~\<open>ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>\<close> declares dependencies of
+ \<^descr> \<^ML>\<open>Theory.add_deps\<close>~\<open>ctxt name c\<^sub>\<tau> \<^vec>d\<^sub>\<sigma>\<close> declares dependencies of
a named specification for constant \<open>c\<^sub>\<tau>\<close>, relative to existing
specifications for constants \<open>\<^vec>d\<^sub>\<sigma>\<close>. This also works for type
constructors.
@@ -694,7 +690,7 @@
@{ML_antiquotation_def "lemma"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation ctyp} typ
;
@@{ML_antiquotation cterm} term
@@ -707,23 +703,23 @@
;
@@{ML_antiquotation lemma} ('(' @'open' ')')? ((prop +) + @'and') \<newline>
@'by' method method?
- \<close>}
+ \<close>
\<^descr> \<open>@{ctyp \<tau>}\<close> produces a certified type wrt.\ the current background theory
- --- as abstract value of type @{ML_type ctyp}.
+ --- as abstract value of type \<^ML_type>\<open>ctyp\<close>.
\<^descr> \<open>@{cterm t}\<close> and \<open>@{cprop \<phi>}\<close> produce a certified term wrt.\ the current
- background theory --- as abstract value of type @{ML_type cterm}.
+ background theory --- as abstract value of type \<^ML_type>\<open>cterm\<close>.
\<^descr> \<open>@{thm a}\<close> produces a singleton fact --- as abstract value of type
- @{ML_type thm}.
+ \<^ML_type>\<open>thm\<close>.
\<^descr> \<open>@{thms a}\<close> produces a general fact --- as abstract value of type
- @{ML_type "thm list"}.
+ \<^ML_type>\<open>thm list\<close>.
\<^descr> \<open>@{lemma \<phi> by meth}\<close> produces a fact that is proven on the spot according
to the minimal proof, which imitates a terminal Isar proof. The result is an
- abstract value of type @{ML_type thm} or @{ML_type "thm list"}, depending on
+ abstract value of type \<^ML_type>\<open>thm\<close> or \<^ML_type>\<open>thm list\<close>, depending on
the number of propositions given here.
The internal derivation object lacks a proper theorem name, but it is
@@ -800,17 +796,17 @@
@{index_ML Logic.dest_type: "term -> typ"} \\
\end{mldecls}
- \<^descr> @{ML Conjunction.intr} derives \<open>A &&& B\<close> from \<open>A\<close> and \<open>B\<close>.
+ \<^descr> \<^ML>\<open>Conjunction.intr\<close> derives \<open>A &&& B\<close> from \<open>A\<close> and \<open>B\<close>.
- \<^descr> @{ML Conjunction.elim} derives \<open>A\<close> and \<open>B\<close> from \<open>A &&& B\<close>.
+ \<^descr> \<^ML>\<open>Conjunction.elim\<close> derives \<open>A\<close> and \<open>B\<close> from \<open>A &&& B\<close>.
- \<^descr> @{ML Drule.mk_term} derives \<open>TERM t\<close>.
+ \<^descr> \<^ML>\<open>Drule.mk_term\<close> derives \<open>TERM t\<close>.
- \<^descr> @{ML Drule.dest_term} recovers term \<open>t\<close> from \<open>TERM t\<close>.
+ \<^descr> \<^ML>\<open>Drule.dest_term\<close> recovers term \<open>t\<close> from \<open>TERM t\<close>.
- \<^descr> @{ML Logic.mk_type}~\<open>\<tau>\<close> produces the term \<open>TYPE(\<tau>)\<close>.
+ \<^descr> \<^ML>\<open>Logic.mk_type\<close>~\<open>\<tau>\<close> produces the term \<open>TYPE(\<tau>)\<close>.
- \<^descr> @{ML Logic.dest_type}~\<open>TYPE(\<tau>)\<close> recovers the type \<open>\<tau>\<close>.
+ \<^descr> \<^ML>\<open>Logic.dest_type\<close>~\<open>TYPE(\<tau>)\<close> recovers the type \<open>\<tau>\<close>.
\<close>
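text \<open>
  A small round-trip sketch for the last two operations:
\<close>

ML_val \<open>
  val t = Logic.mk_type \<^typ>\<open>nat\<close>;  (*the term TYPE(nat)*)
  \<^assert> (Logic.dest_type t = \<^typ>\<open>nat\<close>);
\<close>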
@@ -846,17 +842,16 @@
@{index_ML Thm.strip_shyps: "thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML Thm.extra_shyps}~\<open>thm\<close> determines the extraneous sort hypotheses of
+ \<^descr> \<^ML>\<open>Thm.extra_shyps\<close>~\<open>thm\<close> determines the extraneous sort hypotheses of
the given theorem, i.e.\ the sorts that are not present within type
variables of the statement.
- \<^descr> @{ML Thm.strip_shyps}~\<open>thm\<close> removes any extraneous sort hypotheses that
+ \<^descr> \<^ML>\<open>Thm.strip_shyps\<close>~\<open>thm\<close> removes any extraneous sort hypotheses that
can be witnessed from the type signature.
\<close>
text %mlex \<open>
- The following artificial example demonstrates the derivation of @{prop
- False} with a pending sort hypothesis involving a logically empty sort.
+ The following artificial example demonstrates the derivation of \<^prop>\<open>False\<close> with a pending sort hypothesis involving a logically empty sort.
\<close>
class empty =
@@ -865,7 +860,7 @@
theorem (in empty) false: False
using bad by blast
-ML_val \<open>@{assert} (Thm.extra_shyps @{thm false} = [@{sort empty}])\<close>
+ML_val \<open>\<^assert> (Thm.extra_shyps @{thm false} = [\<^sort>\<open>empty\<close>])\<close>
text \<open>
Thanks to the inference kernel managing sort hypotheses according to their
@@ -951,7 +946,7 @@
@{index_ML Simplifier.norm_hhf: "Proof.context -> thm -> thm"} \\
\end{mldecls}
- \<^descr> @{ML Simplifier.norm_hhf}~\<open>ctxt thm\<close> normalizes the given theorem
+ \<^descr> \<^ML>\<open>Simplifier.norm_hhf\<close>~\<open>ctxt thm\<close> normalizes the given theorem
according to the canonical form specified above. This is occasionally
helpful to repair some low-level tools that do not handle Hereditary Harrop
Formulae properly.
@@ -1032,7 +1027,7 @@
\<^descr> \<open>rule\<^sub>1 RSN (i, rule\<^sub>2)\<close> resolves the conclusion of \<open>rule\<^sub>1\<close> with the
\<open>i\<close>-th premise of \<open>rule\<^sub>2\<close>, according to the @{inference resolution}
principle explained above. Unless there is precisely one resolvent it raises
- exception @{ML THM}.
+ exception \<^ML>\<open>THM\<close>.
This corresponds to the rule attribute @{attribute THEN} in Isar source
language.
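  For example, the sketch \<open>@{thm sym} RS @{thm sym}\<close> resolves the conclusion
  of the first \<open>sym\<close> with the sole premise of the second; the unique
  resolvent is \<open>?s = ?t \<Longrightarrow> ?s = ?t\<close>:
  @{verbatim [display]
\<open> val th = @{thm sym} RS @{thm sym};\<close>}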
@@ -1044,7 +1039,7 @@
with the \<open>i\<close>-th premise of \<open>rule\<^sub>2\<close>, accumulating multiple results in one
big list. Note that such strict enumerations of higher-order unifications
can be inefficient compared to the lazy variant seen in elementary tactics
- like @{ML resolve_tac}.
+ like \<^ML>\<open>resolve_tac\<close>.
\<^descr> \<open>rules\<^sub>1 RL rules\<^sub>2\<close> abbreviates \<open>rules\<^sub>1 RLN (1, rules\<^sub>2)\<close>.
@@ -1196,32 +1191,32 @@
@{index_ML Proof_Syntax.pretty_proof: "Proof.context -> proof -> Pretty.T"} \\
\end{mldecls}
- \<^descr> Type @{ML_type proof} represents proof terms; this is a datatype with
+ \<^descr> Type \<^ML_type>\<open>proof\<close> represents proof terms; this is a datatype with
constructors @{index_ML Abst}, @{index_ML AbsP}, @{index_ML_op "%"},
@{index_ML_op "%%"}, @{index_ML PBound}, @{index_ML MinProof}, @{index_ML
Hyp}, @{index_ML PAxm}, @{index_ML Oracle}, @{index_ML Promise}, @{index_ML
PThm} as explained above. %FIXME OfClass (!?)
- \<^descr> Type @{ML_type proof_body} represents the nested proof information of a
+ \<^descr> Type \<^ML_type>\<open>proof_body\<close> represents the nested proof information of a
named theorem, consisting of a digest of oracles and named theorems over some
proof term. The digest only covers the directly visible part of the proof:
in order to get the full information, the implicit graph of nested theorems
- needs to be traversed (e.g.\ using @{ML Proofterm.fold_body_thms}).
+ needs to be traversed (e.g.\ using \<^ML>\<open>Proofterm.fold_body_thms\<close>).
- \<^descr> @{ML Thm.proof_of}~\<open>thm\<close> and @{ML Thm.proof_body_of}~\<open>thm\<close> produce the
+ \<^descr> \<^ML>\<open>Thm.proof_of\<close>~\<open>thm\<close> and \<^ML>\<open>Thm.proof_body_of\<close>~\<open>thm\<close> produce the
proof term or proof body (with digest of oracles and theorems) from a given
theorem. Note that this involves a full join of internal futures that
fulfill pending proof promises, and thus disrupts the natural bottom-up
construction of proofs by introducing dynamic ad-hoc dependencies. Parallel
performance may suffer from inspecting proof terms at run-time.
- \<^descr> @{ML Proofterm.proofs} specifies the detail of proof recording within
- @{ML_type thm} values produced by the inference kernel: @{ML 0} records only
- the names of oracles, @{ML 1} records oracle names and propositions, @{ML 2}
+ \<^descr> \<^ML>\<open>Proofterm.proofs\<close> specifies the detail of proof recording within
+ \<^ML_type>\<open>thm\<close> values produced by the inference kernel: \<^ML>\<open>0\<close> records only
+ the names of oracles, \<^ML>\<open>1\<close> records oracle names and propositions, \<^ML>\<open>2\<close>
additionally records full proof terms. Officially named theorems that
contribute to a result are recorded in any case.
- \<^descr> @{ML Reconstruct.reconstruct_proof}~\<open>ctxt prop prf\<close> turns the implicit
+ \<^descr> \<^ML>\<open>Reconstruct.reconstruct_proof\<close>~\<open>ctxt prop prf\<close> turns the implicit
proof term \<open>prf\<close> into a full proof of the given proposition.
Reconstruction may fail if \<open>prf\<close> is not a proof of \<open>prop\<close>, or if it does not
@@ -1229,21 +1224,21 @@
for proofs that are constructed manually, but not for those produced
automatically by the inference kernel.
- \<^descr> @{ML Reconstruct.expand_proof}~\<open>ctxt [thm\<^sub>1, \<dots>, thm\<^sub>n] prf\<close> expands and
+ \<^descr> \<^ML>\<open>Reconstruct.expand_proof\<close>~\<open>ctxt [thm\<^sub>1, \<dots>, thm\<^sub>n] prf\<close> expands and
reconstructs the proofs of all specified theorems, with the given (full)
proof. Theorems that are not uniquely specified via their name may be
disambiguated by giving their proposition.
- \<^descr> @{ML Proof_Checker.thm_of_proof}~\<open>thy prf\<close> turns the given (full) proof
+ \<^descr> \<^ML>\<open>Proof_Checker.thm_of_proof\<close>~\<open>thy prf\<close> turns the given (full) proof
into a theorem, by replaying it using only primitive rules of the inference
kernel.
- \<^descr> @{ML Proof_Syntax.read_proof}~\<open>thy b\<^sub>1 b\<^sub>2 s\<close> reads in a proof term. The
+ \<^descr> \<^ML>\<open>Proof_Syntax.read_proof\<close>~\<open>thy b\<^sub>1 b\<^sub>2 s\<close> reads in a proof term. The
Boolean flags indicate the use of sort and type information. Usually, typing
information is left implicit and is inferred during proof reconstruction.
%FIXME eliminate flags!?
- \<^descr> @{ML Proof_Syntax.pretty_proof}~\<open>ctxt prf\<close> pretty-prints the given proof
+ \<^descr> \<^ML>\<open>Proof_Syntax.pretty_proof\<close>~\<open>ctxt prf\<close> pretty-prints the given proof
term.
\<close>
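text \<open>
  A small sketch, assuming a sufficient level of proof recording: the proof
  term of a basic theorem is retrieved and pretty-printed.
\<close>

ML_val \<open>
  val prf = Thm.proof_of @{thm refl};
  val _ = writeln (Pretty.string_of (Proof_Syntax.pretty_proof \<^context> prf));
\<close>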
--- a/src/Doc/Implementation/ML.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/ML.thy Sat Jan 05 17:24:33 2019 +0100
@@ -115,34 +115,32 @@
\<^medskip>
\begin{tabular}{lll}
variant & example & ML categories \\\hline
- lower-case & @{ML_text foo_bar} & values, types, record fields \\
- capitalized & @{ML_text Foo_Bar} & datatype constructors, structures, functors \\
- upper-case & @{ML_text FOO_BAR} & special values, exception constructors, signatures \\
+ lower-case & \<^ML_text>\<open>foo_bar\<close> & values, types, record fields \\
+ capitalized & \<^ML_text>\<open>Foo_Bar\<close> & datatype constructors, structures, functors \\
+ upper-case & \<^ML_text>\<open>FOO_BAR\<close> & special values, exception constructors, signatures \\
\end{tabular}
\<^medskip>
For historical reasons, many capitalized names omit underscores, e.g.\
- old-style @{ML_text FooBar} instead of @{ML_text Foo_Bar}. Genuine
+ old-style \<^ML_text>\<open>FooBar\<close> instead of \<^ML_text>\<open>Foo_Bar\<close>. Genuine
mixed-case names are \<^emph>\<open>not\<close> used, because clear division of words is
essential for readability.\<^footnote>\<open>Camel-case was invented to work around the lack
of underscore in some early non-ASCII character sets. Later it became
habitual in some language communities that are now strong in numbers.\<close>
A single (capital) character does not count as ``word'' in this respect:
- some Isabelle/ML names are suffixed by extra markers like this: @{ML_text
- foo_barT}.
-
- Name variants are produced by adding 1--3 primes, e.g.\ @{ML_text foo'},
- @{ML_text foo''}, or @{ML_text foo'''}, but not @{ML_text foo''''} or more.
- Decimal digits scale better to larger numbers, e.g.\ @{ML_text foo0},
- @{ML_text foo1}, @{ML_text foo42}.
+ some Isabelle/ML names are suffixed by extra markers like this: \<^ML_text>\<open>foo_barT\<close>.
+
+ Name variants are produced by adding 1--3 primes, e.g.\ \<^ML_text>\<open>foo'\<close>,
+ \<^ML_text>\<open>foo''\<close>, or \<^ML_text>\<open>foo'''\<close>, but not \<^ML_text>\<open>foo''''\<close> or more.
+ Decimal digits scale better to larger numbers, e.g.\ \<^ML_text>\<open>foo0\<close>,
+ \<^ML_text>\<open>foo1\<close>, \<^ML_text>\<open>foo42\<close>.
\<close>
paragraph \<open>Scopes.\<close>
text \<open>
Apart from very basic library modules, ML structures are not ``opened'', but
- names are referenced with explicit qualification, as in @{ML
- Syntax.string_of_term} for example. When devising names for structures and
+ names are referenced with explicit qualification, as in \<^ML>\<open>Syntax.string_of_term\<close> for example. When devising names for structures and
their components it is important to aim at eye-catching compositions of both
parts, because this is how they are seen in the sources and documentation.
For the same reasons, aliases of well-known library functions should be
@@ -150,8 +148,8 @@
Local names of function abstraction or case/let bindings are typically
shorter, sometimes using only rudiments of ``words'', while still avoiding
- cryptic shorthands. An auxiliary function called @{ML_text helper},
- @{ML_text aux}, or @{ML_text f} is considered bad style.
+ cryptic shorthands. An auxiliary function called \<^ML_text>\<open>helper\<close>,
+ \<^ML_text>\<open>aux\<close>, or \<^ML_text>\<open>f\<close> is considered bad style.
Example:
@@ -187,15 +185,13 @@
text \<open>
Here are some specific name forms that occur frequently in the sources.
- \<^item> A function that maps @{ML_text foo} to @{ML_text bar} is called @{ML_text
- foo_to_bar} or @{ML_text bar_of_foo} (never @{ML_text foo2bar}, nor
- @{ML_text bar_from_foo}, nor @{ML_text bar_for_foo}, nor @{ML_text
- bar4foo}).
-
- \<^item> The name component @{ML_text legacy} means that the operation is about to
+ \<^item> A function that maps \<^ML_text>\<open>foo\<close> to \<^ML_text>\<open>bar\<close> is called \<^ML_text>\<open>foo_to_bar\<close> or \<^ML_text>\<open>bar_of_foo\<close> (never \<^ML_text>\<open>foo2bar\<close>, nor
+ \<^ML_text>\<open>bar_from_foo\<close>, nor \<^ML_text>\<open>bar_for_foo\<close>, nor \<^ML_text>\<open>bar4foo\<close>).
+
+ \<^item> The name component \<^ML_text>\<open>legacy\<close> means that the operation is about to
be discontinued soon.
- \<^item> The name component @{ML_text global} means that this works with the
+ \<^item> The name component \<^ML_text>\<open>global\<close> means that this works with the
background theory instead of the regular local context
(\secref{sec:context}), sometimes for historical reasons, sometimes due to a
genuine lack of locality of the concept involved, sometimes as a fall-back
@@ -207,58 +203,57 @@
(\secref{sec:context} and \chref{ch:local-theory}) have firm naming
conventions as follows:
- \<^item> theories are called @{ML_text thy}, rarely @{ML_text theory}
- (never @{ML_text thry})
-
- \<^item> proof contexts are called @{ML_text ctxt}, rarely @{ML_text
- context} (never @{ML_text ctx})
-
- \<^item> generic contexts are called @{ML_text context}
-
- \<^item> local theories are called @{ML_text lthy}, except for local
+ \<^item> theories are called \<^ML_text>\<open>thy\<close>, rarely \<^ML_text>\<open>theory\<close>
+ (never \<^ML_text>\<open>thry\<close>)
+
+ \<^item> proof contexts are called \<^ML_text>\<open>ctxt\<close>, rarely \<^ML_text>\<open>context\<close> (never \<^ML_text>\<open>ctx\<close>)
+
+ \<^item> generic contexts are called \<^ML_text>\<open>context\<close>
+
+ \<^item> local theories are called \<^ML_text>\<open>lthy\<close>, except for local
theories that are treated as proof context (which is a semantic
super-type)
Variations with primed or decimal numbers are always possible, as well as
- semantic prefixes like @{ML_text foo_thy} or @{ML_text bar_ctxt}, but the
+ semantic prefixes like \<^ML_text>\<open>foo_thy\<close> or \<^ML_text>\<open>bar_ctxt\<close>, but the
base conventions above need to be preserved. This allows one to emphasize their
data flow via plain regular expressions in the text editor.
\<^item> The main logical entities (\secref{ch:logic}) have established naming
convention as follows:
- \<^item> sorts are called @{ML_text S}
-
- \<^item> types are called @{ML_text T}, @{ML_text U}, or @{ML_text ty} (never
- @{ML_text t})
-
- \<^item> terms are called @{ML_text t}, @{ML_text u}, or @{ML_text tm} (never
- @{ML_text trm})
-
- \<^item> certified types are called @{ML_text cT}, rarely @{ML_text T}, with
+ \<^item> sorts are called \<^ML_text>\<open>S\<close>
+
+ \<^item> types are called \<^ML_text>\<open>T\<close>, \<^ML_text>\<open>U\<close>, or \<^ML_text>\<open>ty\<close> (never
+ \<^ML_text>\<open>t\<close>)
+
+ \<^item> terms are called \<^ML_text>\<open>t\<close>, \<^ML_text>\<open>u\<close>, or \<^ML_text>\<open>tm\<close> (never
+ \<^ML_text>\<open>trm\<close>)
+
+ \<^item> certified types are called \<^ML_text>\<open>cT\<close>, rarely \<^ML_text>\<open>T\<close>, with
variants as for types
- \<^item> certified terms are called @{ML_text ct}, rarely @{ML_text t}, with
- variants as for terms (never @{ML_text ctrm})
-
- \<^item> theorems are called @{ML_text th}, or @{ML_text thm}
+ \<^item> certified terms are called \<^ML_text>\<open>ct\<close>, rarely \<^ML_text>\<open>t\<close>, with
+ variants as for terms (never \<^ML_text>\<open>ctrm\<close>)
+
+ \<^item> theorems are called \<^ML_text>\<open>th\<close>, or \<^ML_text>\<open>thm\<close>
Proper semantic names override these conventions completely. For example,
- the left-hand side of an equation (as a term) can be called @{ML_text lhs}
- (not @{ML_text lhs_tm}). Or a term that is known to be a variable can be
- called @{ML_text v} or @{ML_text x}.
+ the left-hand side of an equation (as a term) can be called \<^ML_text>\<open>lhs\<close>
+ (not \<^ML_text>\<open>lhs_tm\<close>). Or a term that is known to be a variable can be
+ called \<^ML_text>\<open>v\<close> or \<^ML_text>\<open>x\<close>.
\<^item> Tactics (\secref{sec:tactics}) are sufficiently important to have specific
naming conventions. The name of a basic tactic definition always has a
- @{ML_text "_tac"} suffix, the subgoal index (if applicable) is always called
- @{ML_text i}, and the goal state (if made explicit) is usually called
- @{ML_text st} instead of the somewhat misleading @{ML_text thm}. Any other
+ \<^ML_text>\<open>_tac\<close> suffix, the subgoal index (if applicable) is always called
+ \<^ML_text>\<open>i\<close>, and the goal state (if made explicit) is usually called
+ \<^ML_text>\<open>st\<close> instead of the somewhat misleading \<^ML_text>\<open>thm\<close>. Any other
arguments are given before the latter two, and the general context is given
first. Example:
@{verbatim [display] \<open> fun my_tac ctxt arg1 arg2 i st = ...\<close>}
- Note that the goal state @{ML_text st} above is rarely made explicit, if
+ Note that the goal state \<^ML_text>\<open>st\<close> above is rarely made explicit, if
tactic combinators (tacticals) are used as usual.
A tactic that requires a proof context needs to make that explicit as seen
@@ -314,16 +309,16 @@
c);
\<close>}
- Some special infixes (e.g.\ @{ML_text "|>"}) work better at the start of the
+ Some special infixes (e.g.\ \<^ML_text>\<open>|>\<close>) work better at the start of the
line, but punctuation is always at the end.
Function application follows the tradition of \<open>\<lambda>\<close>-calculus, not informal
- mathematics. For example: @{ML_text "f a b"} for a curried function, or
- @{ML_text "g (a, b)"} for a tupled function. Note that the space between
- @{ML_text g} and the pair @{ML_text "(a, b)"} follows the important
- principle of \<^emph>\<open>compositionality\<close>: the layout of @{ML_text "g p"} does not
- change when @{ML_text "p"} is refined to the concrete pair @{ML_text "(a,
- b)"}.
+ mathematics. For example: \<^ML_text>\<open>f a b\<close> for a curried function, or
+ \<^ML_text>\<open>g (a, b)\<close> for a tupled function. Note that the space between
+ \<^ML_text>\<open>g\<close> and the pair \<^ML_text>\<open>(a, b)\<close> follows the important
+ principle of \<^emph>\<open>compositionality\<close>: the layout of \<^ML_text>\<open>g p\<close> does not
+ change when \<^ML_text>\<open>p\<close> is refined to the concrete pair \<^ML_text>\<open>(a,
+ b)\<close>.
\<close>
paragraph \<open>Indentation\<close>
@@ -372,13 +367,13 @@
paragraph \<open>Complex expressions\<close>
text \<open>
- that consist of multi-clausal function definitions, @{ML_text handle},
- @{ML_text case}, @{ML_text let} (and combinations) require special
+ that consist of multi-clausal function definitions, \<^ML_text>\<open>handle\<close>,
+ \<^ML_text>\<open>case\<close>, \<^ML_text>\<open>let\<close> (and combinations) require special
attention. The syntax of Standard ML is quite ambitious and admits a lot of
variance that can distort the meaning of the text.
- Multiple clauses of @{ML_text fun}, @{ML_text fn}, @{ML_text handle},
- @{ML_text case} get extra indentation to indicate the nesting clearly.
+ Multiple clauses of \<^ML_text>\<open>fun\<close>, \<^ML_text>\<open>fn\<close>, \<^ML_text>\<open>handle\<close>,
+ \<^ML_text>\<open>case\<close> get extra indentation to indicate the nesting clearly.
Example:
@{verbatim [display]
@@ -397,7 +392,7 @@
| foo p2 =
expr2\<close>}
- Body expressions consisting of @{ML_text case} or @{ML_text let} require
+ Body expressions consisting of \<^ML_text>\<open>case\<close> or \<^ML_text>\<open>let\<close> require
care to maintain compositionality, to prevent loss of logical indentation
where it is especially important to see the structure of the text. Example:
@@ -428,14 +423,14 @@
...
end\<close>}
- Extra parentheses around @{ML_text case} expressions are optional, but help
+ Extra parentheses around \<^ML_text>\<open>case\<close> expressions are optional, but help
to analyse the nesting based on character matching in the text editor.
\<^medskip>
There are two main exceptions to the overall principle of compositionality
in the layout of complex expressions.
- \<^enum> @{ML_text "if"} expressions are iterated as if ML had multi-branch
+ \<^enum> \<^ML_text>\<open>if\<close> expressions are iterated as if ML had multi-branch
conditionals, e.g.
@{verbatim [display]
@@ -445,7 +440,7 @@
else if b2 then e2
else e3\<close>}
- \<^enum> @{ML_text fn} abstractions are often layed-out as if they would lack any
+ \<^enum> \<^ML_text>\<open>fn\<close> abstractions are often laid out as if they lacked any
structure by themselves. This traditional form is motivated by the
possibility of shifting function arguments back and forth wrt.\ additional
combinators. Example:
@@ -456,12 +451,12 @@
fun foo x y = fold (fn z =>
expr)\<close>}
- Here the visual appearance is that of three arguments @{ML_text x},
- @{ML_text y}, @{ML_text z} in a row.
+ Here the visual appearance is that of three arguments \<^ML_text>\<open>x\<close>,
+ \<^ML_text>\<open>y\<close>, \<^ML_text>\<open>z\<close> in a row.
Such weakly structured layout should be used with great care. Here are some
- counter-examples involving @{ML_text let} expressions:
+ counter-examples involving \<^ML_text>\<open>let\<close> expressions:
@{verbatim [display]
\<open> (* WRONG *)
@@ -537,7 +532,7 @@
more commands that refer to ML source, such as @{command_ref setup} or
@{command_ref declaration}. Even more fine-grained embedding of ML into Isar
is encountered in the proof method @{method_ref tactic}, which refines the
- pending goal state via a given expression of type @{ML_type tactic}.
+ pending goal state via a given expression of type \<^ML_type>\<open>tactic\<close>.
\<close>
text %mlex \<open>
@@ -552,8 +547,7 @@
\<close>
text \<open>
- Here the ML environment is already managed by Isabelle, i.e.\ the @{ML
- factorial} function is not yet accessible in the preceding paragraph, nor in
+ Here the ML environment is already managed by Isabelle, i.e.\ the \<^ML>\<open>factorial\<close> function is not yet accessible in the preceding paragraph, nor in
a different theory that is independent from the current one in the import
hierarchy.
@@ -589,8 +583,7 @@
Two further ML commands are useful in certain situations: @{command_ref
ML_val} and @{command_ref ML_command} are \<^emph>\<open>diagnostic\<close> in the sense that
there is no effect on the underlying environment, and can thus be used
- anywhere. The examples below produce long strings of digits by invoking @{ML
- factorial}: @{command ML_val} takes care of printing the ML toplevel result,
+ anywhere. The examples below produce long strings of digits by invoking \<^ML>\<open>factorial\<close>: @{command ML_val} takes care of printing the ML toplevel result,
but @{command ML_command} is silent so we produce an explicit output
message.
\<close>
@@ -624,19 +617,19 @@
@{index_ML ML_Thms.bind_thm: "string * thm -> unit"} \\
\end{mldecls}
- \<^descr> @{ML "Context.the_generic_context ()"} refers to the theory context of
+ \<^descr> \<^ML>\<open>Context.the_generic_context ()\<close> refers to the theory context of
the ML toplevel --- at compile time. ML code needs to take care to refer to
- @{ML "Context.the_generic_context ()"} correctly. Recall that evaluation
+ \<^ML>\<open>Context.the_generic_context ()\<close> correctly. Recall that evaluation
of a function body is delayed until actual run-time.
- \<^descr> @{ML "Context.>>"}~\<open>f\<close> applies context transformation \<open>f\<close> to the implicit
+ \<^descr> \<^ML>\<open>Context.>>\<close>~\<open>f\<close> applies context transformation \<open>f\<close> to the implicit
context of the ML toplevel.
- \<^descr> @{ML ML_Thms.bind_thms}~\<open>(name, thms)\<close> stores a list of theorems produced
+ \<^descr> \<^ML>\<open>ML_Thms.bind_thms\<close>~\<open>(name, thms)\<close> stores a list of theorems produced
in ML both in the (global) theory context and the ML toplevel, associating
it with the provided name.
- \<^descr> @{ML ML_Thms.bind_thm} is similar to @{ML ML_Thms.bind_thms} but refers to
+ \<^descr> \<^ML>\<open>ML_Thms.bind_thm\<close> is similar to \<^ML>\<open>ML_Thms.bind_thms\<close> but refers to
a singleton fact.
It is important to note that the above functions are really restricted to
@@ -654,9 +647,9 @@
\<^emph>\<open>ML antiquotation\<close>. The standard token language of ML is augmented by
special syntactic entities of the following form:
- @{rail \<open>
+ \<^rail>\<open>
@{syntax_def antiquote}: '@{' name args '}'
- \<close>}
+ \<close>
Here @{syntax name} and @{syntax args} are outer syntax categories, as
defined in @{cite "isabelle-isar-ref"}.
@@ -692,11 +685,11 @@
@{ML_antiquotation_def "print"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation make_string}
;
@@{ML_antiquotation print} embedded?
- \<close>}
+ \<close>
\<^descr> \<open>@{make_string}\<close> inlines a function to print arbitrary values similar to
the ML toplevel. The result is compiler dependent and may fall back on "?"
@@ -705,7 +698,7 @@
\<^descr> \<open>@{print f}\<close> uses the ML function \<open>f: string -> unit\<close> to output the result
of \<open>@{make_string}\<close> above, together with the source position of the
- antiquotation. The default output function is @{ML writeln}.
+ antiquotation. The default output function is \<^ML>\<open>writeln\<close>.
\<close>
text %mlex \<open>
@@ -717,10 +710,10 @@
val x = 42;
val y = true;
- writeln (@{make_string} {x = x, y = y});
-
- @{print} {x = x, y = y};
- @{print tracing} {x = x, y = y};
+ writeln (\<^make_string> {x = x, y = y});
+
+ \<^print> {x = x, y = y};
+ \<^print>\<open>tracing\<close> {x = x, y = y};
\<close>
@@ -865,23 +858,23 @@
@{index_ML fold_map: "('a -> 'b -> 'c * 'b) -> 'a list -> 'b -> 'c list * 'b"} \\
\end{mldecls}
- \<^descr> @{ML fold}~\<open>f\<close> lifts the parametrized update function \<open>f\<close> to a list of
+ \<^descr> \<^ML>\<open>fold\<close>~\<open>f\<close> lifts the parametrized update function \<open>f\<close> to a list of
parameters.
- \<^descr> @{ML fold_rev}~\<open>f\<close> is similar to @{ML fold}~\<open>f\<close>, but works inside-out, as
+ \<^descr> \<^ML>\<open>fold_rev\<close>~\<open>f\<close> is similar to \<^ML>\<open>fold\<close>~\<open>f\<close>, but works inside-out, as
if the list would be reversed.
- \<^descr> @{ML fold_map}~\<open>f\<close> lifts the parametrized update function \<open>f\<close> (with
+ \<^descr> \<^ML>\<open>fold_map\<close>~\<open>f\<close> lifts the parametrized update function \<open>f\<close> (with
side-result) to a list of parameters and cumulative side-results.
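  For illustration, a sketch that threads a counter through a list via
  \<^ML>\<open>fold_map\<close>:
  @{verbatim [display]
\<open> val (ys, n) =
   fold_map (fn x => fn i => ((x, i), i + 1)) ["a", "b", "c"] 0;
 (*ys = [("a", 0), ("b", 1), ("c", 2)] and n = 3*)\<close>}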
\begin{warn}
The literature on functional programming provides a confusing multitude of
combinators called \<open>foldl\<close>, \<open>foldr\<close> etc. SML97 provides its own variations
- as @{ML List.foldl} and @{ML List.foldr}, while the classic Isabelle library
- also has the historic @{ML Library.foldl} and @{ML Library.foldr}. To avoid
+ as \<^ML>\<open>List.foldl\<close> and \<^ML>\<open>List.foldr\<close>, while the classic Isabelle library
+ also has the historic \<^ML>\<open>Library.foldl\<close> and \<^ML>\<open>Library.foldr\<close>. To avoid
unnecessary complication, all these historical versions should be ignored,
- and the canonical @{ML fold} (or @{ML fold_rev}) used exclusively.
+ and the canonical \<^ML>\<open>fold\<close> (or \<^ML>\<open>fold_rev\<close>) used exclusively.
\end{warn}
\<close>
@@ -897,12 +890,11 @@
|> fold (Buffer.add o string_of_int) (0 upto 9)
|> Buffer.content;
- @{assert} (s = "digits: 0123456789");
+ \<^assert> (s = "digits: 0123456789");
\<close>
text \<open>
- Note how @{ML "fold (Buffer.add o string_of_int)"} above saves an extra @{ML
- "map"} over the given list. This kind of peephole optimization reduces both
+ Note how \<^ML>\<open>fold (Buffer.add o string_of_int)\<close> above saves an extra \<^ML>\<open>map\<close> over the given list. This kind of peephole optimization reduces both
the code size and the tree structures in memory (``deforestation''), but it
requires some practice to read and write fluently.
@@ -931,28 +923,26 @@
\<close>
text \<open>
- The slowness of @{ML slow_content} is due to the @{ML implode} of the
+ The slowness of \<^ML>\<open>slow_content\<close> is due to the \<^ML>\<open>implode\<close> of the
recursive results, because it copies previously produced strings again and
again.
- The incremental @{ML add_content} avoids this by operating on a buffer that
- is passed through in a linear fashion. Using @{ML_text "#>"} and contraction
+ The incremental \<^ML>\<open>add_content\<close> avoids this by operating on a buffer that
+ is passed through in a linear fashion. Using \<^ML_text>\<open>#>\<close> and contraction
over the actual buffer argument saves some additional boiler-plate. Of
- course, the two @{ML "Buffer.add"} invocations with concatenated strings
+ course, the two \<^ML>\<open>Buffer.add\<close> invocations with concatenated strings
could have been split into smaller parts, but this would have obfuscated the
source without making a big difference in performance. Here we have done
some peephole-optimization for the sake of readability.
- Another benefit of @{ML add_content} is its ``open'' form as a function on
+ Another benefit of \<^ML>\<open>add_content\<close> is its ``open'' form as a function on
buffers that can be continued in further linear transformations, folding
- etc. Thus it is more compositional than the naive @{ML slow_content}. As
+ etc. Thus it is more compositional than the naive \<^ML>\<open>slow_content\<close>. As
a realistic example, compare the old-style
- @{ML "Term.maxidx_of_term: term -> int"} with the newer @{ML
- "Term.maxidx_term: term -> int -> int"} in Isabelle/Pure.
-
- Note that @{ML fast_content} above is only defined as example. In many
- practical situations, it is customary to provide the incremental @{ML
- add_content} only and leave the initialization and termination to the
+ \<^ML>\<open>Term.maxidx_of_term: term -> int\<close> with the newer \<^ML>\<open>Term.maxidx_term: term -> int -> int\<close> in Isabelle/Pure.
+
+ Note that \<^ML>\<open>fast_content\<close> above is only defined as an example. In many
+ practical situations, it is customary to provide the incremental \<^ML>\<open>add_content\<close> only and leave the initialization and termination to the
concrete application by the user.
\<close>
@@ -985,10 +975,10 @@
@{index_ML error: "string -> 'a"} % FIXME Output.error_message (!?) \\
\end{mldecls}
- \<^descr> @{ML writeln}~\<open>text\<close> outputs \<open>text\<close> as regular message. This is the
+ \<^descr> \<^ML>\<open>writeln\<close>~\<open>text\<close> outputs \<open>text\<close> as regular message. This is the
primary message output operation of Isabelle and should be used by default.
- \<^descr> @{ML tracing}~\<open>text\<close> outputs \<open>text\<close> as special tracing message, indicating
+ \<^descr> \<^ML>\<open>tracing\<close>~\<open>text\<close> outputs \<open>text\<close> as special tracing message, indicating
potential high-volume output to the front-end (hundreds or thousands of
messages issued by a single command). The idea is to allow the
user-interface to downgrade the quality of message display to achieve higher
@@ -998,27 +988,26 @@
e.g.\ switch to a different output window. So this channel should not be
used for regular output.
- \<^descr> @{ML warning}~\<open>text\<close> outputs \<open>text\<close> as warning, which typically means some
+ \<^descr> \<^ML>\<open>warning\<close>~\<open>text\<close> outputs \<open>text\<close> as warning, which typically means some
extra emphasis on the front-end side (color highlighting, icons, etc.).
- \<^descr> @{ML error}~\<open>text\<close> raises exception @{ML ERROR}~\<open>text\<close> and thus lets the
+ \<^descr> \<^ML>\<open>error\<close>~\<open>text\<close> raises exception \<^ML>\<open>ERROR\<close>~\<open>text\<close> and thus lets the
Isar toplevel print \<open>text\<close> on the error channel, which typically means some
extra emphasis on the front-end side (color highlighting, icons, etc.).
This assumes that the exception is not handled before the command
- terminates. Handling exception @{ML ERROR}~\<open>text\<close> is a perfectly legal
+ terminates. Handling exception \<^ML>\<open>ERROR\<close>~\<open>text\<close> is a perfectly legal
alternative: it means that the error is absorbed without any message output.
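  For illustration, a sketch that absorbs such an error without any message
  output:
  @{verbatim [display]
\<open> val msg = (error "bad input" : string) handle ERROR msg => msg;
 (*msg = "bad input", nothing is printed*)\<close>}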
\begin{warn}
- The actual error channel is accessed via @{ML Output.error_message}, but
+ The actual error channel is accessed via \<^ML>\<open>Output.error_message\<close>, but
this is normally not used directly in user code.
\end{warn}
\begin{warn}
Regular Isabelle/ML code should output messages exclusively by the official
- channels. Using raw I/O on \<^emph>\<open>stdout\<close> or \<^emph>\<open>stderr\<close> instead (e.g.\ via @{ML
- TextIO.output}) is apt to cause problems in the presence of parallel and
+ channels. Using raw I/O on \<^emph>\<open>stdout\<close> or \<^emph>\<open>stderr\<close> instead (e.g.\ via \<^ML>\<open>TextIO.output\<close>) is apt to cause problems in the presence of parallel and
asynchronous processing of Isabelle theories. Such raw output might be
displayed by the front-end in some system console log, with a low chance
that the user will ever see it. Moreover, as a genuine side-effect on global
@@ -1029,7 +1018,7 @@
\begin{warn}
The message channels should be used in a message-oriented manner. This means
that multi-line output that logically belongs together is issued by a single
- invocation of @{ML writeln} etc.\ with the functional concatenation of all
+ invocation of \<^ML>\<open>writeln\<close> etc.\ with the functional concatenation of all
message constituents.
\end{warn}
\<close>
@@ -1081,11 +1070,11 @@
text \<open>
These are meant to provide informative feedback about malformed input etc.
- The \<^emph>\<open>error\<close> function raises the corresponding @{ML ERROR} exception, with a
- plain text message as argument. @{ML ERROR} exceptions can be handled
+ The \<^emph>\<open>error\<close> function raises the corresponding \<^ML>\<open>ERROR\<close> exception, with a
+ plain text message as argument. \<^ML>\<open>ERROR\<close> exceptions can be handled
internally, in order to be ignored, turned into other exceptions, or
cascaded by appending messages. If the corresponding Isabelle/Isar command
- terminates with an @{ML ERROR} exception state, the system will print the
+ terminates with an \<^ML>\<open>ERROR\<close> exception state, the system will print the
result on the error channel (see \secref{sec:message-channels}).
It is considered bad style to refer to internal function names or values in
@@ -1109,7 +1098,7 @@
purpose is to determine quickly what has happened where. Traditionally, the
(short) exception message would include the name of an ML function, although
this is no longer necessary, because the ML runtime system attaches detailed
- source position stemming from the corresponding @{ML_text raise} keyword.
+ source position stemming from the corresponding \<^ML_text>\<open>raise\<close> keyword.
\<^medskip>
User modules can always introduce their own custom exceptions locally, e.g.\
@@ -1123,7 +1112,7 @@
text \<open>
These indicate arbitrary system events: both the ML runtime system and the
Isabelle/ML infrastructure signal various exceptional situations by raising
- the special @{ML Exn.Interrupt} exception in user code.
+ the special \<^ML>\<open>Exn.Interrupt\<close> exception in user code.
This is the one and only way that physical events can intrude an Isabelle/ML
program. Such an interrupt can mean out-of-memory, stack overflow, timeout,
@@ -1160,32 +1149,32 @@
@{index_ML Runtime.exn_trace: "(unit -> 'a) -> 'a"} \\
\end{mldecls}
- \<^descr> @{ML try}~\<open>f x\<close> makes the partiality of evaluating \<open>f x\<close> explicit via the
+ \<^descr> \<^ML>\<open>try\<close>~\<open>f x\<close> makes the partiality of evaluating \<open>f x\<close> explicit via the
option datatype. Interrupts are \<^emph>\<open>not\<close> handled here, i.e.\ this form serves
- as safe replacement for the \<^emph>\<open>unsafe\<close> version @{ML_text "(SOME"}~\<open>f
- x\<close>~@{ML_text "handle _ => NONE)"} that is occasionally seen in books about
+ as safe replacement for the \<^emph>\<open>unsafe\<close> version \<^ML_text>\<open>(SOME\<close>~\<open>f
+ x\<close>~\<^ML_text>\<open>handle _ => NONE)\<close> that is occasionally seen in books about
SML97, but not in Isabelle/ML.
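  For example (a sketch):
  @{verbatim [display]
\<open> \<^assert> (try hd ([]: int list) = NONE);
 \<^assert> (try hd [1, 2] = SOME 1);\<close>}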
- \<^descr> @{ML can} is similar to @{ML try} with more abstract result.
-
- \<^descr> @{ML ERROR}~\<open>msg\<close> represents user errors; this exception is normally
- raised indirectly via the @{ML error} function (see
+ \<^descr> \<^ML>\<open>can\<close> is similar to \<^ML>\<open>try\<close> with more abstract result.
+
+ \<^descr> \<^ML>\<open>ERROR\<close>~\<open>msg\<close> represents user errors; this exception is normally
+ raised indirectly via the \<^ML>\<open>error\<close> function (see
\secref{sec:message-channels}).
- \<^descr> @{ML Fail}~\<open>msg\<close> represents general program failures.
-
- \<^descr> @{ML Exn.is_interrupt} identifies interrupts robustly, without mentioning
+ \<^descr> \<^ML>\<open>Fail\<close>~\<open>msg\<close> represents general program failures.
+
+ \<^descr> \<^ML>\<open>Exn.is_interrupt\<close> identifies interrupts robustly, without mentioning
concrete exception constructors in user code. Handled interrupts need to be
re-raised promptly!
- \<^descr> @{ML Exn.reraise}~\<open>exn\<close> raises exception \<open>exn\<close> while preserving its implicit
+ \<^descr> \<^ML>\<open>Exn.reraise\<close>~\<open>exn\<close> raises exception \<open>exn\<close> while preserving its implicit
position information (if possible, depending on the ML platform).
- \<^descr> @{ML Runtime.exn_trace}~@{ML_text "(fn () =>"}~\<open>e\<close>@{ML_text ")"} evaluates
+ \<^descr> \<^ML>\<open>Runtime.exn_trace\<close>~\<^ML_text>\<open>(fn () =>\<close>~\<open>e\<close>\<^ML_text>\<open>)\<close> evaluates
expression \<open>e\<close> while printing a full trace of its stack of nested exceptions
(if possible, depending on the ML platform).
- Inserting @{ML Runtime.exn_trace} into ML code temporarily is useful for
+ Inserting \<^ML>\<open>Runtime.exn_trace\<close> into ML code temporarily is useful for
debugging, but not suitable for production code.
\<close>
@@ -1195,16 +1184,15 @@
@{ML_antiquotation_def "undefined"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- \<^descr> \<open>@{assert}\<close> inlines a function @{ML_type "bool -> unit"} that raises @{ML
- Fail} if the argument is @{ML false}. Due to inlining the source position of
+ \<^descr> \<open>@{assert}\<close> inlines a function \<^ML_type>\<open>bool -> unit\<close> that raises \<^ML>\<open>Fail\<close> if the argument is \<^ML>\<open>false\<close>. Due to inlining the source position of
failed assertions is included in the error output.
- \<^descr> \<open>@{undefined}\<close> inlines @{verbatim raise}~@{ML Match}, i.e.\ the ML program
+ \<^descr> \<open>@{undefined}\<close> inlines \<^verbatim>\<open>raise\<close>~\<^ML>\<open>Match\<close>, i.e.\ the ML program
behaves as in some function application of an undefined case.
\<close>
text %mlex \<open>
- The ML function @{ML undefined} is defined in \<^file>\<open>~~/src/Pure/library.ML\<close>
+ The ML function \<^ML>\<open>undefined\<close> is defined in \<^file>\<open>~~/src/Pure/library.ML\<close>
as follows:
\<close>
@@ -1216,7 +1204,7 @@
instead:
\<close>
-ML \<open>fun undefined _ = @{undefined}\<close>
+ML \<open>fun undefined _ = \<^undefined>\<close>
text \<open>
\<^medskip>
@@ -1284,33 +1272,30 @@
@{index_ML Symbol.decode: "Symbol.symbol -> Symbol.sym"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "Symbol.symbol"} represents individual Isabelle symbols.
-
- \<^descr> @{ML "Symbol.explode"}~\<open>str\<close> produces a symbol list from the packed form.
- This function supersedes @{ML "String.explode"} for virtually all purposes
+ \<^descr> Type \<^ML_type>\<open>Symbol.symbol\<close> represents individual Isabelle symbols.
+
+ \<^descr> \<^ML>\<open>Symbol.explode\<close>~\<open>str\<close> produces a symbol list from the packed form.
+ This function supersedes \<^ML>\<open>String.explode\<close> for virtually all purposes
of manipulating text in Isabelle!\<^footnote>\<open>The runtime overhead for exploded strings
is mainly that of the list structure: individual symbols that happen to be a
singleton string do not require extra memory in Poly/ML.\<close>
- \<^descr> @{ML "Symbol.is_letter"}, @{ML "Symbol.is_digit"}, @{ML
- "Symbol.is_quasi"}, @{ML "Symbol.is_blank"} classify standard symbols
+ \<^descr> \<^ML>\<open>Symbol.is_letter\<close>, \<^ML>\<open>Symbol.is_digit\<close>, \<^ML>\<open>Symbol.is_quasi\<close>, \<^ML>\<open>Symbol.is_blank\<close> classify standard symbols
according to fixed syntactic conventions of Isabelle, cf.\ @{cite
"isabelle-isar-ref"}.
- \<^descr> Type @{ML_type "Symbol.sym"} is a concrete datatype that represents the
- different kinds of symbols explicitly, with constructors @{ML
- "Symbol.Char"}, @{ML "Symbol.UTF8"}, @{ML "Symbol.Sym"}, @{ML
- "Symbol.Control"}, @{ML "Symbol.Malformed"}.
-
- \<^descr> @{ML "Symbol.decode"} converts the string representation of a symbol into
+ \<^descr> Type \<^ML_type>\<open>Symbol.sym\<close> is a concrete datatype that represents the
+ different kinds of symbols explicitly, with constructors \<^ML>\<open>Symbol.Char\<close>, \<^ML>\<open>Symbol.UTF8\<close>, \<^ML>\<open>Symbol.Sym\<close>, \<^ML>\<open>Symbol.Control\<close>, \<^ML>\<open>Symbol.Malformed\<close>.
+
+ \<^descr> \<^ML>\<open>Symbol.decode\<close> converts the string representation of a symbol into
the datatype version.
\<close>
paragraph \<open>Historical note.\<close>
text \<open>
- In the original SML90 standard the primitive ML type @{ML_type char} did not
- exists, and @{ML_text "explode: string -> string list"} produced a list of
- singleton strings like @{ML "raw_explode: string -> string list"} in
+ In the original SML90 standard the primitive ML type \<^ML_type>\<open>char\<close> did not
+ exist, and \<^ML_text>\<open>explode: string -> string list\<close> produced a list of
+ singleton strings like \<^ML>\<open>raw_explode: string -> string list\<close> in
Isabelle/ML today. When SML97 came out, Isabelle did not adopt its somewhat
anachronistic 8-bit or 16-bit characters, but the idea of exploding a string
into a list of small strings was extended to ``symbols'' as explained above.
@@ -1327,8 +1312,7 @@
of its operations simply do not fit with important Isabelle/ML conventions
(like ``canonical argument order'', see
\secref{sec:canonical-argument-order}), others cause problems with the
- parallel evaluation model of Isabelle/ML (such as @{ML TextIO.print} or @{ML
- OS.Process.system}).
+ parallel evaluation model of Isabelle/ML (such as \<^ML>\<open>TextIO.print\<close> or \<^ML>\<open>OS.Process.system\<close>).
Subsequently we give a brief overview of important operations on basic ML
data types.
@@ -1342,7 +1326,7 @@
@{index_ML_type char} \\
\end{mldecls}
- \<^descr> Type @{ML_type char} is \<^emph>\<open>not\<close> used. The smallest textual unit in Isabelle
+ \<^descr> Type \<^ML_type>\<open>char\<close> is \<^emph>\<open>not\<close> used. The smallest textual unit in Isabelle
is represented as a ``symbol'' (see \secref{sec:symbols}).
\<close>
@@ -1354,7 +1338,7 @@
@{index_ML_type string} \\
\end{mldecls}
- \<^descr> Type @{ML_type string} represents immutable vectors of 8-bit characters.
+ \<^descr> Type \<^ML_type>\<open>string\<close> represents immutable vectors of 8-bit characters.
There are operations in SML to convert back and forth to actual byte
vectors, which are seldom used.
@@ -1362,11 +1346,10 @@
Isabelle-specific purposes with the following implicit substructures packed
into the string content:
- \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}), with @{ML
- Symbol.explode} as key operation;
+ \<^enum> sequence of Isabelle symbols (see also \secref{sec:symbols}), with \<^ML>\<open>Symbol.explode\<close> as key operation;
\<^enum> XML tree structure via YXML (see also @{cite "isabelle-system"}), with
- @{ML YXML.parse_body} as key operation.
+ \<^ML>\<open>YXML.parse_body\<close> as key operation.
Note that Isabelle/ML string literals may refer to Isabelle symbols like
``\<^verbatim>\<open>\<alpha>\<close>'' natively, \<^emph>\<open>without\<close> escaping the backslash. This is a consequence
@@ -1382,8 +1365,8 @@
ML_val \<open>
val s = "\<A>";
- @{assert} (length (Symbol.explode s) = 1);
- @{assert} (size s = 4);
+ \<^assert> (length (Symbol.explode s) = 1);
+ \<^assert> (size s = 4);
\<close>
text \<open>
@@ -1403,13 +1386,13 @@
@{index_ML_type int} \\
\end{mldecls}
- \<^descr> Type @{ML_type int} represents regular mathematical integers, which are
+ \<^descr> Type \<^ML_type>\<open>int\<close> represents regular mathematical integers, which are
\<^emph>\<open>unbounded\<close>. Overflow is treated properly, but should never happen in
practice.\<^footnote>\<open>The size limit for integer bit patterns in memory is 64\,MB for
32-bit Poly/ML, and much higher for 64-bit systems.\<close>
- Structure @{ML_structure IntInf} of SML97 is obsolete and superseded by
- @{ML_structure Int}. Structure @{ML_structure Integer} in
+ Structure \<^ML_structure>\<open>IntInf\<close> of SML97 is obsolete and superseded by
+ \<^ML_structure>\<open>Int\<close>. Structure \<^ML_structure>\<open>Integer\<close> in
\<^file>\<open>~~/src/Pure/General/integer.ML\<close> provides some additional operations.
\<close>
@@ -1421,7 +1404,7 @@
@{index_ML_type Rat.rat} \\
\end{mldecls}
- \<^descr> Type @{ML_type Rat.rat} represents rational numbers, based on the
+ \<^descr> Type \<^ML_type>\<open>Rat.rat\<close> represents rational numbers, based on the
unbounded integers of Poly/ML.
Literal rationals may be written with special antiquotation syntax
@@ -1441,11 +1424,11 @@
@{index_ML seconds: "real -> Time.time"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Time.time} represents time abstractly according to the
+ \<^descr> Type \<^ML_type>\<open>Time.time\<close> represents time abstractly according to the
SML97 basis library definition. This is adequate for internal ML operations,
but awkward in concrete time specifications.
- \<^descr> @{ML seconds}~\<open>s\<close> turns the concrete scalar \<open>s\<close> (measured in seconds) into
+ \<^descr> \<^ML>\<open>seconds\<close>~\<open>s\<close> turns the concrete scalar \<open>s\<close> (measured in seconds) into
an abstract time value. Floating point numbers are easy to use as
configuration options in the context (see \secref{sec:config-options}) or
system options that are maintained externally.
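  For instance, a sketch of a concrete timeout specification:
  @{verbatim [display]
\<open> val timeout = seconds 2.5; (*Time.toMilliseconds timeout = 2500*)\<close>}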
@@ -1467,8 +1450,8 @@
\<close>
text \<open>
- Apart from @{ML Option.map} most other operations defined in structure
- @{ML_structure Option} are alien to Isabelle/ML and never used. The
+ Apart from \<^ML>\<open>Option.map\<close> most other operations defined in structure
+ \<^ML_structure>\<open>Option\<close> are alien to Isabelle/ML and never used. The
operations shown above are defined in \<^file>\<open>~~/src/Pure/General/basics.ML\<close>.
\<close>
@@ -1490,29 +1473,29 @@
@{index_ML update: "('a * 'a -> bool) -> 'a -> 'a list -> 'a list"} \\
\end{mldecls}
- \<^descr> @{ML cons}~\<open>x xs\<close> evaluates to \<open>x :: xs\<close>.
+ \<^descr> \<^ML>\<open>cons\<close>~\<open>x xs\<close> evaluates to \<open>x :: xs\<close>.
Tupled infix operators are a historical accident in Standard ML. The curried
- @{ML cons} amends this, but it should be only used when partial application
+ \<^ML>\<open>cons\<close> amends this, but it should be only used when partial application
is required.
- \<^descr> @{ML member}, @{ML insert}, @{ML remove}, @{ML update} treat lists as a
+ \<^descr> \<^ML>\<open>member\<close>, \<^ML>\<open>insert\<close>, \<^ML>\<open>remove\<close>, \<^ML>\<open>update\<close> treat lists as a
set-like container that maintains the order of elements. See
\<^file>\<open>~~/src/Pure/library.ML\<close> for the full specifications (written in ML).
- There are some further derived operations like @{ML union} or @{ML inter}.
-
- Note that @{ML insert} is conservative about elements that are already a
- @{ML member} of the list, while @{ML update} ensures that the latest entry
+ There are some further derived operations like \<^ML>\<open>union\<close> or \<^ML>\<open>inter\<close>.
+
+ Note that \<^ML>\<open>insert\<close> is conservative about elements that are already a
+ \<^ML>\<open>member\<close> of the list, while \<^ML>\<open>update\<close> ensures that the latest entry
is always put in front. The latter discipline is often more appropriate in
declarations of context data (\secref{sec:context-data}) that are issued by
the user in Isar source: later declarations take precedence over earlier
ones. \<close>
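+
+text %mlex \<open>
+  A minimal sketch of this set-like discipline (with invented values):
+\<close>
+
+ML_val \<open>
+  val xs = insert (op =) 1 [2, 3];
+  \<^assert> (xs = [1, 2, 3]);
+  \<^assert> (insert (op =) 2 xs = xs);  (*conservative: 2 is already a member*)
+  \<^assert> (update (op =) 3 xs = [3, 1, 2]);  (*latest entry in front*)
+\<close>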
text %mlex \<open>
- Using canonical @{ML fold} together with @{ML cons} (or similar standard
+ Using canonical \<^ML>\<open>fold\<close> together with \<^ML>\<open>cons\<close> (or similar standard
operations) alternates the orientation of data. This is quite natural and
- should not be altered forcible by inserting extra applications of @{ML rev}.
- The alternative @{ML fold_rev} can be used in the few situations, where
+ should not be altered forcibly by inserting extra applications of \<^ML>\<open>rev\<close>.
+ The alternative \<^ML>\<open>fold_rev\<close> can be used in the few situations where
alternation should be prevented.
\<close>
@@ -1520,10 +1503,10 @@
val items = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
val list1 = fold cons items [];
- @{assert} (list1 = rev items);
+ \<^assert> (list1 = rev items);
val list2 = fold_rev cons items [];
- @{assert} (list2 = items);
+ \<^assert> (list2 = items);
\<close>
text \<open>
@@ -1537,11 +1520,10 @@
text \<open>
Here the first list is treated conservatively: only the new elements from
- the second list are inserted. The inside-out order of insertion via @{ML
- fold_rev} attempts to preserve the order of elements in the result.
+ the second list are inserted. The inside-out order of insertion via \<^ML>\<open>fold_rev\<close> attempts to preserve the order of elements in the result.
This way of merging lists is typical for context data
- (\secref{sec:context-data}). See also @{ML merge} as defined in
+ (\secref{sec:context-data}). See also \<^ML>\<open>merge\<close> as defined in
\<^file>\<open>~~/src/Pure/library.ML\<close>.
\<close>
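+
+text %mlex \<open>
+  Concretely, a hypothetical check of this merging discipline on plain
+  integer lists:
+\<close>
+
+ML_val \<open>
+  \<^assert> (merge (op =) ([1, 2, 3], [3, 4, 5]) = [4, 5, 1, 2, 3]);
+\<close>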
@@ -1562,7 +1544,7 @@
@{index_ML AList.update: "('a * 'a -> bool) -> 'a * 'b -> ('a * 'b) list -> ('a * 'b) list"} \\
\end{mldecls}
- \<^descr> @{ML AList.lookup}, @{ML AList.defined}, @{ML AList.update} implement the
+ \<^descr> \<^ML>\<open>AList.lookup\<close>, \<^ML>\<open>AList.defined\<close>, \<^ML>\<open>AList.update\<close> implement the
main ``framework operations'' for mappings in Isabelle/ML, following
standard conventions for their names and types.
@@ -1570,7 +1552,7 @@
via an explicit option element. There is no way to raise an exception
without changing the name to something like \<open>the_element\<close> or \<open>get\<close>.
- The \<open>defined\<close> operation is essentially a contraction of @{ML is_some} and
+ The \<open>defined\<close> operation is essentially a contraction of \<^ML>\<open>is_some\<close> and
\<^verbatim>\<open>lookup\<close>, but this is sufficiently frequent to justify its independent
existence. This also gives the implementation some opportunity for peep-hole
optimization.
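+
+text %mlex \<open>
+  A minimal sketch with an invented table:
+\<close>
+
+ML_val \<open>
+  val table = AList.update (op =) ("b", 2) [("a", 1)];
+  \<^assert> (AList.lookup (op =) table "a" = SOME 1);
+  \<^assert> (AList.defined (op =) table "b");
+  \<^assert> (AList.lookup (op =) table "c" = NONE);
+\<close>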
@@ -1600,15 +1582,15 @@
are notorious for causing problems. In a highly parallel system, both
correctness \<^emph>\<open>and\<close> performance are easily degraded when using mutable data.
- The unwieldy name of @{ML Unsynchronized.ref} for the constructor for
+ The unwieldy name of \<^ML>\<open>Unsynchronized.ref\<close> for the constructor for
references in Isabelle/ML emphasizes the inconveniences caused by
- mutability. Existing operations @{ML "!"} and @{ML_op ":="} are unchanged,
+ mutability. Existing operations \<^ML>\<open>!\<close> and \<^ML_op>\<open>:=\<close> are unchanged,
but should be used with special precautions, say in a strictly local
situation that is guaranteed to be restricted to sequential evaluation ---
now and in the future.
\begin{warn}
- Never @{ML_text "open Unsynchronized"}, not even in a local scope!
+ Never \<^ML_text>\<open>open Unsynchronized\<close>, not even in a local scope!
Pretending that mutable state is no problem is a very bad idea.
\end{warn}
\<close>
@@ -1746,10 +1728,10 @@
@{index_ML serial_string: "unit -> string"} \\
\end{mldecls}
- \<^descr> @{ML File.tmp_path}~\<open>path\<close> relocates the base component of \<open>path\<close> into the
+ \<^descr> \<^ML>\<open>File.tmp_path\<close>~\<open>path\<close> relocates the base component of \<open>path\<close> into the
unique temporary directory of the running Isabelle/ML process.
- \<^descr> @{ML serial_string}~\<open>()\<close> creates a new serial number that is unique over
+ \<^descr> \<^ML>\<open>serial_string\<close>~\<open>()\<close> creates a new serial number that is unique over
the runtime of the Isabelle/ML process.
\<close>
@@ -1760,7 +1742,7 @@
ML_val \<open>
val tmp1 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
val tmp2 = File.tmp_path (Path.basic ("foo" ^ serial_string ()));
- @{assert} (tmp1 <> tmp2);
+ \<^assert> (tmp1 <> tmp2);
\<close>
@@ -1790,21 +1772,21 @@
('a -> ('b * 'a) option) -> 'b"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a Synchronized.var"} represents synchronized variables
- with state of type @{ML_type 'a}.
-
- \<^descr> @{ML Synchronized.var}~\<open>name x\<close> creates a synchronized variable that is
+ \<^descr> Type \<^ML_type>\<open>'a Synchronized.var\<close> represents synchronized variables
+ with state of type \<^ML_type>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Synchronized.var\<close>~\<open>name x\<close> creates a synchronized variable that is
initialized with value \<open>x\<close>. The \<open>name\<close> is used for tracing.
- \<^descr> @{ML Synchronized.guarded_access}~\<open>var f\<close> lets the function \<open>f\<close> operate
+ \<^descr> \<^ML>\<open>Synchronized.guarded_access\<close>~\<open>var f\<close> lets the function \<open>f\<close> operate
within a critical section on the state \<open>x\<close> as follows: if \<open>f x\<close> produces
- @{ML NONE}, it continues to wait on the internal condition variable,
+ \<^ML>\<open>NONE\<close>, it continues to wait on the internal condition variable,
expecting that some other thread will eventually change the content in a
- suitable manner; if \<open>f x\<close> produces @{ML SOME}~\<open>(y, x')\<close> it is satisfied and
+ suitable manner; if \<open>f x\<close> produces \<^ML>\<open>SOME\<close>~\<open>(y, x')\<close> it is satisfied and
assigns the new state value \<open>x'\<close>, broadcasts a signal to all waiting threads
on the associated condition variable, and returns the result \<open>y\<close>.
- There are some further variants of the @{ML Synchronized.guarded_access}
+ There are some further variants of the \<^ML>\<open>Synchronized.guarded_access\<close>
combinator, see \<^file>\<open>~~/src/Pure/Concurrent/synchronized.ML\<close> for details.
\<close>
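+
+text %mlex \<open>
+  A minimal sketch of a synchronized counter; the guard always succeeds, so
+  no waiting is involved (\<^ML>\<open>Synchronized.value\<close> peeks at the current
+  state):
+\<close>
+
+ML_val \<open>
+  val counter = Synchronized.var "counter" (0: int);
+  val x = Synchronized.guarded_access counter (fn i => SOME (i, i + 1));
+  \<^assert> (x = 0);
+  \<^assert> (Synchronized.value counter = 1);
+\<close>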
@@ -1826,7 +1808,7 @@
val a = next ();
val b = next ();
- @{assert} (a <> b);
+ \<^assert> (a <> b);
\<close>
text \<open>
@@ -1892,29 +1874,28 @@
@{index_ML Par_Exn.release_first: "'a Exn.result list -> 'a list"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a Exn.result"} represents the disjoint sum of ML results
- explicitly, with constructor @{ML Exn.Res} for regular values and @{ML
- "Exn.Exn"} for exceptions.
-
- \<^descr> @{ML Exn.capture}~\<open>f x\<close> manages the evaluation of \<open>f x\<close> such that
- exceptions are made explicit as @{ML "Exn.Exn"}. Note that this includes
+ \<^descr> Type \<^ML_type>\<open>'a Exn.result\<close> represents the disjoint sum of ML results
+ explicitly, with constructor \<^ML>\<open>Exn.Res\<close> for regular values and \<^ML>\<open>Exn.Exn\<close> for exceptions.
+
+ \<^descr> \<^ML>\<open>Exn.capture\<close>~\<open>f x\<close> manages the evaluation of \<open>f x\<close> such that
+ exceptions are made explicit as \<^ML>\<open>Exn.Exn\<close>. Note that this includes
physical interrupts (see also \secref{sec:exceptions}), so the same
precautions apply to user code: interrupts must not be absorbed
accidentally!
- \<^descr> @{ML Exn.interruptible_capture} is similar to @{ML Exn.capture}, but
+ \<^descr> \<^ML>\<open>Exn.interruptible_capture\<close> is similar to \<^ML>\<open>Exn.capture\<close>, but
interrupts are immediately re-raised as required for user code.
- \<^descr> @{ML Exn.release}~\<open>result\<close> releases the original runtime result, exposing
+ \<^descr> \<^ML>\<open>Exn.release\<close>~\<open>result\<close> releases the original runtime result, exposing
its regular value or raising the reified exception.
- \<^descr> @{ML Par_Exn.release_all}~\<open>results\<close> combines results that were produced
+ \<^descr> \<^ML>\<open>Par_Exn.release_all\<close>~\<open>results\<close> combines results that were produced
independently (e.g.\ by parallel evaluation). If all results are regular
values, that list is returned. Otherwise, the collection of all exceptions
is raised, wrapped up as a collective parallel exception. Note that the latter
prevents access to individual exceptions by conventional \<^verbatim>\<open>handle\<close> of ML.
- \<^descr> @{ML Par_Exn.release_first} is similar to @{ML Par_Exn.release_all}, but
+ \<^descr> \<^ML>\<open>Par_Exn.release_first\<close> is similar to \<^ML>\<open>Par_Exn.release_all\<close>, but
only the first (meaningful) exception that has occurred in the original
evaluation process is raised again; the others are ignored. That single
exception may get handled by conventional means in ML.
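+
+text %mlex \<open>
+  For illustration, a hypothetical run with \<^ML>\<open>Exn.get_res\<close> projecting
+  regular results:
+\<close>
+
+ML_val \<open>
+  val results = map (Exn.capture (fn i => 100 div i)) [2, 0, 5];
+  \<^assert> (Exn.get_res (nth results 0) = SOME 50);
+  \<^assert> (Exn.get_res (nth results 1) = NONE);  (*Div was reified*)
+  \<^assert> (Exn.release (nth results 2) = 20);
+\<close>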
@@ -1944,23 +1925,23 @@
@{index_ML Par_List.get_some: "('a -> 'b option) -> 'a list -> 'b option"} \\
\end{mldecls}
- \<^descr> @{ML Par_List.map}~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> is like @{ML "map"}~\<open>f [x\<^sub>1, \<dots>,
+ \<^descr> \<^ML>\<open>Par_List.map\<close>~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> is like \<^ML>\<open>map\<close>~\<open>f [x\<^sub>1, \<dots>,
x\<^sub>n]\<close>, but the evaluation of \<open>f x\<^sub>i\<close> for \<open>i = 1, \<dots>, n\<close> is performed in
parallel.
An exception in any \<open>f x\<^sub>i\<close> cancels the overall evaluation process. The
- final result is produced via @{ML Par_Exn.release_first} as explained above,
+ final result is produced via \<^ML>\<open>Par_Exn.release_first\<close> as explained above,
which means the first program exception that happened to occur in the
parallel evaluation is propagated, and all other failures are ignored.
- \<^descr> @{ML Par_List.get_some}~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> produces some \<open>f x\<^sub>i\<close> that is of
+ \<^descr> \<^ML>\<open>Par_List.get_some\<close>~\<open>f [x\<^sub>1, \<dots>, x\<^sub>n]\<close> produces some \<open>f x\<^sub>i\<close> that is of
the form \<open>SOME y\<^sub>i\<close>, if that exists, otherwise \<open>NONE\<close>. Thus it is similar to
- @{ML Library.get_first}, but subject to a non-deterministic parallel choice
+ \<^ML>\<open>Library.get_first\<close>, but subject to a non-deterministic parallel choice
process. The first successful result cancels the overall evaluation process;
- other exceptions are propagated as for @{ML Par_List.map}.
+ other exceptions are propagated as for \<^ML>\<open>Par_List.map\<close>.
This generic parallel choice combinator is the basis for derived forms, such
- as @{ML Par_List.find_some}, @{ML Par_List.exists}, @{ML Par_List.forall}.
+ as \<^ML>\<open>Par_List.find_some\<close>, \<^ML>\<open>Par_List.exists\<close>, \<^ML>\<open>Par_List.forall\<close>.
\<close>
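+
+text %mlex \<open>
+  A small sketch with invented data:
+\<close>
+
+ML_val \<open>
+  val xs = 1 upto 100;
+  \<^assert> (Par_List.map (fn i => i * i) xs = map (fn i => i * i) xs);
+  \<^assert> (Par_List.get_some (fn i => if i = 42 then SOME i else NONE) xs = SOME 42);
+\<close>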
text %mlex \<open>
@@ -2010,18 +1991,18 @@
@{index_ML Lazy.force: "'a lazy -> 'a"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a lazy"} represents lazy values over type \<^verbatim>\<open>'a\<close>.
-
- \<^descr> @{ML Lazy.lazy}~\<open>(fn () => e)\<close> wraps the unevaluated expression \<open>e\<close> as
+ \<^descr> Type \<^ML_type>\<open>'a lazy\<close> represents lazy values over type \<^verbatim>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Lazy.lazy\<close>~\<open>(fn () => e)\<close> wraps the unevaluated expression \<open>e\<close> as
unfinished lazy value.
- \<^descr> @{ML Lazy.value}~\<open>a\<close> wraps the value \<open>a\<close> as finished lazy value. When
+ \<^descr> \<^ML>\<open>Lazy.value\<close>~\<open>a\<close> wraps the value \<open>a\<close> as finished lazy value. When
forced, it returns \<open>a\<close> without any further evaluation.
There is very low overhead for this proforma wrapping of strict values as
lazy values.
- \<^descr> @{ML Lazy.force}~\<open>x\<close> produces the result of the lazy value in a
+ \<^descr> \<^ML>\<open>Lazy.force\<close>~\<open>x\<close> produces the result of the lazy value in a
thread-safe manner as explained above. Thus it may cause the current thread
to wait on a pending evaluation attempt by another thread.
\<close>
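+
+text %mlex \<open>
+  A minimal sketch of single evaluation, with a strictly local (and thus
+  harmless) unsynchronized counter:
+\<close>
+
+ML_val \<open>
+  val count = Unsynchronized.ref 0;
+  val x = Lazy.lazy (fn () => (count := !count + 1; !count));
+  \<^assert> (Lazy.force x = 1);
+  \<^assert> (Lazy.force x = 1);  (*cached result, no re-evaluation*)
+\<close>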
@@ -2098,33 +2079,32 @@
@{index_ML Future.fulfill: "'a future -> 'a -> unit"} \\
\end{mldecls}
- \<^descr> Type @{ML_type "'a future"} represents future values over type \<^verbatim>\<open>'a\<close>.
-
- \<^descr> @{ML Future.fork}~\<open>(fn () => e)\<close> registers the unevaluated expression \<open>e\<close>
+ \<^descr> Type \<^ML_type>\<open>'a future\<close> represents future values over type \<^verbatim>\<open>'a\<close>.
+
+ \<^descr> \<^ML>\<open>Future.fork\<close>~\<open>(fn () => e)\<close> registers the unevaluated expression \<open>e\<close>
as unfinished future value, to be evaluated eventually on the parallel
- worker-thread farm. This is a shorthand for @{ML Future.forks} below, with
+ worker-thread farm. This is a shorthand for \<^ML>\<open>Future.forks\<close> below, with
default parameters and a single expression.
- \<^descr> @{ML Future.forks}~\<open>params exprs\<close> is the general interface to fork several
+ \<^descr> \<^ML>\<open>Future.forks\<close>~\<open>params exprs\<close> is the general interface to fork several
futures simultaneously. The \<open>params\<close> consist of the following fields:
- \<^item> \<open>name : string\<close> (default @{ML "\"\""}) specifies a common name for the
+ \<^item> \<open>name : string\<close> (default \<^ML>\<open>""\<close>) specifies a common name for the
tasks of the forked futures, which serves diagnostic purposes.
- \<^item> \<open>group : Future.group option\<close> (default @{ML NONE}) specifies an optional
- task group for the forked futures. @{ML NONE} means that a new sub-group
+ \<^item> \<open>group : Future.group option\<close> (default \<^ML>\<open>NONE\<close>) specifies an optional
+ task group for the forked futures. \<^ML>\<open>NONE\<close> means that a new sub-group
of the current worker-thread task context is created. If this is not a
worker thread, the group will be a new root in the group hierarchy.
- \<^item> \<open>deps : Future.task list\<close> (default @{ML "[]"}) specifies dependencies on
+ \<^item> \<open>deps : Future.task list\<close> (default \<^ML>\<open>[]\<close>) specifies dependencies on
other future tasks, i.e.\ the adjacency relation in the global task queue.
Dependencies on already finished tasks are ignored.
- \<^item> \<open>pri : int\<close> (default @{ML 0}) specifies a priority within the task
+ \<^item> \<open>pri : int\<close> (default \<^ML>\<open>0\<close>) specifies a priority within the task
queue.
- Typically there is only little deviation from the default priority @{ML
- 0}. As a rule of thumb, @{ML "~1"} means ``low priority" and @{ML 1} means
+ Typically there is only little deviation from the default priority \<^ML>\<open>0\<close>. As a rule of thumb, \<^ML>\<open>~1\<close> means ``low priority'' and \<^ML>\<open>1\<close> means
``high priority''.
Note that the task priority only affects the position in the queue, not
@@ -2133,7 +2113,7 @@
Higher priority tasks that are queued later need to wait until this (or
another) worker thread becomes free again.
- \<^item> \<open>interrupts : bool\<close> (default @{ML true}) tells whether the worker thread
+ \<^item> \<open>interrupts : bool\<close> (default \<^ML>\<open>true\<close>) tells whether the worker thread
that processes the corresponding task is initially put into interruptible
state. This state may change again while running, by modifying the thread
attributes.
@@ -2142,7 +2122,7 @@
the responsibility of the programmer that this special state is retained
only briefly.
- \<^descr> @{ML Future.join}~\<open>x\<close> retrieves the value of an already finished future,
+ \<^descr> \<^ML>\<open>Future.join\<close>~\<open>x\<close> retrieves the value of an already finished future,
which may lead to an exception, according to the result of its previous
evaluation.
@@ -2164,8 +2144,8 @@
explicitly when forked (see \<open>deps\<close> above). Thus the evaluation can work from
the bottom up, without join conflicts and wait states.
- \<^descr> @{ML Future.joins}~\<open>xs\<close> joins the given list of futures simultaneously,
- which is more efficient than @{ML "map Future.join"}~\<open>xs\<close>.
+ \<^descr> \<^ML>\<open>Future.joins\<close>~\<open>xs\<close> joins the given list of futures simultaneously,
+ which is more efficient than \<^ML>\<open>map Future.join\<close>~\<open>xs\<close>.
Based on the dependency graph of tasks, the current thread takes over the
responsibility to evaluate future expressions that are required for the main
@@ -2173,23 +2153,22 @@
presently evaluated on other threads only happens as last resort, when no
other unfinished futures are left over.
- \<^descr> @{ML Future.value}~\<open>a\<close> wraps the value \<open>a\<close> as finished future value,
+ \<^descr> \<^ML>\<open>Future.value\<close>~\<open>a\<close> wraps the value \<open>a\<close> as finished future value,
bypassing the worker-thread farm. When joined, it returns \<open>a\<close> without any
further evaluation.
There is very low overhead for this proforma wrapping of strict values as
futures.
- \<^descr> @{ML Future.map}~\<open>f x\<close> is a fast-path implementation of @{ML
- Future.fork}~\<open>(fn () => f (\<close>@{ML Future.join}~\<open>x))\<close>, which avoids the full
+ \<^descr> \<^ML>\<open>Future.map\<close>~\<open>f x\<close> is a fast-path implementation of \<^ML>\<open>Future.fork\<close>~\<open>(fn () => f (\<close>\<^ML>\<open>Future.join\<close>~\<open>x))\<close>, which avoids the full
overhead of the task queue and worker-thread farm as far as possible. The
function \<open>f\<close> is supposed to be some trivial post-processing or projection of
the future result.
- \<^descr> @{ML Future.cancel}~\<open>x\<close> cancels the task group of the given future, using
- @{ML Future.cancel_group} below.
-
- \<^descr> @{ML Future.cancel_group}~\<open>group\<close> cancels all tasks of the given task
+ \<^descr> \<^ML>\<open>Future.cancel\<close>~\<open>x\<close> cancels the task group of the given future, using
+ \<^ML>\<open>Future.cancel_group\<close> below.
+
+ \<^descr> \<^ML>\<open>Future.cancel_group\<close>~\<open>group\<close> cancels all tasks of the given task
group for all time. Threads that are presently processing a task of the
given group are interrupted: it may take some time until they are actually
terminated. Tasks that are queued but not yet processed are dequeued and
@@ -2197,10 +2176,10 @@
any further attempt to fork a future that belongs to it will yield a
canceled result as well.
- \<^descr> @{ML Future.promise}~\<open>abort\<close> registers a passive future with the given
+ \<^descr> \<^ML>\<open>Future.promise\<close>~\<open>abort\<close> registers a passive future with the given
\<open>abort\<close> operation: it is invoked when the future task group is canceled.
- \<^descr> @{ML Future.fulfill}~\<open>x a\<close> finishes the passive future \<open>x\<close> by the given
+ \<^descr> \<^ML>\<open>Future.fulfill\<close>~\<open>x a\<close> finishes the passive future \<open>x\<close> by the given
value \<open>a\<close>. If the promise has already been canceled, the attempt to fulfill
it causes an exception.
\<close>
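+
+text %mlex \<open>
+  A minimal sketch with an invented computation:
+\<close>
+
+ML_val \<open>
+  val x = Future.fork (fn () => 21 + 21);
+  \<^assert> (Future.join x = 42);
+  val y = Future.map (fn i => i + 1) (Future.value 41);
+  \<^assert> (Future.join y = 42);
+\<close>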
--- a/src/Doc/Implementation/Prelim.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Prelim.thy Sat Jan 05 17:24:33 2019 +0100
@@ -116,22 +116,22 @@
@{index_ML Theory.ancestors_of: "theory -> theory list"} \\
\end{mldecls}
- \<^descr> Type @{ML_type theory} represents theory contexts.
+ \<^descr> Type \<^ML_type>\<open>theory\<close> represents theory contexts.
- \<^descr> @{ML "Context.eq_thy"}~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> check strict identity of two
+ \<^descr> \<^ML>\<open>Context.eq_thy\<close>~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> checks strict identity of two
theories.
- \<^descr> @{ML "Context.subthy"}~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> compares theories according to the
+ \<^descr> \<^ML>\<open>Context.subthy\<close>~\<open>(thy\<^sub>1, thy\<^sub>2)\<close> compares theories according to the
intrinsic graph structure of the construction. This sub-theory relation is a
nominal approximation of inclusion (\<open>\<subseteq>\<close>) of the corresponding content
(according to the semantics of the ML modules that implement the data).
- \<^descr> @{ML "Theory.begin_theory"}~\<open>name parents\<close> constructs a new theory based
+ \<^descr> \<^ML>\<open>Theory.begin_theory\<close>~\<open>name parents\<close> constructs a new theory based
on the given parents. This ML function is normally not invoked directly.
- \<^descr> @{ML "Theory.parents_of"}~\<open>thy\<close> returns the direct ancestors of \<open>thy\<close>.
+ \<^descr> \<^ML>\<open>Theory.parents_of\<close>~\<open>thy\<close> returns the direct ancestors of \<open>thy\<close>.
- \<^descr> @{ML "Theory.ancestors_of"}~\<open>thy\<close> returns all ancestors of \<open>thy\<close> (not
+ \<^descr> \<^ML>\<open>Theory.ancestors_of\<close>~\<open>thy\<close> returns all ancestors of \<open>thy\<close> (not
including \<open>thy\<close> itself).
\<close>
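+
+text %mlex \<open>
+  A brief sketch against the background theory of this very context
+  (assuming it has at least one parent):
+\<close>
+
+ML_val \<open>
+  val thy = \<^theory>;
+  \<^assert> (Context.eq_thy (thy, thy));
+  \<^assert> (Context.subthy (hd (Theory.parents_of thy), thy));
+\<close>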
@@ -141,11 +141,11 @@
@{ML_antiquotation_def "theory_context"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation theory} embedded?
;
@@{ML_antiquotation theory_context} embedded
- \<close>}
+ \<close>
\<^descr> \<open>@{theory}\<close> refers to the background theory of the current context --- as
abstract value.
@@ -154,8 +154,7 @@
background theory of the current context --- as abstract value.
\<^descr> \<open>@{theory_context A}\<close> is similar to \<open>@{theory A}\<close>, but presents the result
- as initial @{ML_type Proof.context} (see also @{ML
- Proof_Context.init_global}).
+ as initial \<^ML_type>\<open>Proof.context\<close> (see also \<^ML>\<open>Proof_Context.init_global\<close>).
\<close>
@@ -194,15 +193,15 @@
@{index_ML Proof_Context.transfer: "theory -> Proof.context -> Proof.context"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Proof.context} represents proof contexts.
+ \<^descr> Type \<^ML_type>\<open>Proof.context\<close> represents proof contexts.
- \<^descr> @{ML Proof_Context.init_global}~\<open>thy\<close> produces a proof context derived
+ \<^descr> \<^ML>\<open>Proof_Context.init_global\<close>~\<open>thy\<close> produces a proof context derived
from \<open>thy\<close>, initializing all data.
- \<^descr> @{ML Proof_Context.theory_of}~\<open>ctxt\<close> selects the background theory from
+ \<^descr> \<^ML>\<open>Proof_Context.theory_of\<close>~\<open>ctxt\<close> selects the background theory from
\<open>ctxt\<close>.
- \<^descr> @{ML Proof_Context.transfer}~\<open>thy ctxt\<close> promotes the background theory of
+ \<^descr> \<^ML>\<open>Proof_Context.transfer\<close>~\<open>thy ctxt\<close> promotes the background theory of
\<open>ctxt\<close> to the super theory \<open>thy\<close>.
\<close>
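+
+text %mlex \<open>
+  For illustration, a hypothetical round trip:
+\<close>
+
+ML_val \<open>
+  val ctxt = Proof_Context.init_global \<^theory>;
+  \<^assert> (Context.eq_thy (Proof_Context.theory_of ctxt, \<^theory>));
+\<close>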
@@ -242,15 +241,14 @@
@{index_ML Context.proof_of: "Context.generic -> Proof.context"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Context.generic} is the direct sum of @{ML_type "theory"}
- and @{ML_type "Proof.context"}, with the datatype constructors @{ML
- "Context.Theory"} and @{ML "Context.Proof"}.
+ \<^descr> Type \<^ML_type>\<open>Context.generic\<close> is the direct sum of \<^ML_type>\<open>theory\<close>
+ and \<^ML_type>\<open>Proof.context\<close>, with the datatype constructors \<^ML>\<open>Context.Theory\<close> and \<^ML>\<open>Context.Proof\<close>.
- \<^descr> @{ML Context.theory_of}~\<open>context\<close> always produces a theory from the
- generic \<open>context\<close>, using @{ML "Proof_Context.theory_of"} as required.
+ \<^descr> \<^ML>\<open>Context.theory_of\<close>~\<open>context\<close> always produces a theory from the
+ generic \<open>context\<close>, using \<^ML>\<open>Proof_Context.theory_of\<close> as required.
- \<^descr> @{ML Context.proof_of}~\<open>context\<close> always produces a proof context from the
- generic \<open>context\<close>, using @{ML "Proof_Context.init_global"} as required (note
+ \<^descr> \<^ML>\<open>Context.proof_of\<close>~\<open>context\<close> always produces a proof context from the
+ generic \<open>context\<close>, using \<^ML>\<open>Proof_Context.init_global\<close> as required (note
that this re-initializes the context data with each invocation).
\<close>
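+
+text %mlex \<open>
+  A small sketch of the two injections and projections:
+\<close>
+
+ML_val \<open>
+  val context = Context.Proof \<^context>;
+  \<^assert> (Context.eq_thy (Context.theory_of context, \<^theory>));
+  val ctxt = Context.proof_of (Context.Theory \<^theory>);
+  \<^assert> (Context.eq_thy (Proof_Context.theory_of ctxt, \<^theory>));
+\<close>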
@@ -282,7 +280,7 @@
Implementing \<open>merge\<close> can be tricky. The general idea is that \<open>merge (data\<^sub>1,
data\<^sub>2)\<close> inserts those parts of \<open>data\<^sub>2\<close> into \<open>data\<^sub>1\<close> that are not yet
- present, while keeping the general order of things. The @{ML Library.merge}
+ present, while keeping the general order of things. The \<^ML>\<open>Library.merge\<close>
function on plain lists may serve as canonical template.
Particularly note that shared parts of the data must not be duplicated by
@@ -342,15 +340,14 @@
@{index_ML_functor Generic_Data} \\
\end{mldecls}
- \<^descr> @{ML_functor Theory_Data}\<open>(spec)\<close> declares data for type @{ML_type theory}
+ \<^descr> \<^ML_functor>\<open>Theory_Data\<close>\<open>(spec)\<close> declares data for type \<^ML_type>\<open>theory\<close>
according to the specification provided as argument structure. The resulting
structure provides data init and access operations as described above.
- \<^descr> @{ML_functor Proof_Data}\<open>(spec)\<close> is analogous to @{ML_functor Theory_Data}
- for type @{ML_type Proof.context}.
+ \<^descr> \<^ML_functor>\<open>Proof_Data\<close>\<open>(spec)\<close> is analogous to \<^ML_functor>\<open>Theory_Data\<close>
+ for type \<^ML_type>\<open>Proof.context\<close>.
- \<^descr> @{ML_functor Generic_Data}\<open>(spec)\<close> is analogous to @{ML_functor
- Theory_Data} for type @{ML_type Context.generic}. \<close>
+ \<^descr> \<^ML_functor>\<open>Generic_Data\<close>\<open>(spec)\<close> is analogous to \<^ML_functor>\<open>Theory_Data\<close> for type \<^ML_type>\<open>Context.generic\<close>. \<close>
text %mlex \<open>
The following artificial example demonstrates theory data: we maintain a set
@@ -397,32 +394,29 @@
\<close>
text \<open>
- Type @{ML_type "term Ord_List.T"} is used for reasonably efficient
+ Type \<^ML_type>\<open>term Ord_List.T\<close> is used for reasonably efficient
representation of a set of terms: all operations are linear in the number of
stored elements. Here we assume that users of this module do not care about
the declaration order, since that data structure forces its own arrangement
of elements.
- Observe how the @{ML_text merge} operation joins the data slots of the two
- constituents: @{ML Ord_List.union} prevents duplication of common data from
+ Observe how the \<^ML_text>\<open>merge\<close> operation joins the data slots of the two
+ constituents: \<^ML>\<open>Ord_List.union\<close> prevents duplication of common data from
different branches, thus avoiding the danger of exponential blowup. Plain
list append etc.\ must never be used for theory data merges!
\<^medskip>
Our intended invariant is achieved as follows:
- \<^enum> @{ML Wellformed_Terms.add} only admits terms that have passed the @{ML
- Sign.cert_term} check of the given theory at that point.
+ \<^enum> \<^ML>\<open>Wellformed_Terms.add\<close> only admits terms that have passed the \<^ML>\<open>Sign.cert_term\<close> check of the given theory at that point.
- \<^enum> Wellformedness in the sense of @{ML Sign.cert_term} is monotonic wrt.\
+ \<^enum> Wellformedness in the sense of \<^ML>\<open>Sign.cert_term\<close> is monotonic wrt.\
the sub-theory relation. So our data can move upwards in the hierarchy
(via extension or merges), and maintain wellformedness without further
checks.
- Note that all basic operations of the inference kernel (which includes @{ML
- Sign.cert_term}) observe this monotonicity principle, but other user-space
- tools don't. For example, fully-featured type-inference via @{ML
- Syntax.check_term} (cf.\ \secref{sec:term-check}) is not necessarily
+ Note that all basic operations of the inference kernel (which includes \<^ML>\<open>Sign.cert_term\<close>) observe this monotonicity principle, but other user-space
+ tools don't. For example, fully-featured type-inference via \<^ML>\<open>Syntax.check_term\<close> (cf.\ \secref{sec:term-check}) is not necessarily
monotonic wrt.\ the background theory, since constraints of term constants
can be modified by later declarations, for example.
@@ -496,62 +490,61 @@
string Config.T"} \\
\end{mldecls}
- \<^descr> @{ML Config.get}~\<open>ctxt config\<close> gets the value of \<open>config\<close> in the given
+ \<^descr> \<^ML>\<open>Config.get\<close>~\<open>ctxt config\<close> gets the value of \<open>config\<close> in the given
context.
- \<^descr> @{ML Config.map}~\<open>config f ctxt\<close> updates the context by updating the value
+ \<^descr> \<^ML>\<open>Config.map\<close>~\<open>config f ctxt\<close> updates the context by updating the value
of \<open>config\<close>.
- \<^descr> \<open>config =\<close>~@{ML Attrib.setup_config_bool}~\<open>name default\<close> creates a named
- configuration option of type @{ML_type bool}, with the given \<open>default\<close>
+ \<^descr> \<open>config =\<close>~\<^ML>\<open>Attrib.setup_config_bool\<close>~\<open>name default\<close> creates a named
+ configuration option of type \<^ML_type>\<open>bool\<close>, with the given \<open>default\<close>
depending on the application context. The resulting \<open>config\<close> can be used to
get/map its value in a given context. There is an implicit update of the
background theory that registers the option as attribute with some concrete
syntax.
- \<^descr> @{ML Attrib.config_int}, @{ML Attrib.config_real}, and @{ML
- Attrib.config_string} work like @{ML Attrib.config_bool}, but for types
- @{ML_type int} and @{ML_type string}, respectively.
+ \<^descr> \<^ML>\<open>Attrib.config_int\<close>, \<^ML>\<open>Attrib.config_real\<close>, and \<^ML>\<open>Attrib.config_string\<close> work like \<^ML>\<open>Attrib.config_bool\<close>, but for types
+ \<^ML_type>\<open>int\<close> and \<^ML_type>\<open>string\<close>, respectively.
\<close>
text %mlex \<open>
The following example shows how to declare and use a Boolean configuration
- option called \<open>my_flag\<close> with constant default value @{ML false}.
+ option called \<open>my_flag\<close> with constant default value \<^ML>\<open>false\<close>.
\<close>
ML \<open>
val my_flag =
- Attrib.setup_config_bool @{binding my_flag} (K false)
+ Attrib.setup_config_bool \<^binding>\<open>my_flag\<close> (K false)
\<close>
text \<open>
Now the user can refer to @{attribute my_flag} in declarations, while ML
- tools can retrieve the current value from the context via @{ML Config.get}.
+ tools can retrieve the current value from the context via \<^ML>\<open>Config.get\<close>.
\<close>
-ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
+ML_val \<open>\<^assert> (Config.get \<^context> my_flag = false)\<close>
declare [[my_flag = true]]
-ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
+ML_val \<open>\<^assert> (Config.get \<^context> my_flag = true)\<close>
notepad
begin
{
note [[my_flag = false]]
- ML_val \<open>@{assert} (Config.get @{context} my_flag = false)\<close>
+ ML_val \<open>\<^assert> (Config.get \<^context> my_flag = false)\<close>
}
- ML_val \<open>@{assert} (Config.get @{context} my_flag = true)\<close>
+ ML_val \<open>\<^assert> (Config.get \<^context> my_flag = true)\<close>
end
text \<open>
- Here is another example involving ML type @{ML_type real} (floating-point
+ Here is another example involving ML type \<^ML_type>\<open>real\<close> (floating-point
numbers).
\<close>
ML \<open>
val airspeed_velocity =
- Attrib.setup_config_real @{binding airspeed_velocity} (K 0.0)
+ Attrib.setup_config_real \<^binding>\<open>airspeed_velocity\<close> (K 0.0)
\<close>
declare [[airspeed_velocity = 10]]
@@ -630,43 +623,43 @@
@{index_ML Variable.names_of: "Proof.context -> Name.context"} \\
\end{mldecls}
- \<^descr> @{ML Name.internal}~\<open>name\<close> produces an internal name by adding one
+ \<^descr> \<^ML>\<open>Name.internal\<close>~\<open>name\<close> produces an internal name by adding one
underscore.
- \<^descr> @{ML Name.skolem}~\<open>name\<close> produces a Skolem name by adding two underscores.
+ \<^descr> \<^ML>\<open>Name.skolem\<close>~\<open>name\<close> produces a Skolem name by adding two underscores.
- \<^descr> Type @{ML_type Name.context} represents the context of already used names;
- the initial value is @{ML "Name.context"}.
+ \<^descr> Type \<^ML_type>\<open>Name.context\<close> represents the context of already used names;
+ the initial value is \<^ML>\<open>Name.context\<close>.
- \<^descr> @{ML Name.declare}~\<open>name\<close> enters a used name into the context.
+ \<^descr> \<^ML>\<open>Name.declare\<close>~\<open>name\<close> enters a used name into the context.
- \<^descr> @{ML Name.invent}~\<open>context name n\<close> produces \<open>n\<close> fresh names derived from
+ \<^descr> \<^ML>\<open>Name.invent\<close>~\<open>context name n\<close> produces \<open>n\<close> fresh names derived from
\<open>name\<close>.
- \<^descr> @{ML Name.variant}~\<open>name context\<close> produces a fresh variant of \<open>name\<close>; the
+ \<^descr> \<^ML>\<open>Name.variant\<close>~\<open>name context\<close> produces a fresh variant of \<open>name\<close>; the
result is declared to the context.
- \<^descr> @{ML Variable.names_of}~\<open>ctxt\<close> retrieves the context of declared type and
+ \<^descr> \<^ML>\<open>Variable.names_of\<close>~\<open>ctxt\<close> retrieves the context of declared type and
term variable names. Projecting a proof context down to a primitive name
context is occasionally useful when invoking lower-level operations. Regular
management of ``fresh variables'' is done by suitable operations of
- structure @{ML_structure Variable}, which is also able to provide an
+ structure \<^ML_structure>\<open>Variable\<close>, which is also able to provide an
official status of ``locally fixed variable'' within the logical environment
(cf.\ \secref{sec:variables}).
\<close>
text %mlex \<open>
The following simple examples demonstrate how to produce fresh names from
- the initial @{ML Name.context}.
+ the initial \<^ML>\<open>Name.context\<close>.
\<close>
ML_val \<open>
val list1 = Name.invent Name.context "a" 5;
- @{assert} (list1 = ["a", "b", "c", "d", "e"]);
+ \<^assert> (list1 = ["a", "b", "c", "d", "e"]);
val list2 =
#1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] Name.context);
- @{assert} (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
+ \<^assert> (list2 = ["x", "xa", "a", "aa", "'a", "'aa"]);
\<close>
text \<open>
@@ -677,14 +670,14 @@
begin
ML_val \<open>
- val names = Variable.names_of @{context};
+ val names = Variable.names_of \<^context>;
val list1 = Name.invent names "a" 5;
- @{assert} (list1 = ["d", "e", "f", "g", "h"]);
+ \<^assert> (list1 = ["d", "e", "f", "g", "h"]);
val list2 =
#1 (fold_map Name.variant ["x", "x", "a", "a", "'a", "'a"] names);
- @{assert} (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
+ \<^assert> (list2 = ["x", "xa", "aa", "ab", "'aa", "'ab"]);
\<close>
end
@@ -726,8 +719,8 @@
@{index_ML_type indexname: "string * int"} \\
\end{mldecls}
- \<^descr> Type @{ML_type indexname} represents indexed names. This is an
- abbreviation for @{ML_type "string * int"}. The second component is usually
+ \<^descr> Type \<^ML_type>\<open>indexname\<close> represents indexed names. This is an
+ abbreviation for \<^ML_type>\<open>string * int\<close>. The second component is usually
non-negative, except for situations where \<open>(x, -1)\<close> is used to inject basic
names into this type. Other negative indexes should not be used.
\<close>
@@ -765,13 +758,13 @@
@{index_ML Long_Name.explode: "string -> string list"} \\
\end{mldecls}
- \<^descr> @{ML Long_Name.base_name}~\<open>name\<close> returns the base name of a long name.
+ \<^descr> \<^ML>\<open>Long_Name.base_name\<close>~\<open>name\<close> returns the base name of a long name.
- \<^descr> @{ML Long_Name.qualifier}~\<open>name\<close> returns the qualifier of a long name.
+ \<^descr> \<^ML>\<open>Long_Name.qualifier\<close>~\<open>name\<close> returns the qualifier of a long name.
- \<^descr> @{ML Long_Name.append}~\<open>name\<^sub>1 name\<^sub>2\<close> appends two long names.
+ \<^descr> \<^ML>\<open>Long_Name.append\<close>~\<open>name\<^sub>1 name\<^sub>2\<close> appends two long names.
- \<^descr> @{ML Long_Name.implode}~\<open>names\<close> and @{ML Long_Name.explode}~\<open>name\<close> convert
+ \<^descr> \<^ML>\<open>Long_Name.implode\<close>~\<open>names\<close> and \<^ML>\<open>Long_Name.explode\<close>~\<open>name\<close> convert
between the packed string representation and the explicit list form of long
names.
\<close>
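+
+text %mlex \<open>
+  A brief sketch with invented names:
+\<close>
+
+ML_val \<open>
+  val name = Long_Name.append "Foo" "bar";
+  \<^assert> (name = "Foo.bar");
+  \<^assert> (Long_Name.base_name name = "bar");
+  \<^assert> (Long_Name.qualifier name = "Foo");
+  \<^assert> (Long_Name.explode name = ["Foo", "bar"]);
+\<close>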
@@ -860,75 +853,74 @@
@{index_ML Name_Space.is_concealed: "Name_Space.T -> string -> bool"}
\end{mldecls}
- \<^descr> Type @{ML_type binding} represents the abstract concept of name bindings.
+ \<^descr> Type \<^ML_type>\<open>binding\<close> represents the abstract concept of name bindings.
- \<^descr> @{ML Binding.empty} is the empty binding.
+ \<^descr> \<^ML>\<open>Binding.empty\<close> is the empty binding.
- \<^descr> @{ML Binding.name}~\<open>name\<close> produces a binding with base name \<open>name\<close>. Note
+ \<^descr> \<^ML>\<open>Binding.name\<close>~\<open>name\<close> produces a binding with base name \<open>name\<close>. Note
that this lacks proper source position information; see also the ML
antiquotation @{ML_antiquotation binding}.
- \<^descr> @{ML Binding.qualify}~\<open>mandatory name binding\<close> prefixes qualifier \<open>name\<close>
+ \<^descr> \<^ML>\<open>Binding.qualify\<close>~\<open>mandatory name binding\<close> prefixes qualifier \<open>name\<close>
to \<open>binding\<close>. The \<open>mandatory\<close> flag tells whether this name component always needs
to be given in name space accesses --- this is mostly \<open>false\<close> in practice.
Note that this part of qualification is typically used in derived
specification mechanisms.
- \<^descr> @{ML Binding.prefix} is similar to @{ML Binding.qualify}, but affects the
+ \<^descr> \<^ML>\<open>Binding.prefix\<close> is similar to \<^ML>\<open>Binding.qualify\<close>, but affects the
system prefix. This part of extra qualification is typically used in the
infrastructure for modular specifications, notably ``local theory targets''
(see also \chref{ch:local-theory}).
- \<^descr> @{ML Binding.concealed}~\<open>binding\<close> indicates that the binding shall refer
+ \<^descr> \<^ML>\<open>Binding.concealed\<close>~\<open>binding\<close> indicates that the binding shall refer
to an entity that serves foundational purposes only. This flag helps to mark
implementation details of specification mechanisms etc. Other tools should
- not depend on the particulars of concealed entities (cf.\ @{ML
- Name_Space.is_concealed}).
+ not depend on the particulars of concealed entities (cf.\ \<^ML>\<open>Name_Space.is_concealed\<close>).
- \<^descr> @{ML Binding.print}~\<open>binding\<close> produces a string representation for
+ \<^descr> \<^ML>\<open>Binding.print\<close>~\<open>binding\<close> produces a string representation for
human-readable output, together with some formal markup that might get used
in GUI front-ends, for example.
- \<^descr> Type @{ML_type Name_Space.naming} represents the abstract concept of a
+ \<^descr> Type \<^ML_type>\<open>Name_Space.naming\<close> represents the abstract concept of a
naming policy.
- \<^descr> @{ML Name_Space.global_naming} is the default naming policy: it is global
+ \<^descr> \<^ML>\<open>Name_Space.global_naming\<close> is the default naming policy: it is global
and lacks any path prefix. In a regular theory context this is augmented by
a path prefix consisting of the theory name.
- \<^descr> @{ML Name_Space.add_path}~\<open>path naming\<close> augments the naming policy by
+ \<^descr> \<^ML>\<open>Name_Space.add_path\<close>~\<open>path naming\<close> augments the naming policy by
extending its path component.
- \<^descr> @{ML Name_Space.full_name}~\<open>naming binding\<close> turns a name binding (usually
+ \<^descr> \<^ML>\<open>Name_Space.full_name\<close>~\<open>naming binding\<close> turns a name binding (usually
a basic name) into the fully qualified internal name, according to the given
naming policy.
- \<^descr> Type @{ML_type Name_Space.T} represents name spaces.
+ \<^descr> Type \<^ML_type>\<open>Name_Space.T\<close> represents name spaces.
- \<^descr> @{ML Name_Space.empty}~\<open>kind\<close> and @{ML Name_Space.merge}~\<open>(space\<^sub>1,
+ \<^descr> \<^ML>\<open>Name_Space.empty\<close>~\<open>kind\<close> and \<^ML>\<open>Name_Space.merge\<close>~\<open>(space\<^sub>1,
space\<^sub>2)\<close> are the canonical operations for maintaining name spaces according
to theory data management (\secref{sec:context-data}); \<open>kind\<close> is a formal
comment to characterize the purpose of a name space.
- \<^descr> @{ML Name_Space.declare}~\<open>context strict binding space\<close> enters a name
+ \<^descr> \<^ML>\<open>Name_Space.declare\<close>~\<open>context strict binding space\<close> enters a name
binding as fully qualified internal name into the name space, using the
naming of the context.
- \<^descr> @{ML Name_Space.intern}~\<open>space name\<close> internalizes a (partially qualified)
+ \<^descr> \<^ML>\<open>Name_Space.intern\<close>~\<open>space name\<close> internalizes a (partially qualified)
external name.
This operation is mostly for parsing! Note that fully qualified names
- stemming from declarations are produced via @{ML "Name_Space.full_name"} and
- @{ML "Name_Space.declare"} (or their derivatives for @{ML_type theory} and
- @{ML_type Proof.context}).
+ stemming from declarations are produced via \<^ML>\<open>Name_Space.full_name\<close> and
+ \<^ML>\<open>Name_Space.declare\<close> (or their derivatives for \<^ML_type>\<open>theory\<close> and
+ \<^ML_type>\<open>Proof.context\<close>).
- \<^descr> @{ML Name_Space.extern}~\<open>ctxt space name\<close> externalizes a (fully qualified)
+ \<^descr> \<^ML>\<open>Name_Space.extern\<close>~\<open>ctxt space name\<close> externalizes a (fully qualified)
internal name.
This operation is mostly for printing! User code should not rely on the
precise result too much.
- \<^descr> @{ML Name_Space.is_concealed}~\<open>space name\<close> indicates whether \<open>name\<close> refers
+ \<^descr> \<^ML>\<open>Name_Space.is_concealed\<close>~\<open>space name\<close> indicates whether \<open>name\<close> refers
to a strictly private entity that other tools are supposed to ignore!
\<close>
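+
+text %mlex \<open>
+  A minimal sketch with invented names (\<^ML>\<open>Binding.name_of\<close> yields the
+  base name); proper source positions would come from the antiquotation
+  @{ML_antiquotation binding} below:
+\<close>
+
+ML_val \<open>
+  val b = Binding.qualify false "Foo" (Binding.name "bar");
+  \<^assert> (Binding.name_of b = "bar");
+  writeln (Binding.print b);
+\<close>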
@@ -937,13 +929,13 @@
@{ML_antiquotation_def "binding"} & : & \<open>ML_antiquotation\<close> \\
\end{matharray}
- @{rail \<open>
+ \<^rail>\<open>
@@{ML_antiquotation binding} embedded
- \<close>}
+ \<close>
\<^descr> \<open>@{binding name}\<close> produces a binding with base name \<open>name\<close> and the source
position taken from the concrete syntax of this antiquotation. In many
- situations this is more appropriate than the more basic @{ML Binding.name}
+ situations this is more appropriate than the more basic \<^ML>\<open>Binding.name\<close>
function.
\<close>
@@ -952,7 +944,7 @@
inlined into the text:
\<close>
-ML_val \<open>Binding.pos_of @{binding here}\<close>
+ML_val \<open>Binding.pos_of \<^binding>\<open>here\<close>\<close>
text \<open>
\<^medskip>
@@ -961,7 +953,7 @@
ML_command
\<open>writeln
- ("Look here" ^ Position.here (Binding.pos_of @{binding here}))\<close>
+ ("Look here" ^ Position.here (Binding.pos_of \<^binding>\<open>here\<close>))\<close>
text \<open>
This illustrates a key virtue of formalized bindings as opposed to raw
@@ -973,6 +965,6 @@
occasionally useful for experimentation and diagnostic purposes:
\<close>
-ML_command \<open>warning ("Look here" ^ Position.here @{here})\<close>
+ML_command \<open>warning ("Look here" ^ Position.here \<^here>)\<close>
end
--- a/src/Doc/Implementation/Proof.thy Sat Jan 05 17:00:43 2019 +0100
+++ b/src/Doc/Implementation/Proof.thy Sat Jan 05 17:24:33 2019 +0100
@@ -107,40 +107,40 @@
((string * (string * typ)) list * term) * Proof.context"} \\
\end{mldecls}
- \<^descr> @{ML Variable.add_fixes}~\<open>xs ctxt\<close> fixes term variables \<open>xs\<close>, returning
+ \<^descr> \<^ML>\<open>Variable.add_fixes\<close>~\<open>xs ctxt\<close> fixes term variables \<open>xs\<close>, returning
the resulting internal names. By default, the internal representation
coincides with the external one, which also means that the given variables
must not be fixed already. There is a different policy within a local proof
body: the given names are just hints for newly invented Skolem variables.
- \<^descr> @{ML Variable.variant_fixes} is similar to @{ML Variable.add_fixes}, but
+ \<^descr> \<^ML>\<open>Variable.variant_fixes\<close> is similar to \<^ML>\<open>Variable.add_fixes\<close>, but
always produces fresh variants of the given names.
- \<^descr> @{ML Variable.declare_term}~\<open>t ctxt\<close> declares term \<open>t\<close> to belong to the
+ \<^descr> \<^ML>\<open>Variable.declare_term\<close>~\<open>t ctxt\<close> declares term \<open>t\<close> to belong to the
context. This automatically fixes new type variables, but not term
variables. Syntactic constraints for type and term variables are declared
uniformly, though.
- \<^descr> @{ML Variable.declare_constraints}~\<open>t ctxt\<close> declares syntactic constraints
+ \<^descr> \<^ML>\<open>Variable.declare_constraints\<close>~\<open>t ctxt\<close> declares syntactic constraints
from term \<open>t\<close>, without making it part of the context yet.
- \<^descr> @{ML Variable.export}~\<open>inner outer thms\<close> generalizes fixed type and term
+ \<^descr> \<^ML>\<open>Variable.export\<close>~\<open>inner outer thms\<close> generalizes fixed type and term
variables in \<open>thms\<close> according to the difference of the \<open>inner\<close> and \<open>outer\<close>
context, following the principles sketched above.
- \<^descr> @{ML Variable.polymorphic}~\<open>ctxt ts\<close> generalizes type variables in \<open>ts\<close> as
+ \<^descr> \<^ML>\<open>Variable.polymorphic\<close>~\<open>ctxt ts\<close> generalizes type variables in \<open>ts\<close> as
far as possible, even those occurring in fixed term variables. The default
policy of type-inference is to fix newly introduced type variables, which is
- essentially reversed with @{ML Variable.polymorphic}: here the given terms
+ essentially reversed with \<^ML>\<open>Variable.polymorphic\<close>: here the given terms
are detached from the context as far as possible.
- \<^descr> @{ML Variable.import}~\<open>open thms ctxt\<close> invents fixed type and term
+ \<^descr> \<^ML>\<open>Variable.import\<close>~\<open>open thms ctxt\<close> invents fixed type and term
variables for the schematic ones occurring in \<open>thms\<close>. The \<open>open\<close> flag
indicates whether the fixed names should be accessible to the user,
otherwise newly introduced names are marked as ``internal''
(\secref{sec:names}).
- \<^descr> @{ML Variable.focus}~\<open>bindings B\<close> decomposes the outermost \<open>\<And>\<close> prefix of
+ \<^descr> \<^ML>\<open>Variable.focus\<close>~\<open>bindings B\<close> decomposes the outermost \<open>\<And>\<close> prefix of
proposition \<open>B\<close>, using the given name bindings.
\<close>
@@ -151,7 +151,7 @@
ML_val \<open>
(*static compile-time context -- for testing only*)
- val ctxt0 = @{context};
+ val ctxt0 = \<^context>;
(*locally fixed parameters -- no type assignment yet*)
val ([x, y], ctxt1) = ctxt0 |> Variable.add_fixes ["x", "y"];
@@ -177,7 +177,7 @@
\<close>
ML_val \<open>
- val ctxt0 = @{context};
+ val ctxt0 = \<^context>;
val ([x1, x2, x3], ctxt1) =
ctxt0 |> Variable.variant_fixes ["x", "x", "x"];
\<close>
@@ -192,7 +192,7 @@
notepad
begin
ML_prf %"ML"
- \<open>val ctxt0 = @{context};
+ \<open>val ctxt0 = \<^context>;
val ([x1], ctxt1) = ctxt0 |> Variable.add_fixes ["x"];
val ([x2], ctxt2) = ctxt1 |> Variable.add_fixes ["x"];
@@ -203,7 +203,7 @@
end
text \<open>
- In this situation @{ML Variable.add_fixes} and @{ML Variable.variant_fixes}
+ In this situation \<^ML>\<open>Variable.add_fixes\<close> and \<^ML>\<open>Variable.variant_fixes\<close>
are very similar, but identical name proposals given in a row are only
accepted by the second version.
\<close>
@@ -273,41 +273,39 @@
@{index_ML Assumption.export: "bool -> Proof.context -> Proof.context -> thm -> thm"} \\
\end{mldecls}
- \<^descr> Type @{ML_type Assumption.export} represents arbitrary export rules, which
- is any function of type @{ML_type "bool -> cterm list -> thm -> thm"}, where
- the @{ML_type "bool"} indicates goal mode, and the @{ML_type "cterm list"}
+ \<^descr> Type \<^ML_type>\<open>Assumption.export\<close> represents arbitrary export rules, which
+ is any function of type \<^ML_type>\<open>bool -> cterm list -> thm -> thm\<close>, where
+ the \<^ML_type>\<open>bool\<close> indicates goal mode, and the \<^ML_type>\<open>cterm list\<close>
the collection of assumptions to be discharged simultaneously.
- \<^descr> @{ML Assumption.assume}~\<open>ctxt A\<close> turns proposition \<open>A\<close> into a primitive
+ \<^descr> \<^ML>\<open>Assumption.assume\<close>~\<open>ctxt A\<close> turns proposition \<open>A\<close> into a primitive
assumption \<open>A \<turnstile> A'\<close>, where the conclusion \<open>A'\<close> is in HHF normal form.
- \<^descr> @{ML Assumption.add_assms}~\<open>r As\<close> augments the context by assumptions \<open>As\<close>
+ \<^descr> \<^ML>\<open>Assumption.add_assms\<close>~\<open>r As\<close> augments the context by assumptions \<open>As\<close>
with export rule \<open>r\<close>. The resulting facts are hypothetical theorems as
- produced by the raw @{ML Assumption.assume}.
+ produced by the raw \<^ML>\<open>Assumption.assume\<close>.
- \<^descr> @{ML Assumption.add_assumes}~\<open>As\<close> is a special case of @{ML
- Assumption.add_assms} where the export rule performs \<open>\<Longrightarrow>\<hyphen>intro\<close> or
+ \<^descr> \<^ML>\<open>Assumption.add_assumes\<close>~\<open>As\<close> is a special case of \<^ML>\<open>Assumption.add_assms\<close> where the export rule performs \<open>\<Longrightarrow>\<hyphen>int